[
  {
    "path": ".github/workflows/test.yml",
    "content": "name: CI\n\non:\n  pull_request:\n    branches: [ main ]\n  push:\n    branches:\n      - main\n    paths:\n      - '!README.md'\n\n\njobs:\n  build-and-test:\n    runs-on: ubuntu-24.04\n\n    steps:\n    - uses: actions/checkout@v4\n\n    - name: install dependencies\n      run: sudo apt-get update && sudo apt-get install -y llvm-14 clang-14 libc++-14-dev libc++abi-14-dev python3-minimal libgoogle-perftools-dev libboost-container-dev python3-dev libbsd-dev\n\n    - name: Cache Z3\n      id: cache-z3\n      uses: actions/cache@v4\n      with:\n        path: ~/z3\n        key: z3-4.15.4-x64-glibc-2.39\n\n    - name: Install Z3\n      run: |\n        if [ ! -d ~/z3 ]; then\n          wget https://github.com/Z3Prover/z3/releases/download/z3-4.15.4/z3-4.15.4-x64-glibc-2.39.zip\n          unzip z3-4.15.4-x64-glibc-2.39.zip\n          mv z3-4.15.4-x64-glibc-2.39 ~/z3\n        fi\n        sudo cp ~/z3/bin/z3 /usr/local/bin/\n        sudo cp ~/z3/bin/libz3.so /usr/local/lib/\n        sudo cp ~/z3/bin/libz3.a /usr/local/lib/\n        sudo cp -r ~/z3/include/* /usr/local/include/\n        sudo ldconfig\n    #   run: |\n    #     wget https://apt.llvm.org/llvm.sh\n    #     chmod +x llvm.sh\n    #     sudo ./llvm.sh 12 all\n\n    - name: get aflpp\n      uses: actions/checkout@v4\n      with:\n        repository: AFLplusplus/AFLplusplus\n        path: ${{ github.workspace }}/aflpp\n\n    - name: configure\n      run: CC=clang-14 CXX=clang++-14 cmake -B ${{ github.workspace }}/build -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=${{ github.workspace }}/install -DAFLPP_PATH=${{ github.workspace }}/aflpp\n\n    - name: build\n      run: CC=clang-14 CXX=clang++-14 cmake --build ${{ github.workspace }}/build\n\n    - name: install\n      run: CC=clang-14 CXX=clang++-14 cmake --install ${{ github.workspace }}/build\n\n    - name: install lit\n      run: pip install lit\n\n    - name: test\n      run: lit --verbose tests\n      working-directory: ${{ 
github.workspace }}/build\n"
  },
  {
    "path": ".gitignore",
    "content": ".vscode\n*.taint\n*.o\n*.so\n*.dwo\n*.bc\n*.ll\nGPATH\nGRTAGS\nGTAGS\nGSYMS\noutput*/\n*.tar.gz\n*.tar.xz\nauto/\n/bin/\n.DS_Store\n/build/\n/libcxx/\n!/libcxx/build_taint/lib/\n/install\n"
  },
  {
    "path": "CMakeLists.txt",
    "content": "cmake_minimum_required(VERSION 3.13)\n\nproject(symsan VERSION 1.2.2 LANGUAGES C CXX ASM)\n\nfind_package(LLVM 14 REQUIRED CONFIG)\n\n# Find Z3 (minimum version 4.8.15 required for string theory APIs)\n# Prefer /usr/local over system\nfind_library(Z3_LIBRARY NAMES z3 PATHS /usr/local/lib NO_DEFAULT_PATH)\nif (NOT Z3_LIBRARY)\n    find_library(Z3_LIBRARY NAMES z3)\nendif()\nfind_path(Z3_INCLUDE_DIR NAMES z3.h PATHS /usr/local/include NO_DEFAULT_PATH)\nif (NOT Z3_INCLUDE_DIR)\n    find_path(Z3_INCLUDE_DIR NAMES z3.h)\nendif()\n\n# Check Z3 version\nif (Z3_INCLUDE_DIR)\n    file(READ \"${Z3_INCLUDE_DIR}/z3_version.h\" Z3_VERSION_CONTENT)\n    string(REGEX MATCH \"#define Z3_MAJOR_VERSION[ \\t]+([0-9]+)\" _ \"${Z3_VERSION_CONTENT}\")\n    set(Z3_VERSION_MAJOR ${CMAKE_MATCH_1})\n    string(REGEX MATCH \"#define Z3_MINOR_VERSION[ \\t]+([0-9]+)\" _ \"${Z3_VERSION_CONTENT}\")\n    set(Z3_VERSION_MINOR ${CMAKE_MATCH_1})\n    string(REGEX MATCH \"#define Z3_BUILD_NUMBER[ \\t]+([0-9]+)\" _ \"${Z3_VERSION_CONTENT}\")\n    set(Z3_VERSION_PATCH ${CMAKE_MATCH_1})\n    set(Z3_VERSION \"${Z3_VERSION_MAJOR}.${Z3_VERSION_MINOR}.${Z3_VERSION_PATCH}\")\n\n    message(STATUS \"Found Z3 version: ${Z3_VERSION}\")\n\n    # Require at least version 4.8.15\n    if (Z3_VERSION_MAJOR LESS 4 OR\n        (Z3_VERSION_MAJOR EQUAL 4 AND Z3_VERSION_MINOR LESS 8) OR\n        (Z3_VERSION_MAJOR EQUAL 4 AND Z3_VERSION_MINOR EQUAL 8 AND Z3_VERSION_PATCH LESS 15))\n        message(FATAL_ERROR \"Z3 version ${Z3_VERSION} found, but version 4.8.15 or later is required (for string theory APIs)\")\n    endif()\nendif()\n\nmessage(STATUS \"Z3_LIBRARY: ${Z3_LIBRARY}\")\nmessage(STATUS \"Z3_INCLUDE_DIR: ${Z3_INCLUDE_DIR}\")\nmessage(STATUS \"Z3_VERSION: ${Z3_VERSION}\")\n\nif (LLVM_FOUND)\n    message(STATUS \"LLVM_VERSION_MAJOR: ${LLVM_VERSION_MAJOR}\")\n    message(STATUS \"LLVM_VERSION_MINOR: ${LLVM_VERSION_MINOR}\")\n    message(STATUS \"LLVM_VERSION_PATCH: ${LLVM_VERSION_PATCH}\")\nelse()\n   
 message(FATAL_ERROR \"You haven't installed LLVM!\")\nendif()\n\nif (NOT TARGET LLVMPassConfig)\n    add_library(LLVMPassConfig INTERFACE IMPORTED)\n    set_target_properties(LLVMPassConfig PROPERTIES\n        INTERFACE_COMPILE_OPTIONS \"-fno-rtti\" #-fpic\n        INTERFACE_INCLUDE_DIRECTORIES \"${LLVM_INCLUDE_DIRS}\"\n        INTERFACE_LINK_DIRECTORIES \"${LLVM_LIBRARY_DIRS}\"\n        INTERFACE_COMPILE_DEFINITIONS \"LLVM_VERSION_MAJOR=${LLVM_VERSION_MAJOR};LLVM_VERSION_MINOR=${LLVM_VERSION_MINOR};\"\n        # INTERFACE_LINK_OPTIONS \"-Wl,-znodelete\"\n    )\nendif()\n\ninclude_directories(${LLVM_INCLUDE_DIRS})\nadd_definitions(${LLVM_DEFINITIONS})\n\ninclude_directories(include)\n\nset(SYMSAN_BIN_DIR \"bin\")\nset(SYMSAN_LIB_DIR \"lib/symsan\")\n\nadd_subdirectory(compiler)\nadd_subdirectory(instrumentation)\nadd_subdirectory(runtime)\nadd_subdirectory(wrappers)\nadd_subdirectory(parsers)\nadd_subdirectory(solvers)\nadd_subdirectory(backend)\nadd_subdirectory(driver)\nadd_subdirectory(tests)\nadd_subdirectory(libcxx)\nadd_subdirectory(python)\n"
  },
  {
    "path": "Dockerfile",
    "content": "FROM ubuntu:noble\n\nENV DEBIAN_FRONTEND=noninteractive\nENV TZ=Etc/UTC\n\nENV DEBIAN_FRONTEND=noninteractive\nENV TZ=Etc/UTC\n\nWORKDIR /work\nCOPY . /work/symsan\n\nRUN apt-get update\nRUN apt-get install -y cmake llvm-14 clang-14 libc++-14-dev libc++abi-14-dev libunwind-14-dev \\\n    python3-minimal python-is-python3 zlib1g-dev git joe libprotobuf-dev\nRUN git clone --depth=1 --branch=v4.31c https://github.com/AFLplusplus/AFLplusplus /work/aflpp\nRUN cd /work/aflpp && make PERFORMANCE=1 LLVM_CONFIG=llvm-config-14 NO_NYX=1 source-only -j4 && make install\n\nRUN apt-get install -y libz3-dev libgoogle-perftools-dev libboost-container-dev python3-dev\nRUN apt clean\n\nRUN cd /work/symsan/ && mkdir -p build && \\\n    cd build && CC=clang-14 CXX=clang++-14 cmake -DCMAKE_INSTALL_PREFIX=. -DAFLPP_PATH=/work/aflpp ../  && \\\n    make -j4 && make install\n\nENV KO_CC=clang-14\nENV KO_CXX=clang++-14\nENV KO_USE_FASTGEN=1\n"
  },
  {
    "path": "LICENSE",
    "content": "                                 Apache License\n                           Version 2.0, January 2004\n                        http://www.apache.org/licenses/\n\n   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION\n\n   1. Definitions.\n\n      \"License\" shall mean the terms and conditions for use, reproduction,\n      and distribution as defined by Sections 1 through 9 of this document.\n\n      \"Licensor\" shall mean the copyright owner or entity authorized by\n      the copyright owner that is granting the License.\n\n      \"Legal Entity\" shall mean the union of the acting entity and all\n      other entities that control, are controlled by, or are under common\n      control with that entity. For the purposes of this definition,\n      \"control\" means (i) the power, direct or indirect, to cause the\n      direction or management of such entity, whether by contract or\n      otherwise, or (ii) ownership of fifty percent (50%) or more of the\n      outstanding shares, or (iii) beneficial ownership of such entity.\n\n      \"You\" (or \"Your\") shall mean an individual or Legal Entity\n      exercising permissions granted by this License.\n\n      \"Source\" form shall mean the preferred form for making modifications,\n      including but not limited to software source code, documentation\n      source, and configuration files.\n\n      \"Object\" form shall mean any form resulting from mechanical\n      transformation or translation of a Source form, including but\n      not limited to compiled object code, generated documentation,\n      and conversions to other media types.\n\n      \"Work\" shall mean the work of authorship, whether in Source or\n      Object form, made available under the License, as indicated by a\n      copyright notice that is included in or attached to the work\n      (an example is provided in the Appendix below).\n\n      \"Derivative Works\" shall mean any work, whether in Source or Object\n      
form, that is based on (or derived from) the Work and for which the\n      editorial revisions, annotations, elaborations, or other modifications\n      represent, as a whole, an original work of authorship. For the purposes\n      of this License, Derivative Works shall not include works that remain\n      separable from, or merely link (or bind by name) to the interfaces of,\n      the Work and Derivative Works thereof.\n\n      \"Contribution\" shall mean any work of authorship, including\n      the original version of the Work and any modifications or additions\n      to that Work or Derivative Works thereof, that is intentionally\n      submitted to Licensor for inclusion in the Work by the copyright owner\n      or by an individual or Legal Entity authorized to submit on behalf of\n      the copyright owner. For the purposes of this definition, \"submitted\"\n      means any form of electronic, verbal, or written communication sent\n      to the Licensor or its representatives, including but not limited to\n      communication on electronic mailing lists, source code control systems,\n      and issue tracking systems that are managed by, or on behalf of, the\n      Licensor for the purpose of discussing and improving the Work, but\n      excluding communication that is conspicuously marked or otherwise\n      designated in writing by the copyright owner as \"Not a Contribution.\"\n\n      \"Contributor\" shall mean Licensor and any individual or Legal Entity\n      on behalf of whom a Contribution has been received by Licensor and\n      subsequently incorporated within the Work.\n\n   2. Grant of Copyright License. 
Subject to the terms and conditions of\n      this License, each Contributor hereby grants to You a perpetual,\n      worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n      copyright license to reproduce, prepare Derivative Works of,\n      publicly display, publicly perform, sublicense, and distribute the\n      Work and such Derivative Works in Source or Object form.\n\n   3. Grant of Patent License. Subject to the terms and conditions of\n      this License, each Contributor hereby grants to You a perpetual,\n      worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n      (except as stated in this section) patent license to make, have made,\n      use, offer to sell, sell, import, and otherwise transfer the Work,\n      where such license applies only to those patent claims licensable\n      by such Contributor that are necessarily infringed by their\n      Contribution(s) alone or by combination of their Contribution(s)\n      with the Work to which such Contribution(s) was submitted. If You\n      institute patent litigation against any entity (including a\n      cross-claim or counterclaim in a lawsuit) alleging that the Work\n      or a Contribution incorporated within the Work constitutes direct\n      or contributory patent infringement, then any patent licenses\n      granted to You under this License for that Work shall terminate\n      as of the date such litigation is filed.\n\n   4. Redistribution. 
You may reproduce and distribute copies of the\n      Work or Derivative Works thereof in any medium, with or without\n      modifications, and in Source or Object form, provided that You\n      meet the following conditions:\n\n      (a) You must give any other recipients of the Work or\n          Derivative Works a copy of this License; and\n\n      (b) You must cause any modified files to carry prominent notices\n          stating that You changed the files; and\n\n      (c) You must retain, in the Source form of any Derivative Works\n          that You distribute, all copyright, patent, trademark, and\n          attribution notices from the Source form of the Work,\n          excluding those notices that do not pertain to any part of\n          the Derivative Works; and\n\n      (d) If the Work includes a \"NOTICE\" text file as part of its\n          distribution, then any Derivative Works that You distribute must\n          include a readable copy of the attribution notices contained\n          within such NOTICE file, excluding those notices that do not\n          pertain to any part of the Derivative Works, in at least one\n          of the following places: within a NOTICE text file distributed\n          as part of the Derivative Works; within the Source form or\n          documentation, if provided along with the Derivative Works; or,\n          within a display generated by the Derivative Works, if and\n          wherever such third-party notices normally appear. The contents\n          of the NOTICE file are for informational purposes only and\n          do not modify the License. 
You may add Your own attribution\n          notices within Derivative Works that You distribute, alongside\n          or as an addendum to the NOTICE text from the Work, provided\n          that such additional attribution notices cannot be construed\n          as modifying the License.\n\n      You may add Your own copyright statement to Your modifications and\n      may provide additional or different license terms and conditions\n      for use, reproduction, or distribution of Your modifications, or\n      for any such Derivative Works as a whole, provided Your use,\n      reproduction, and distribution of the Work otherwise complies with\n      the conditions stated in this License.\n\n   5. Submission of Contributions. Unless You explicitly state otherwise,\n      any Contribution intentionally submitted for inclusion in the Work\n      by You to the Licensor shall be under the terms and conditions of\n      this License, without any additional terms or conditions.\n      Notwithstanding the above, nothing herein shall supersede or modify\n      the terms of any separate license agreement you may have executed\n      with Licensor regarding such Contributions.\n\n   6. Trademarks. This License does not grant permission to use the trade\n      names, trademarks, service marks, or product names of the Licensor,\n      except as required for reasonable and customary use in describing the\n      origin of the Work and reproducing the content of the NOTICE file.\n\n   7. Disclaimer of Warranty. Unless required by applicable law or\n      agreed to in writing, Licensor provides the Work (and each\n      Contributor provides its Contributions) on an \"AS IS\" BASIS,\n      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n      implied, including, without limitation, any warranties or conditions\n      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A\n      PARTICULAR PURPOSE. 
You are solely responsible for determining the\n      appropriateness of using or redistributing the Work and assume any\n      risks associated with Your exercise of permissions under this License.\n\n   8. Limitation of Liability. In no event and under no legal theory,\n      whether in tort (including negligence), contract, or otherwise,\n      unless required by applicable law (such as deliberate and grossly\n      negligent acts) or agreed to in writing, shall any Contributor be\n      liable to You for damages, including any direct, indirect, special,\n      incidental, or consequential damages of any character arising as a\n      result of this License or out of the use or inability to use the\n      Work (including but not limited to damages for loss of goodwill,\n      work stoppage, computer failure or malfunction, or any and all\n      other commercial damages or losses), even if such Contributor\n      has been advised of the possibility of such damages.\n\n   9. Accepting Warranty or Additional Liability. While redistributing\n      the Work or Derivative Works thereof, You may choose to offer,\n      and charge a fee for, acceptance of support, warranty, indemnity,\n      or other liability obligations and/or rights consistent with this\n      License. However, in accepting such obligations, You may act only\n      on Your own behalf and on Your sole responsibility, not on behalf\n      of any other Contributor, and only if You agree to indemnify,\n      defend, and hold each Contributor harmless for any liability\n      incurred by, or claims asserted against, such Contributor by reason\n      of your accepting any such warranty or additional liability.\n\n   END OF TERMS AND CONDITIONS\n\n   APPENDIX: How to apply the Apache License to your work.\n\n      To apply the Apache License to your work, attach the following\n      boilerplate notice, with the fields enclosed by brackets \"[]\"\n      replaced with your own identifying information. 
(Don't include\n      the brackets!)  The text should be enclosed in the appropriate\n      comment syntax for the file format. We also recommend that a\n      file or class name and description of purpose be included on the\n      same \"printed page\" as the copyright notice for easier\n      identification within third-party archives.\n\n   Copyright [yyyy] [name of copyright owner]\n\n   Licensed under the Apache License, Version 2.0 (the \"License\");\n   you may not use this file except in compliance with the License.\n   You may obtain a copy of the License at\n\n       http://www.apache.org/licenses/LICENSE-2.0\n\n   Unless required by applicable law or agreed to in writing, software\n   distributed under the License is distributed on an \"AS IS\" BASIS,\n   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n   See the License for the specific language governing permissions and\n   limitations under the License.\n"
  },
  {
    "path": "README.md",
    "content": "[![License](https://img.shields.io/badge/License-Apache%202.0-blue.svg)](https://opensource.org/licenses/Apache-2.0)\n\n# SymSan: Time and Space Efficient Concolic Execution via Dynamic Data-Flow Analysis\n\nSymSan (Symbolic Sanitizer) is an efficient concolic execution engine based on the\nData-Floow Sanitizer (DFSan) framework. By modeling forward symbolic execution as\na dynamic data-flow analysis and leveraging the time and space efficient data-flow\ntracking infrastructure from DFSan, SymSan imposes much lower runtime overhead\nthan previous symbolic execution engines.\n\nSimilar to other compilation-based symbolic executor like [SymCC](https://github.com/eurecom-s3/symcc),\nSymSan uses compile-time `instrumentation` to insert symbolic execution logic into\nthe target program, and a `runtime` supporting library to maintain symbolic states\nduring execution.\n\nTo learn more, checkout our [paper](https://www.usenix.org/conference/usenixsecurity22/presentation/chen-ju) at USENIX Security 2022.\n\n## Building\n\nBecause SymSan leverages the shadow memory implementation from LLVM's sanitizers,\nit has more strict dependency on the LLVM version. 
Right now only LLVM 14 is tested.\n\n### Build Requirements\n\n- Linux-amd64 (Tested on Ubuntu 24.04)\n- [LLVM 14.0.6](http://llvm.org/docs/index.html): clang, libc++, libc++abi\n\n### Compilation\n\nCreate a `build` directory and execute the following commands in it:\n\n```shell\n$ CC=clang-14 CXX=clang++-14 cmake -DCMAKE_INSTALL_PREFIX=/path/to/install -DCMAKE_BUILD_TYPE=Release /path/to/symsan/source\n$ make\n$ make install\n```\n\n### Build in Docker\n\n```\ndocker build -t symsan .\n```\n\n### LIBCXX\n\nThe repo contains instrumented libc++ and libc++abi to support C++ programs.\nTo rebuild these libraries from source, execute the `rebuild.sh` script in the\n`libcxx` directory.\n\n**NOTE**: because the in-process solving module (`solver/z3.cpp`) uses Z3's C++ API\nand STL containers, it itself depends on the C++ libs. Due to such dependencies,\nyou'll see linking errors when building C++ targets when using this module.\nThough it's possible to resolve these errors by not instrumenting the dependencies\n(adding them to the [ABIList](https://clang.llvm.org/docs/DataFlowSanitizer.html#abi-list),\n then rebuild the C++ libs), we don't recommend using it for C++ targets.\nInstead, it's much cleaner to use an out-of-process solving module like Fastgen.\n\n## Test\n\nTo verify the code works, try some simple tests\n(forked from [Angora](https://github.com/AngoraFuzzer/Angora),\nadapted by [@insuyun](https://github.com/insuyun) to lit):\n\n```\n$ pip install lit\n$ cd your_build_dir\n$ lit tests\n```\n\n### Environment Options\n\n* `KO_CC` specifies the clang to invoke, if the default version isn't clang-14,\n  set this variable to allow the compiler wrapper to find the correct clang.\n\n* `KO_CXX` specifies the clang++ to invoke, if the default version isn't clang++-14,\n  set this variable to allow the compiler wrapper to find the correct clang++.\n\n* `KO_USE_Z3` enables the in-process Z3-based solver. 
By default, it is disabled,\n  so SymSan will only perform symbolic constraint collection without solving.\n  SymSan also supports out-of-process solving, which provides better compatibility.\n  Check [FastGen](https://github.com/R-Fuzz/fastgen).\n\n* `KO_USE_NATIVE_LIBCXX` enables using the native uninstrumented libc++ and libc++abi.\n\n* `KO_DONT_OPTIMIZE` don't override the optimization level to `O3`.\n\n### Hybrid Fuzzing\n\nSymSan needs a driver to perform hybrid fuzzing, like [FastGen](https://github.com/R-Fuzz/fastgen).\nIt could also be used as a custom mutator for [AFL++](https://github.com/AFLplusplus/AFLplusplus)\n(check the [plugin readme](driver/aflpp/README.md)).\n\nCheck out our integration with Magma to see how to compile and run targets:\n[aflplusplus_symsan](https://github.com/R-Fuzz/magma/tree/mazerunner/fuzzers/aflplusplus_symsan).\n\nIt should also be easy to use the [Python binding](https://github.com/R-Fuzz/symsan/tree/main/python).\n\nNOTE: fgtest is for running tests, not for continuous fuzzing; please don't use it for benchmarking.\n\n## Documentation\n\nStill under construction, unfortunately. [DeepWiki](https://deepwiki.com/R-Fuzz/symsan) seems okay.\n\n## Reference\n\nTo cite SymSan in scientific work, please use the following BibTeX:\n\n``` bibtex\n@inproceedings {chen2022symsan,\n  author =       {Ju Chen and Wookhyun Han and Mingjun Yin and Haochen Zeng and\n                  Chengyu Song and Byoungyoung Lee and Heng Yin and Insik Shin},\n  title =        {SymSan: Time and Space Efficient Concolic Execution via Dynamic Data-Flow Analysis},\n  booktitle =    {{USENIX} Security Symposium (Security)},\n  year =         2022,\n  url =          {https://www.usenix.org/conference/usenixsecurity22/presentation/chen-ju},\n  publisher =    {{USENIX} Association},\n  month =        aug,\n}\n```\n"
  },
  {
    "path": "backend/CMakeLists.txt",
    "content": "set(CMAKE_POSITION_INDEPENDENT_CODE ON)\nset(CMAKE_CXX_STANDARD 17)\n\nset(CMAKE_CXX_FLAGS \"${CMAKE_CXX_FLAGS} -g\")\n\nadd_library(Fastgen STATIC fastgen.cpp)\ntarget_compile_options(Fastgen PRIVATE -stdlib=libc++)\ntarget_include_directories(Fastgen PUBLIC\n    ${CMAKE_CURRENT_SOURCE_DIR}/../runtime\n)\ninstall (TARGETS Fastgen DESTINATION ${SYMSAN_LIB_DIR})\n"
  },
  {
    "path": "backend/fastgen.cpp",
    "content": "/*\n  The code is for out-of-process constraints solving with fastgen.\n\n   ------------------------------------------------\n\n   Written by Chengyu Song <csong@cs.ucr.edu> and\n              Ju Chen <jchen757@ucr.edu>\n\n   Copyright 2021-2025 UC Riverside. All rights reserved.\n\n   Licensed under the Apache License, Version 2.0 (the \"License\");\n   you may not use this file except in compliance with the License.\n   You may obtain a copy of the License at:\n\n     http://www.apache.org/licenses/LICENSE-2.0\n\n */\n\n#include \"sanitizer_common/sanitizer_common.h\"\n#include \"sanitizer_common/sanitizer_file.h\"\n#include \"sanitizer_common/sanitizer_posix.h\"\n#include \"dfsan/dfsan.h\"\n\nusing namespace __dfsan;\n\nstatic uint32_t __instance_id;\nstatic uint32_t __session_id;\nstatic int __pipe_fd;\n\n// filter?\nSANITIZER_INTERFACE_ATTRIBUTE THREADLOCAL uint32_t __taint_trace_callstack;\n\nstatic inline void __solve_cond(dfsan_label label, uint8_t result,\n                                uint8_t add_nested, uint8_t loop_flag,\n                                uint32_t cid, void *addr) {\n\n  if (__pipe_fd < 0)\n    return;\n\n  uint16_t flags = 0;\n  if (add_nested) flags |= F_ADD_CONS;\n\n  // set the loop flags according to branching results\n  switch (loop_flag) {\n    case TrueBranchLoopExit:\n      flags |= result ? F_LOOP_EXIT : F_LOOP_LATCH;\n      break;\n    case TrueBranchLoopLatch:\n      flags |= result ? F_LOOP_LATCH : F_LOOP_EXIT;\n      break;\n    case FalseBranchLoopExit:\n      flags |= result ? F_LOOP_LATCH : F_LOOP_EXIT;\n      break;\n    case FalseBranchLoopLatch:\n      flags |= result ? 
F_LOOP_EXIT : F_LOOP_LATCH;\n      break;\n    default:\n      // No loop flag or unrecognized flag, do nothing\n      break;\n  }\n\n  // send info\n  pipe_msg msg = {\n    .msg_type = cond_type,\n    .flags = flags,\n    .instance_id = __instance_id,\n    .addr = (uptr)addr,\n    .context = __taint_trace_callstack,\n    .id = cid,\n    .label = label,\n    .result = result\n  };\n\n  if (internal_write(__pipe_fd, &msg, sizeof(msg)) < 0) {\n    Die();\n  }\n}\n\nstatic inline void __send_ubi(dfsan_label label, uint64_t result,\n                              uint32_t cid, void *addr) {\n  if (__pipe_fd < 0)\n    return;\n\n  pipe_msg msg = {\n    .msg_type = memerr_type,\n    .flags = F_MEMERR_UBI,\n    .instance_id = __instance_id,\n    .addr = (uptr)addr,\n    .context = __taint_trace_callstack,\n    .id = cid,\n    .label = label,\n    .result = result\n  };\n\n  if (internal_write(__pipe_fd, &msg, sizeof(msg)) < 0) {\n    Die();\n  }\n}\n\nstatic struct switch_true_case {\n  dfsan_label label;\n  uint32_t cid;\n} __switch_true_case = {0};\n\nextern \"C\" SANITIZER_INTERFACE_ATTRIBUTE void\n__taint_trace_cmp(dfsan_label op1, dfsan_label op2, uint32_t size,\n                  uint32_t predicate,\n                  uint64_t c1, uint64_t c2, uint32_t cid) {\n  if (op1 == 0 && op2 == 0)\n    return;\n\n  void *addr = __builtin_return_address(0);\n\n  if (op1 == kInitializingLabel) {\n    // uninitialized label\n    AOUT(\"WARNING: uninitialized label %u @%p\\n\", op1, addr);\n    if (flags().solve_ub) __send_ubi(op1, c1, cid, addr);\n    if (flags().exit_on_memerror) Die();\n    else return;\n  }\n  if (op2 == kInitializingLabel) {\n    // uninitialized label\n    AOUT(\"WARNING: uninitialized label %u @%p\\n\", op2, addr);\n    if (flags().solve_ub) __send_ubi(op2, c2, cid, addr);\n    if (flags().exit_on_memerror) Die();\n    else return;\n  }\n\n  AOUT(\"solving cmp: %u %u %u %d %lu %lu 0x%x @%p\\n\",\n       op1, op2, size, predicate, c1, c2, cid, addr);\n\n  // 
save info to a union table slot\n  uint8_t r = get_const_result(c1, c2, predicate);\n  dfsan_label temp = dfsan_union(op1, op2, (predicate << 8) | ICmp, size, c1, c2);\n\n  if (r) {\n    // for the true case, we want to save it to solve the last,\n    // so the nested constraint will not affect other cases\n    __switch_true_case.label = temp;\n    __switch_true_case.cid = cid;\n  } else {\n    // solve without add_nested\n    __solve_cond(temp, r, 0, 0, cid, addr);\n  }\n}\n\nextern \"C\" SANITIZER_INTERFACE_ATTRIBUTE void\n__taint_trace_switch_end(uint32_t cid) {\n  if (__switch_true_case.label == 0) {\n    return;\n  } else if (__switch_true_case.cid != cid) {\n    AOUT(\"WARNING: switch end cid mismatch %u vs %u\\n\",\n         __switch_true_case.cid, cid);\n    return;\n  }\n\n  void *addr = __builtin_return_address(0);\n\n  AOUT(\"solving switch end: %u 0x%x @%p\\n\",\n       __switch_true_case.label, cid, addr);\n\n  // solve the true case\n  __solve_cond(__switch_true_case.label, 1, 1, 0, cid, addr);\n  __switch_true_case.label = 0;\n}\n\nextern \"C\" SANITIZER_INTERFACE_ATTRIBUTE void\n__taint_trace_cond(dfsan_label label, bool r, uint8_t flag, uint32_t cid) {\n  if (label == 0) {\n    // check for real loop exit\n    if (!(((flag & FalseBranchLoopExit) && !r) ||\n          ((flag & TrueBranchLoopExit) && r)))\n      return;\n  }\n\n  void *addr = __builtin_return_address(0);\n\n  if (label == kInitializingLabel) {\n    // uninitialized label\n    AOUT(\"WARNING: uninitialized label %u @%p\\n\", label, addr);\n    if (flags().solve_ub) __send_ubi(label, r, cid, addr);\n    if (flags().exit_on_memerror) Die();\n    else return;\n  }\n\n  AOUT(\"solving cond: %u %u 0x%x 0x%x %p\\n\",\n       label, r, __taint_trace_callstack, cid, addr);\n\n  uint8_t add_nested = flag & UndefinedCheck ? 
0 : 1;\n  uint8_t loop_flag = flag & LoopFlagMask;\n\n  // always add nested\n  __solve_cond(label, r, add_nested, loop_flag, cid, addr);\n}\n\nextern \"C\" SANITIZER_INTERFACE_ATTRIBUTE dfsan_label\n__taint_trace_select(dfsan_label cond_label, dfsan_label true_label,\n                     dfsan_label false_label, uint8_t r, uint8_t true_op,\n                     uint8_t false_op, uint32_t cid) {\n  if (cond_label == 0)\n    return r ? true_label : false_label;\n\n  void *addr = __builtin_return_address(0);\n\n  if (cond_label == kInitializingLabel) {\n    // uninitialized label\n    AOUT(\"WARNING: uninitialized label %u @%p\\n\", cond_label, addr);\n    if (flags().solve_ub) __send_ubi(cond_label, r, cid, addr);\n    if (flags().exit_on_memerror) Die();\n    else return r ? true_label : false_label;\n  }\n\n  AOUT(\"solving select: %u %u %u %u %u %u 0x%x @%p\\n\",\n       cond_label, true_label, false_label, r, true_op, false_op, cid, addr);\n\n  // check if it's actually a logical AND: select cond, label, false\n  if (true_label != 0 && false_op == 0) {\n    dfsan_label land = dfsan_union(cond_label, true_label, And, 1, r, true_op);\n    uint8_t lr = (r && true_op) ? 1 : 0;\n    __solve_cond(land, lr, 1, 0, cid, addr);\n    return land;\n  } else if (false_label != 0 && true_op == 1) {\n    // logical OR: select cond, true, label\n    dfsan_label lor = dfsan_union(cond_label, false_label, Or, 1, r, false_op);\n    uint8_t lr = (r || false_op) ? 1 : 0;\n    __solve_cond(lor, lr, 1, 0, cid, addr);\n    return lor;\n  } else {\n    // normal select?\n    AOUT(\"normal select?!\\n\");\n    __solve_cond(cond_label, r, 1, 0, cid, addr);\n    return r ? 
true_label : false_label;\n  }\n}\n\nextern \"C\" SANITIZER_INTERFACE_ATTRIBUTE void\n__taint_trace_indcall(dfsan_label label) {\n  if (label == 0)\n    return;\n\n  AOUT(\"tainted indirect call target: %d\\n\", label);\n}\n\nextern \"C\" SANITIZER_INTERFACE_ATTRIBUTE void\n__taint_trace_gep(dfsan_label ptr_label, uint64_t ptr,\n                  dfsan_label index_label, int64_t index,\n                  uint64_t num_elems, uint64_t elem_size,\n                  int64_t current_offset, uint32_t cid) {\n  if (index_label == 0)\n    return;\n\n  void *addr = __builtin_return_address(0);\n\n  if (index_label == kInitializingLabel) {\n    // uninitialized label\n    AOUT(\"WARNING: uninitialized label %u @%p\\n\", index_label, addr);\n    if (flags().solve_ub) __send_ubi(index_label, index, cid, addr);\n    if (flags().exit_on_memerror) Die();\n    else return;\n  }\n  if (ptr_label == kInitializingLabel) {\n    // uninitialized label\n    AOUT(\"WARNING: uninitialized label %u @%p\\n\", ptr_label, addr);\n    if (flags().solve_ub) __send_ubi(ptr_label, ptr, cid, addr);\n    if (flags().exit_on_memerror) Die();\n    else return;\n  }\n\n  AOUT(\"tainted GEP index: %ld = %d, ne: %ld, es: %ld, offset: %ld\\n\",\n      index, index_label, num_elems, elem_size, current_offset);\n\n  if (__pipe_fd < 0)\n    return;\n\n  // send gep info, in two pieces\n  pipe_msg msg = {\n    .msg_type = gep_type,\n    .flags = 0,\n    .instance_id = __instance_id,\n    .addr = (uptr)addr,\n    .context = __taint_trace_callstack,\n    .label = index_label, // just in case\n    .result = (uint64_t)index\n  };\n\n  if (internal_write(__pipe_fd, &msg, sizeof(msg)) < 0) {\n    Die();\n  }\n\n  gep_msg gmsg = {\n    .ptr_label = ptr_label,\n    .index_label = index_label,\n    .ptr = ptr,\n    .index = index,\n    .num_elems = num_elems,\n    .elem_size = elem_size,\n    .current_offset = current_offset\n  };\n\n  // FIXME: assuming single writer so msg will arrive in the same order\n  if 
(internal_write(__pipe_fd, &gmsg, sizeof(gmsg)) < 0) {\n    Die();\n  }\n\n  return;\n}\n\nextern \"C\" SANITIZER_INTERFACE_ATTRIBUTE void\n__taint_trace_offset(dfsan_label offset_label, s64 offset, unsigned size) {\n  return;\n}\n\nextern \"C\" SANITIZER_INTERFACE_ATTRIBUTE void\n__taint_trace_memcmp(dfsan_label label) {\n  if (label == 0)\n    return;\n\n  void *addr = __builtin_return_address(0);\n\n  if (label == kInitializingLabel) {\n    // uninitialized label\n    AOUT(\"WARNING: uninitialized label %u @%p\\n\", label, addr);\n    if (flags().solve_ub) __send_ubi(label, 0, 0, addr);\n    if (flags().exit_on_memerror) Die();\n    else return;\n  }\n\n  dfsan_label_info *info = get_label_info(label);\n\n  AOUT(\"tainted memcmp: %d, size: %d\\n\", label, info->size);\n\n  if (__pipe_fd < 0)\n    return;\n\n  uint16_t has_content = 1;\n  // if both operands are symbolic, skip sending the content\n  if ((info->l1 != CONST_LABEL && info->l2 != CONST_LABEL) || info->size == 0)\n    has_content = 0;\n\n  pipe_msg msg = {\n    .msg_type = memcmp_type,\n    .flags = has_content,\n    .instance_id = __instance_id,\n    .addr = (uptr)addr,\n    .context = __taint_trace_callstack,\n    .label = label, // just in case\n    .result = (uint64_t)info->size\n  };\n\n  if (internal_write(__pipe_fd, &msg, sizeof(msg)) < 0) {\n    Die();\n  }\n\n  if (!has_content)\n    return;\n\n  size_t msg_size = sizeof(memcmp_msg) + info->size;\n  memcmp_msg *mmsg = (memcmp_msg*)__builtin_alloca(msg_size);\n  mmsg->label = label;\n  // Copy concrete content: use op1 if l1 is concrete, else op2\n  void *concrete_ptr = (info->l1 == CONST_LABEL) ? 
(void*)info->op1.i : (void*)info->op2.i;\n  internal_memcpy(mmsg->content, concrete_ptr, info->size);\n  AOUT(\"sending memcmp content for label %d, size %u, msg_size=%lu\\n\", label, info->size, msg_size);\n\n  // FIXME: assuming single writer so msg will arrive in the same order\n  if (internal_write(__pipe_fd, mmsg, msg_size) < 0) {\n    Die();\n  }\n\n  return;\n}\n\nextern \"C\" SANITIZER_INTERFACE_ATTRIBUTE void\n__taint_trace_memerr(dfsan_label ptr_label, uptr ptr, dfsan_label size_label,\n                     uint64_t size, uint16_t flag, void *addr) {\n  if (ptr_label == 0 && size_label == 0)\n    return;\n\n  if (__pipe_fd < 0)\n    return;\n\n  uint64_t r = 0;\n  switch(flag) {\n    case F_MEMERR_UAF: r = ptr; break;\n    case F_MEMERR_OLB: r = ptr; break;\n    case F_MEMERR_OUB: r = ptr + size; break;\n    case F_MEMERR_UBI: r = ptr; break;\n    default: return;\n  }\n\n  pipe_msg msg = {\n    .msg_type = memerr_type,\n    .flags = flag,\n    .instance_id = __instance_id,\n    .addr = (uptr)addr,\n    .context = __taint_trace_callstack,\n    .label = ptr_label, // just in case\n    .result = r\n  };\n\n  if (internal_write(__pipe_fd, &msg, sizeof(msg)) < 0) {\n    Die();\n  }\n}\n\nextern \"C\" void InitializeSolver() {\n  __instance_id = flags().instance_id;\n  __session_id = flags().session_id;\n  __pipe_fd = flags().pipe_fd;\n}\n"
  },
  {
    "path": "compiler/CMakeLists.txt",
    "content": "add_executable(KOClang ko_clang.c)\nset_target_properties(KOClang PROPERTIES OUTPUT_NAME \"ko-clang\")\n\nadd_custom_command(TARGET KOClang POST_BUILD\n    COMMAND ln -sf \"ko-clang\" \"ko-clang++\")\ninstall (TARGETS KOClang DESTINATION ${SYMSAN_BIN_DIR})\ninstall (FILES ${CMAKE_CURRENT_BINARY_DIR}/ko-clang++ DESTINATION ${SYMSAN_BIN_DIR})\n"
  },
  {
    "path": "compiler/ko_clang.c",
    "content": "/*\n  The code is modified from AFL's LLVM mode and Angora.\n\n   ------------------------------------------------\n\n   Written by Laszlo Szekeres <lszekeres@google.com> and\n              Michal Zalewski <lcamtuf@google.com>\n\n   Copyright 2015, 2016 Google Inc. All rights reserved.\n\n   Licensed under the Apache License, Version 2.0 (the \"License\");\n   you may not use this file except in compliance with the License.\n   You may obtain a copy of the License at:\n\n     http://www.apache.org/licenses/LICENSE-2.0\n\n */\n\n#define KO_MAIN\n\n#include \"alloc_inl.h\"\n#include \"defs.h\"\n#include \"debug.h\"\n#include \"version.h\"\n\n#include <stdio.h>\n#include <stdlib.h>\n#include <string.h>\n#include <unistd.h>\n\n#ifndef PATH_MAX\n#define PATH_MAX 4096\n#endif\n\nstatic char *obj_path;       /* Path to runtime libraries         */\nstatic char *taint_path;     /* Path to the taint pass            */\nstatic char **cc_params;     /* Parameters passed to the real CC  */\nstatic u32 cc_par_cnt = 1;   /* Param count, including argv0      */\nstatic u8 is_cxx = 0;\nstatic u8 use_native_cxx = 0;\nstatic u8 use_native_zlib = 1; /* Use system zlib by default */\n\n/* Try to find the executable from PATH */\nstatic char *find_executable_in_path(const char *filename) {\n  char *path = getenv(\"PATH\");\n  if (path == NULL) {\n    FATAL(\"Cannot get PATH env\");\n    return NULL;\n  }\n\n  char *prev = path;\n  char full_path[PATH_MAX];\n  size_t filename_len = strlen(filename);\n  while (1) {\n    char *colon = strstr(prev, \":\");\n    size_t len = colon ? 
colon - prev : strlen(prev);\n    if (len + 1 + filename_len + 1 >= PATH_MAX) {\n      WARNF(\"Path too long: %s\", prev);\n      continue;\n    }\n\n    // Construct the full path\n    memcpy(full_path, prev, len);\n    full_path[len] = '/';\n    memcpy(full_path + len + 1, filename, filename_len + 1);\n\n    // Check if the file exists and is executable\n    if (access(full_path, X_OK) == 0) {\n      return ck_strdup(full_path);\n    }\n\n    if (colon == NULL || *(colon + 1) == '\\0') {\n      break;\n    }\n    prev = colon + 1;\n  }\n\n  return NULL;\n}\n\n/* Try to find the runtime libraries. If that fails, abort. */\nstatic void find_obj(const char *argv0) {\n\n  char *slash;\n  char path[PATH_MAX];\n\n  if (strchr(argv0, '/') == NULL) {\n    char *exec_path = find_executable_in_path(argv0);\n    if (exec_path == NULL) {\n      FATAL(\"Cannot find the compiler (%s) in PATH\", argv0);\n    }\n    if (!realpath(exec_path, path)) {\n      FATAL(\"Cannot get real path of the compiler (%s): %s\", exec_path, strerror(errno));\n    }\n    ck_free(exec_path);\n  } else {\n    if (!realpath(argv0, path)) {\n      FATAL(\"Cannot get real path of the compiler (%s): %s\", argv0, strerror(errno));\n    }\n  }\n\n  slash = strrchr(path, '/');\n\n  if (slash) {\n    char *dir;\n    *slash = 0;\n    dir = ck_strdup(path);\n    *slash = '/';\n\n    taint_path = alloc_printf(\"%s/../lib/symsan/TaintPass.so\", dir);\n    if (!access(taint_path, R_OK)) {\n      obj_path = alloc_printf(\"%s/../lib/symsan\", dir);\n    } else {\n      FATAL(\"Unable to find 'TaintPass.so' at %s\", path);\n    }\n\n    ck_free(dir);\n  }\n}\n\nstatic void check_type(char *name) {\n  if (!strcmp(name, \"ko-clang++\")) {\n    is_cxx = 1;\n  }\n}\n\nstatic u8 check_if_assembler(u32 argc, char **argv) {\n  /* Check if a file with an assembler extension (\"s\" or \"S\") appears in argv */\n\n  while (--argc) {\n    const char *cur = *(++argv);\n\n    const char *ext = strrchr(cur, '.');\n    if (ext && 
(!strcmp(ext + 1, \"s\") || !strcmp(ext + 1, \"S\"))) {\n      return 1;\n    }\n  }\n\n  return 0;\n}\n\nstatic void add_runtime() {\n  if (getenv(\"KO_LIBRARY_PATH\")) {\n    cc_params[cc_par_cnt++] = alloc_printf(\"-L%s\", getenv(\"KO_LIBRARY_PATH\"));\n  }\n\n  cc_params[cc_par_cnt++] = \"-Wl,--whole-archive\";\n  cc_params[cc_par_cnt++] = alloc_printf(\"%s/libdfsan_rt-x86_64.a\", obj_path);\n  cc_params[cc_par_cnt++] = \"-Wl,--no-whole-archive\";\n  cc_params[cc_par_cnt++] =\n      alloc_printf(\"-Wl,--dynamic-list=%s/libdfsan_rt-x86_64.a.syms\", obj_path);\n\n  cc_params[cc_par_cnt++] = alloc_printf(\"-Wl,-T%s/taint.ld\", obj_path);\n\n  if (is_cxx && !use_native_cxx) {\n    // cc_params[cc_par_cnt++] = \"-Wl,--whole-archive\";\n    cc_params[cc_par_cnt++] = alloc_printf(\"%s/libc++.a\", obj_path);\n    cc_params[cc_par_cnt++] = alloc_printf(\"%s/libc++abi.a\", obj_path);\n    cc_params[cc_par_cnt++] = alloc_printf(\"%s/libunwind.a\", obj_path);\n    // cc_params[cc_par_cnt++] = \"-Wl,--no-whole-archive\";\n  } else {\n    cc_params[cc_par_cnt++] = \"-lc++\";\n    cc_params[cc_par_cnt++] = \"-lc++abi\";\n    cc_params[cc_par_cnt++] = \"-l:libunwind.so\";\n  }\n  cc_params[cc_par_cnt++] = \"-lrt\";\n\n  cc_params[cc_par_cnt++] = \"-Wl,--no-as-needed\";\n  cc_params[cc_par_cnt++] = \"-Wl,--gc-sections\"; // if darwin -Wl, -dead_strip\n  cc_params[cc_par_cnt++] = \"-ldl\";\n  cc_params[cc_par_cnt++] = \"-lpthread\";\n  cc_params[cc_par_cnt++] = \"-lm\";\n\n  if (use_native_zlib) {\n    cc_params[cc_par_cnt++] = \"-lz\";\n  }\n\n  if (getenv(\"KO_USE_Z3\")) {\n    cc_params[cc_par_cnt++] = \"-Wl,--whole-archive\";\n    cc_params[cc_par_cnt++] = alloc_printf(\"%s/libZ3Solver.a\", obj_path);\n    cc_params[cc_par_cnt++] = \"-Wl,--no-whole-archive\";\n    cc_params[cc_par_cnt++] = \"-L/usr/local/lib\";\n    cc_params[cc_par_cnt++] = \"-lz3\";\n    cc_params[cc_par_cnt++] = \"-Wl,-rpath,/usr/local/lib\";\n  }\n\n  if (getenv(\"KO_USE_FASTGEN\")) {\n    
cc_params[cc_par_cnt++] = \"-Wl,--whole-archive\";\n    cc_params[cc_par_cnt++] = alloc_printf(\"%s/libFastgen.a\", obj_path);\n    cc_params[cc_par_cnt++] = \"-Wl,--no-whole-archive\";\n  }\n}\n\nstatic void add_taint_pass() {\n  cc_params[cc_par_cnt++] = \"-fexperimental-new-pass-manager\";\n  cc_params[cc_par_cnt++] = alloc_printf(\"-fplugin=%s\", taint_path); // to enable options\n  cc_params[cc_par_cnt++] = alloc_printf(\"-fpass-plugin=%s\", taint_path);\n  cc_params[cc_par_cnt++] = \"-mllvm\";\n  cc_params[cc_par_cnt++] =\n      alloc_printf(\"-taint-abilist=%s/dfsan_abilist.txt\", obj_path);\n\n  if (use_native_zlib) {\n    cc_params[cc_par_cnt++] = \"-mllvm\";\n    cc_params[cc_par_cnt++] =\n        alloc_printf(\"-taint-abilist=%s/zlib_abilist.txt\", obj_path);\n  }\n\n  if (getenv(\"KO_TRACE_FP\")) {\n    cc_params[cc_par_cnt++] = \"-mllvm\";\n    cc_params[cc_par_cnt++] = \"-taint-trace-float-pointer\";\n  }\n\n  if (getenv(\"KO_NO_TRACE_BOUND\")) {\n    cc_params[cc_par_cnt++] = \"-mllvm\";\n    cc_params[cc_par_cnt++] = \"-taint-trace-bound=false\";\n  }\n\n  if (getenv(\"KO_SOLVE_UB\")) {\n    cc_params[cc_par_cnt++] = \"-mllvm\";\n    cc_params[cc_par_cnt++] = \"-taint-solve-ub=true\";\n  }\n\n  if (is_cxx && use_native_cxx) {\n    cc_params[cc_par_cnt++] = \"-mllvm\";\n    cc_params[cc_par_cnt++] =\n        alloc_printf(\"-taint-abilist=%s/libc++_abilist.txt\", obj_path);\n  }\n}\n\nstatic void edit_params(u32 argc, char **argv) {\n\n  u8 fortify_set = 0, asan_set = 0, x_set = 0, maybe_linking = 1, bit_mode = 0;\n  u8 maybe_assembler = 0;\n  char *name;\n\n  cc_params = ck_alloc((argc + 128) * sizeof(char *));\n\n  name = strrchr(argv[0], '/');\n  if (!name)\n    name = argv[0];\n  else\n    name++;\n  check_type(name);\n\n  if (is_cxx) {\n    char *alt_cxx = getenv(\"KO_CXX\");\n    cc_params[0] = alt_cxx ? alt_cxx : \"clang++\";\n  } else {\n    char *alt_cc = getenv(\"KO_CC\");\n    cc_params[0] = alt_cc ? 
alt_cc : \"clang\";\n  }\n\n  maybe_assembler = check_if_assembler(argc, argv);\n\n  use_native_cxx = getenv(\"KO_USE_NATIVE_LIBCXX\") ? 1 : 0;\n\n  use_native_zlib = getenv(\"KO_NO_NATIVE_ZLIB\") ? 0 : 1;\n\n  /* Detect stray -v calls from ./configure scripts. */\n  if (argc == 1 && !strcmp(argv[1], \"-v\"))\n    maybe_linking = 0;\n\n  while (--argc) {\n    char *cur = *(++argv);\n    // FIXME\n    if (!strcmp(cur, \"-O1\") || !strcmp(cur, \"-O2\") || !strcmp(cur, \"-O3\")) {\n      //continue;\n    }\n    if (!strcmp(cur, \"-m32\"))\n      bit_mode = 32;\n    if (!strcmp(cur, \"-m64\"))\n      bit_mode = 64;\n\n    if (!strcmp(cur, \"-x\"))\n      x_set = 1;\n\n    if (!strcmp(cur, \"-c\") || !strcmp(cur, \"-S\") || !strcmp(cur, \"-E\"))\n      maybe_linking = 0;\n\n    if (!strncmp(cur, \"-fsanitize=\", strlen(\"-fsanitize=\"))) {\n      continue; // doesn't work together\n    }\n\n    if (!use_native_zlib && !strcmp(cur, \"-lz\"))\n      continue; // ignore -lz if we are using our own zlib\n\n    if (strstr(cur, \"FORTIFY_SOURCE\"))\n      fortify_set = 1;\n\n    if (!strcmp(cur, \"-shared\"))\n      maybe_linking = 0;\n\n    if (!strcmp(cur, \"-Wl,-z,defs\") || !strcmp(cur, \"-Wl,--no-undefined\"))\n      continue;\n\n    if (strstr(cur, \"-stdlib=\") == cur) {\n      // XXX: use native if the target prefers stdlibc++?\n      continue;\n    }\n\n    if (!strcmp(cur, \"-lc++\") || !strcmp(cur, \"-lc++abi\") || !strcmp(cur, \"-lunwind\") ||\n        strstr(cur, \"-l:libc++.so\") == cur ||\n        strstr(cur, \"-l:libc++abi.so\") == cur ||\n        strstr(cur, \"-l:libunwind.so\") == cur) {\n      // skip libc++, libc++abi, and libunwind\n      continue;\n    }\n\n    if (strstr(cur, \"libSymsanProxy.o\")) {\n      char* last = *(argv - 1);\n      if (last) {\n        if (!strcmp(last, \"-I\")) { // remove the -I\n          cc_params[cc_par_cnt - 1] = cur;\n          continue;\n        }\n        if (!strcmp(last, \"-L\")) { // remove the -L\n          
cc_params[cc_par_cnt - 1] = cur;\n          continue;\n        }\n      }\n    }\n\n    cc_params[cc_par_cnt++] = cur;\n  }\n\n  if (getenv(\"KO_CONFIG\")) {\n    cc_params[cc_par_cnt] = NULL;\n    return;\n  }\n\n  if (!maybe_assembler) {\n    add_taint_pass();\n  }\n\n  cc_params[cc_par_cnt++] = \"-pie\";\n  cc_params[cc_par_cnt++] = \"-fpic\";\n  cc_params[cc_par_cnt++] = \"-Qunused-arguments\";\n  cc_params[cc_par_cnt++] = \"-fno-vectorize\";\n  cc_params[cc_par_cnt++] = \"-fno-slp-vectorize\";\n#if 0\n  cc_params[cc_par_cnt++] = \"-mno-mmx\";\n  cc_params[cc_par_cnt++] = \"-mno-sse\";\n  cc_params[cc_par_cnt++] = \"-mno-sse2\";\n  cc_params[cc_par_cnt++] = \"-mno-avx\";\n  cc_params[cc_par_cnt++] = \"-mno-sse3\";\n  cc_params[cc_par_cnt++] = \"-mno-sse4.1\";\n  cc_params[cc_par_cnt++] = \"-mno-sse4.2\";\n  cc_params[cc_par_cnt++] = \"-mno-ssse3\";\n  cc_params[cc_par_cnt++] = \"-mno-avx2\";\n  cc_params[cc_par_cnt++] = \"-mno-avx512f\";\n  cc_params[cc_par_cnt++] = \"-mno-avx512bw\";\n  cc_params[cc_par_cnt++] = \"-mno-avx512dq\";\n  cc_params[cc_par_cnt++] = \"-mno-avx512vl\";\n#endif\n\n  if (getenv(\"KO_HARDEN\")) {\n    cc_params[cc_par_cnt++] = \"-fstack-protector-all\";\n\n    if (!fortify_set)\n      cc_params[cc_par_cnt++] = \"-D_FORTIFY_SOURCE=2\";\n  }\n\n  if (!getenv(\"KO_DONT_OPTIMIZE\")) {\n    cc_params[cc_par_cnt++] = \"-g\";\n    cc_params[cc_par_cnt++] = \"-O3\";\n    cc_params[cc_par_cnt++] = \"-funroll-loops\";\n  }\n\n  if (is_cxx && !use_native_cxx) {\n    // FIXME: or use the same header\n    // cc_params[cc_par_cnt++] = alloc_printf(\"-I%s/../include/c++/v1\", obj_path);\n    cc_params[cc_par_cnt++] = \"-stdlib=libc++\";\n  }\n\n  if (maybe_linking) {\n\n    if (x_set) {\n      cc_params[cc_par_cnt++] = \"-x\";\n      cc_params[cc_par_cnt++] = \"none\";\n    }\n\n    add_runtime();\n\n    switch (bit_mode) {\n    case 0:\n      break;\n    case 32:\n      /* if (access(cc_params[cc_par_cnt - 1], R_OK)) */\n      FATAL(\"-m32 is not 
supported by your compiler\");\n      break;\n\n    case 64:\n      /* if (access(cc_params[cc_par_cnt - 1], R_OK)) */\n      // FATAL(\"-m64 is not supported by your compiler\");\n      break;\n    }\n  }\n\n  cc_params[cc_par_cnt] = NULL;\n}\n\n/* Main entry point */\n\nint main(int argc, char **argv) {\n\n  if (argc < 2) {\n\n    SAYF(\"\\n\"\n         \"This is a helper application for Kirenenko. It serves as a drop-in \"\n         \"replacement\\n\"\n         \"for clang, letting you recompile third-party code with the required \"\n         \"runtime\\n\"\n         \"instrumentation. A common use pattern would be one of the \"\n         \"following:\\n\\n\"\n\n         \"  CC=%s/ko-clang ./configure\\n\"\n         \"  CXX=%s/ko-clang++ ./configure\\n\\n\"\n\n         \"You can specify custom next-stage toolchain via KO_CC and KO_CXX.\"\n         \"You can set (e.g., export) KO_CONFIG=1 to avoid problems during \"\n         \"configure.\\n\"\n         \"Setting\\n\"\n         \"KO_HARDEN enables hardening optimizations in the compiled \"\n         \"code.\\n\\n\",\n         \"xx\", \"xx\");\n\n    exit(1);\n  }\n\n  find_obj(argv[0]);\n  edit_params(argc, argv);\n  for (int i = 0; i < cc_par_cnt; i++) {\n    printf(\"%s \", cc_params[i]);\n  }\n  printf(\"\\n\");\n  execvp(cc_params[0], (char **)cc_params);\n\n  FATAL(\"Oops, failed to execute '%s' - check your PATH\", cc_params[0]);\n\n  return 0;\n}\n\n"
  },
  {
    "path": "driver/CMakeLists.txt",
    "content": "set(CMAKE_POSITION_INDEPENDENT_CODE ON)\nset(CMAKE_CXX_STANDARD 14)\n\n## launcher lib\nadd_subdirectory(launcher)\n\n## simple driver for testing out-of-process solving\nadd_executable(FGTest fgtest.cpp)\nset_target_properties(FGTest PROPERTIES OUTPUT_NAME \"fgtest\")\ntarget_include_directories(FGTest PUBLIC\n    ${CMAKE_CURRENT_SOURCE_DIR}/../runtime\n)\ntarget_link_libraries(FGTest PRIVATE\n  launcher\n  z3parser\n  ${Z3_LIBRARY}\n  rt\n)\ninstall (TARGETS FGTest DESTINATION ${SYMSAN_BIN_DIR})\n\nif (DEFINED AFLPP_PATH)\n    add_subdirectory(aflpp)\nendif()\n\ninstall (CODE \"MESSAGE(STATUS \\\"Build & Install: libSymsanProxy.o\\\")\")\ninstall (CODE \"execute_process(COMMAND \\\n    ${CMAKE_INSTALL_PREFIX}/${SYMSAN_BIN_DIR}/ko-clang \\\n    -c ${CMAKE_CURRENT_SOURCE_DIR}/harness-proxy.c \\\n    -o ${CMAKE_INSTALL_PREFIX}/${SYMSAN_LIB_DIR}/libSymsanProxy.o)\")\n"
  },
  {
    "path": "driver/aflpp/CMakeLists.txt",
    "content": "## custom_mutation for AFL++\n\nproject(SymSanMutator C CXX)\n\nset(CMAKE_POSITION_INDEPENDENT_CODE ON)\nset(CMAKE_CXX_STANDARD 17)\n\n# set(CMAKE_CXX_FLAGS \"${CMAKE_CXX_FLAGS} -stdlib=libc++\")\nset(CMAKE_CXX_FLAGS \"${CMAKE_CXX_FLAGS} -O3 -g -mcx16 -march=native -fno-builtin-malloc -fno-builtin-calloc -fno-builtin-realloc -fno-builtin-free\")\nif (ASAN_BUILD)\n  set(CMAKE_CXX_FLAGS \"${CMAKE_CXX_FLAGS} -fsanitize=address\")\nendif()\n\nif (NOT IS_DIRECTORY \"${AFLPP_PATH}/include\")\n  message(FATAL_ERROR \"Invalid AFL++ directory path! ${AFLPP_PATH}\")\nendif()\n\nadd_library(SymSanMutator SHARED symsan.cpp )\ntarget_include_directories(SymSanMutator PRIVATE\n  ${CMAKE_CURRENT_SOURCE_DIR}/../../runtime\n  ${AFLPP_PATH}/include\n)\ntarget_link_libraries(SymSanMutator\n  launcher\n  rgd-parser\n  rgd-solver\n)\nif (ASAN_BUILD)\n  target_link_libraries(SymSanMutator\n    ${LLVM_BINARY_DIR}/lib/clang/12.0.1/lib/linux/libclang_rt.asan-x86_64.a\n  )\nendif()\n\ninstall (TARGETS SymSanMutator DESTINATION ${SYMSAN_BIN_DIR})\n"
  },
  {
    "path": "driver/aflpp/README.md",
    "content": "# An AFL++ plugin for using SYMSAN as a custom mutator\n\nlibafl should be a better option but due to my familiarity with C++,\nthis is a temporary option.\n\n## HowTo\n\nA quick guide to how to use the plugin:\n\n### Compilation\n\nRight now I have only tested on Ubuntu 20.04 and 22.04.\n\nFirst, install dependencies:\n\n```\napt-get update\napt-get install -y lsb-release wget software-properties-common gnupg\nwget https://apt.llvm.org/llvm.sh && chmod +x llvm.sh && ./llvm.sh 12\napt-get install -y cmake libc++-12-dev libc++abi-12-dev libunwind-12 python3-minimal python-is-python3 zlib1g-dev git gdb joe\napt-get install -y libz3-dev libgoogle-perftools-dev\n```\n\nNext, download and build AFL++:\n\n```\ngit clone --depth=1 https://github.com/AFLplusplus/AFLplusplus /workdir/aflpp\nexport LLVM_CONFIG=llvm-config-14\ncd /workdir/aflpp && CC=clang-14 CXX=clang++-14 make source-only && make install\n```\n\nNext, download and build symsan:\n\n```\ngit clone https://github.com/R-Fuzz/symsan /workdir/symsan\ncd symsan/ && mkdir -p build && \\\n  cd build && CC=clang-14 CXX=clang++-14 cmake -DAFLPP_PATH=/workdir/aflpp ../  && \\\n  make -j && make install\n```\n\n### Build target binaries\n\nYou need to compile two binaries: one for normal fuzzing and one for symbolic tracing\n\nFor the normal fuzzing binary, you can set `AFL_LLVM_CMPLOG=0` to disable `cmplog` as the plugin does a similar job.\nPlease refer to the AFL++ manual for building options.\n\nFor the symbolic tracing binary, set the following env options\n\n* `KO_CC=clang-14`: use clang-14 as the C compiler, because SymSan is compiled as an LLVM-12 pass\n* `KO_CXX=clang++-14`: using clang++-14 as the C++ compiler\n* `KO_USE_FASTGEN=1`: using the out-of-process solving mode (i.e., decoupled tracing and solving)\n* `KO_DONT_OPTIMIZE=1` (optional): keep the original optimization level, otherwise override with `-O3`\n* `KO_NO_NATIVE_ZLIB` (optional): if you're using instrumented libz\n* 
`KO_USE_NATIVE_LIBCXX` (optional): if you want to use the native, *uninstrumented* standard C++ lib; the default option uses the instrumented `libc++` and `libc++-abi`.\n\nTo build the target, set `CC=/path/to/symsan/bin/ko-clang` and `CXX=/path/to/symsan/bin/ko-clang++`.\nIf the configuration fails, you can set `KO_CONFIG=1`, and unset it after configuration.\n\n### Fuzz\n\nAfter the two binaries are built, use AFL++ to fuzz it, using the following env options\nto load the plugin and control its behavior:\n\n* `AFL_CUSTOM_MUTATOR_LIBRARY=/path/to/symsan/bin/libSymSanMutator.so`: load the plugin\n* `SYMSAN_TARGET=/path/to/symsan-instrumented-binary`: symbolic tracing binary\n* `AFL_DISABLE_TRIM=1` (optional): for some targets (e.g., the `mini` test case), you may want to disable trim\n* `AFL_CUSTOM_MUTATOR_ONLY=1` (optional): if you only want to test the plugin\n* `SYMSAN_OUTPUT_DIR=/none/default/dir` (optional): a different directory to store temporary outputs from SymSan\n* `SYMSAN_USE_JIGSAW=1` (optional): use JIGSAW as the solver\n* `SYMSAN_USE_Z3=1` (optional): use Z3 as the solver\n* `SYMSAN_USE_NESTED=1` (optional): consider nested branches when constructing a solving task\n\n## Some high-level design\n\nThe custom mutator works in two main steps:\n\n1. In the interface function `afl_custom_fuzz_count`, the plugin spawns\n   a symsan-instrumented binary to collect the symbolic traces (i.e., the tracing\n   stage in libafl). For each event it wants to handle, it constructs a *solving task*.\n\n2. In the interface function `afl_custom_fuzz`, the plugin fetches a *solving task*,\n   solves it, and generates a new input (i.e., the mutation stage in libafl).\n   The newly generated input is then evaluated with the main fuzzing binary.\n   If the input is saved, the task is considered successfully solved.\n\n## Extensions\n\nOne main motivation to move to libafl and afl++ custom mutator is to make the\nconcolic execution stage more extensible (than in fastgen). 
Following are some\ninterfaces that can be customized:\n\n* `rgd::CovManager` is in charge of determining whether an event from symsan should\n  be used to construct a solving task. The default one uses branch coverage\n  (similar to sancov `trace-pc-guard`) to filter events.\n\n* `rgd::TaskManager` is in charge of scheduling *solving tasks*. The default one\n  is a simple FIFO queue.\n\n* `rgd::Solver` is in charge of solving a solving task. Right now there are three\n  solvers, which work in a layered manner (i2s->jigsaw->z3):\n  if a task is solved by an earlier solver, it will skip the next solver; otherwise the next solver is invoked.\n    * The default one is a simple I2S solver, which uses tracing results to map input bytes to comparison\n      operands and generates a solution based on potential\n      [input-to-state correspondence](https://www.ndss-symposium.org/ndss-paper/redqueen-fuzzing-with-input-to-state-correspondence/).\n    * JIGSAW, which is our [JIT-based constraint solver](https://github.com/R-Fuzz/jigsaw).\n    * Z3\n\n"
  },
  {
    "path": "driver/aflpp/rgd.proto",
    "content": "syntax = \"proto3\";\n\npackage rgd;\n\n// AST node for symbolic expressions\nmessage AstNode {\n  uint32 kind = 1;\n  uint32 boolvalue = 2;  //used by bool expr\n  uint32 bits = 3;\n  string value = 4;  //used by constant expr\n  repeated AstNode children = 5;\n  string name = 6;  //used for debugging\n  uint32 index = 7;  //used by read expr for index and extract expr\n  uint32 label = 8;  //for expression dedup\n  uint32 hash = 9;  //for node dedup\n\tuint32 direction = 10;\n\tuint32 sessionid = 11;\n\tuint32 full = 12;\n}\n"
  },
  {
    "path": "driver/aflpp/symsan.cpp",
    "content": "/*\n  a custom mutator for AFL++\n  (c) 2023 - 2024 by Chengyu Song <csong@cs.ucr.edu>\n  License: Apache 2.0\n*/\n\n#include \"dfsan/dfsan.h\"\n\n#include \"ast.h\"\n#include \"task.h\"\n#include \"solver.h\"\n#include \"cov.h\"\n#include \"task_mgr.h\"\n\nextern \"C\" {\n#include \"afl-fuzz.h\"\n#include \"launch.h\"\n}\n\n#include \"parse-rgd.h\"\n\n#include <atomic>\n#include <unordered_map>\n#include <unordered_set>\n#include <utility>\n#include <vector>\n#include <queue>\n#include <memory>\n\n#include <stdlib.h>\n#include <string.h>\n#include <unistd.h>\n\n#include <sys/ipc.h>\n#include <sys/mman.h>\n#include <sys/select.h>\n#include <sys/shm.h>\n#include <sys/stat.h>\n#include <sys/time.h>\n#include <sys/types.h>\n#include <sys/wait.h>\n#include <fcntl.h>\n\nusing namespace __dfsan;\n\n#ifndef DEBUG\n#define DEBUG 0\n#endif\n\n#if !DEBUG\n#undef DEBUGF\n#define DEBUGF(_str...) do { } while (0)\n#endif\n\n#define PRINT_STATS 0\n\n#define MAX_AST_SIZE 200\n\n#define MIN_TIMEOUT 50U\n\n#define MAX_LOCAL_BRANCH_COUNTER 128\n\nstatic bool NestedSolving = false;\nstatic int TraceBounds = 0;\nstatic int ExitOnMemError = 1; // default is exit on memory error\nstatic int SolveUB = 0;\nstatic int ForceStdin = 0;\nstatic bool SaveSolved = false;\n\n#undef alloc_printf\n#define alloc_printf(_str...) 
({ \\\n    char* _tmp; \\\n    s32 _len = snprintf(NULL, 0, _str); \\\n    if (_len < 0) FATAL(\"Whoa, snprintf() fails?!\"); \\\n    _tmp = (char*)ck_alloc(_len + 1); \\\n    snprintf((char*)_tmp, _len + 1, _str); \\\n    _tmp; \\\n  })\n\nusing solver_t = std::shared_ptr<rgd::Solver>;\nusing branch_ctx_t = std::shared_ptr<rgd::BranchContext>;\n\nenum mutation_state_t {\n  MUTATION_INVALID,\n  MUTATION_IN_VALIDATION,\n  MUTATION_VALIDATED,\n};\n\nstruct my_mutator_t {\n  my_mutator_t() = delete;\n  my_mutator_t(const afl_state_t *afl, rgd::TaskManager* tmgr, rgd::CovManager* cmgr) :\n    afl(afl), out_dir(NULL), out_file(NULL), symsan_bin(NULL),\n    argv(NULL), out_fd(-1), cur_queue_entry(NULL),\n    cur_mutation_state(MUTATION_INVALID), output_buf(NULL),\n    cur_task(nullptr), cur_solver_index(-1),\n    task_mgr(tmgr), cov_mgr(cmgr) {}\n\n  ~my_mutator_t() {\n    if (out_fd >= 0) close(out_fd);\n    ck_free(out_dir);\n    ck_free(out_file);\n    ck_free(output_buf);\n    ck_free(argv);\n    delete task_mgr;\n    delete cov_mgr;\n  }\n\n  const afl_state_t *afl;\n  char *out_dir;\n  char *out_file;\n  char *symsan_bin;\n  char **argv;\n  int out_fd;\n  u8* cur_queue_entry;\n  int cur_mutation_state;\n  u8* output_buf;\n  int log_fd;\n\n  std::unordered_set<u32> fuzzed_inputs;\n  rgd::TaskManager* task_mgr;\n  rgd::CovManager* cov_mgr;\n  rgd::RGDAstParser* parser;\n  std::vector<solver_t> solvers;\n\n  // XXX: well, we have to keep track of solving states\n  rgd::task_t cur_task;\n  size_t cur_solver_index;\n};\n\n// FIXME: find another way to make the union table hash work\nstatic dfsan_label_info *__dfsan_label_info;\nstatic const size_t MAX_LABEL = uniontable_size / sizeof(dfsan_label_info);\n\ndfsan_label_info* __dfsan::get_label_info(dfsan_label label) {\n  if (unlikely(label >= MAX_LABEL)) {\n    throw std::out_of_range(\"label too large \" + std::to_string(label));\n  }\n  return &__dfsan_label_info[label];\n}\n\n// FIXME: local filter?\nstatic 
std::unordered_map<uint32_t, uint8_t> local_counter;\nstatic std::unordered_set<uint32_t> local_index_filter;\n// staticstics\nstatic uint64_t total_branches = 0;\nstatic uint64_t branches_to_solve = 0;\nstatic uint64_t total_tasks = 0;\nstatic std::map<uint64_t, uint64_t> task_size_dist;\nstatic uint64_t solved_tasks = 0;\nstatic uint64_t solved_branches = 0;\n\nstatic void reset_global_caches(size_t buf_size) {\n  local_counter.clear();\n  local_index_filter.clear();\n}\n\nstatic void handle_cond(pipe_msg &msg, my_mutator_t *my_mutator) {\n  if (unlikely(msg.label == 0)) {\n    return;\n  } else if (unlikely(msg.label == kInitializingLabel)) {\n    WARNF(\"UBI branch cond @%p\\n\", (void*)msg.addr);\n    return;\n  }\n\n  total_branches += 1;\n\n  // apply a local (per input) branch filter\n  auto &lc = local_counter[msg.id];\n  if (lc > MAX_LOCAL_BRANCH_COUNTER) {\n    return;\n  } else {\n    lc += 1;\n  }\n\n  // prase flags\n  bool always_solve = (msg.flags & F_ADD_CONS) == 0;\n  bool loop_latch = (msg.flags & F_LOOP_LATCH) != 0;\n  bool loop_exit = (msg.flags & F_LOOP_EXIT) != 0;\n\n  const branch_ctx_t ctx = my_mutator->cov_mgr->add_branch((void*)msg.addr,\n      msg.id, msg.result != 0, msg.context, loop_latch, loop_exit);\n\n  branch_ctx_t neg_ctx = std::make_shared<rgd::BranchContext>();\n  *neg_ctx = *ctx;\n  neg_ctx->direction = !ctx->direction;\n\n  if (my_mutator->cov_mgr->is_branch_interesting(neg_ctx) || always_solve) {\n    // parse the uniont table AST to solving tasks\n    std::vector<uint64_t> tasks;\n    if (my_mutator->parser->parse_cond(msg.label, ctx->direction, msg.flags & F_ADD_CONS, tasks) != 0) {\n      WARNF(\"Failed to parse the condition %u, from input %s\\n\", msg.label, my_mutator->cur_queue_entry);\n      // symsan_terminate();\n      return;\n    }\n\n    // add the tasks to the task manager\n    for (auto const& task_id : tasks) {\n      auto task = my_mutator->parser->retrieve_task(task_id);\n      
my_mutator->task_mgr->add_task(neg_ctx, task);\n#if PRINT_STATS\n      task_size_dist[task->constraints.size()] += 1;\n#endif\n    }\n\n    total_tasks += tasks.size();\n    branches_to_solve += 1;\n  }\n}\n\nstatic void handle_gep(gep_msg &gmsg, pipe_msg &msg, my_mutator_t *my_mutator) {\n  // msg.label === gmsg.index_label\n  if (unlikely(msg.label == 0)) {\n    return;\n  } else if (unlikely(msg.label == kInitializingLabel)) {\n    WARNF(\"UBI array index @%p\\n\", (void*)msg.addr);\n    return;\n  }\n\n  // apply a local (per input) index filter\n  if (!local_index_filter.insert(msg.label).second) {\n    return;\n  }\n\n  // parse the uniont table AST to solving tasks\n  std::vector<uint64_t> tasks;\n  if (my_mutator->parser->parse_gep(gmsg.ptr_label, gmsg.ptr, gmsg.index_label, gmsg.index,\n        gmsg.num_elems, gmsg.elem_size, gmsg.current_offset, false, tasks) != 0) {\n    WARNF(\"Failed to parse symbolic index %u, from input %s\\n\", gmsg.index_label, my_mutator->cur_queue_entry);\n    // symsan_terminate();\n    return;\n  }\n\n  // add the tasks to the task manager, with a dummy context\n  branch_ctx_t ctx = std::make_shared<rgd::BranchContext>();\n  ctx->addr = (void*)msg.addr;\n  ctx->direction = true;\n  for (auto const& task_id : tasks) {\n    auto task = my_mutator->parser->retrieve_task(task_id);\n    my_mutator->task_mgr->add_task(ctx, task);\n#if PRINT_STATS\n    task_size_dist[task->constraints.size()] += 1;\n#endif\n  }\n\n  total_tasks += tasks.size();\n}\n\n/// no splice input\nextern \"C\" void afl_custom_splice_optout(my_mutator_t *data) {\n  (void)(data);\n}\n\n/// @brief init the custom mutator\n/// @param afl aflpp state\n/// @param seed not used\n/// @return custom mutator state\nextern \"C\" my_mutator_t *afl_custom_init(afl_state *afl, unsigned int seed) {\n\n  (void)(seed);\n\n  struct stat st;\n  rgd::TaskManager *tmgr = new rgd::FIFOTaskManager();\n  rgd::CovManager *cmgr = new rgd::EdgeCovManager();\n  my_mutator_t *data = new 
my_mutator_t(afl, tmgr, cmgr);\n  if (!data) {\n    FATAL(\"afl_custom_init alloc\");\n    return NULL;\n  }\n  // always use the simpler i2s solver\n  data->solvers.emplace_back(std::make_shared<rgd::I2SSolver>());\n  if (getenv(\"SYMSAN_USE_JIGSAW\"))\n    data->solvers.emplace_back(std::make_shared<rgd::JITSolver>());\n  if (getenv(\"SYMSAN_USE_Z3\"))\n    data->solvers.emplace_back(std::make_shared<rgd::Z3Solver>());\n  // make nested solving optional too\n  if (getenv(\"SYMSAN_USE_NESTED\")) {\n    NestedSolving = true;\n  }\n  // enable trace bounds?\n  if (getenv(\"SYMSAN_TRACE_BOUNDS\")) {\n    TraceBounds = 1;\n  }\n  // disable exit on memory error\n  if (getenv(\"SYMSAN_DONT_EXIT_ON_MEMERROR\")) {\n    ExitOnMemError = 0;\n  }\n  if (getenv(\"SYMSAN_SOLVE_UB\")) {\n    TraceBounds = 1; // solve undefined depends on trace bounds\n    SolveUB = 1;\n  }\n  // XXX: force stdin? ugly hack for aixcc\n  if (getenv(\"SYMSAN_FORCE_STDIN\")) {\n    ForceStdin = 1;\n  }\n  // enable saving solved tasks\n  if (getenv(\"SYMSAN_SAVE_SOLVED\")) {\n    SaveSolved = true;\n  }\n\n  if (!(data->symsan_bin = getenv(\"SYMSAN_TARGET\"))) {\n    FATAL(\n        \"SYMSAN_TARGET not defined, this should point to the full path of the \"\n        \"symsan compiled binary.\");\n  }\n\n  if (!(data->out_dir = getenv(\"SYMSAN_OUTPUT_DIR\"))) {\n    data->out_dir = alloc_printf(\"%s/symsan\", afl->out_dir);\n  }\n\n  if (stat(data->out_dir, &st) && mkdir(data->out_dir, 0755)) {\n    PFATAL(\"Could not create the output directory %s\", data->out_dir);\n  }\n\n  // setup output file\n  char *out_file;\n  if (afl->file_extension) {\n    out_file = alloc_printf(\"%s/.cur_input.%s\", data->out_dir, afl->file_extension);\n  } else {\n    out_file = alloc_printf(\"%s/.cur_input\", data->out_dir);\n  }\n  if (data->out_dir[0] == '/') {\n    data->out_file = out_file;\n  } else {\n    char cwd[PATH_MAX];\n    if (getcwd(cwd, (size_t)sizeof(cwd)) == NULL) { PFATAL(\"getcwd() failed\"); }\n    
data->out_file = alloc_printf(\"%s/%s\", cwd, out_file);\n    ck_free(out_file);\n  }\n\n  // create the output file\n  data->out_fd = open(data->out_file, O_RDWR | O_CREAT | O_TRUNC, 0644);\n  if (data->out_fd < 0) {\n    PFATAL(\"Failed to create output file %s: %s\\n\", data->out_file, strerror(errno));\n  }\n\n  // setup symsan launcher\n  __dfsan_label_info = (dfsan_label_info *)symsan_init(data->symsan_bin, uniontable_size);\n  if (__dfsan_label_info == (void *)-1) {\n    FATAL(\"Failed to init symsan launcher: %s\\n\", strerror(errno));\n  }\n\n  // setup the parser\n  data->parser = new rgd::RGDAstParser(__dfsan_label_info, uniontable_size, NestedSolving, MAX_AST_SIZE);\n  if (!data->parser) {\n    FATAL(\"Failed to create parser\\n\");\n  }\n\n  // allocate output buffer\n  data->output_buf = (u8 *)malloc(MAX_FILE+1);\n  if (!data->output_buf) {\n    FATAL(\"Failed to alloc output buffer\\n\");\n  }\n\n#if PRINT_STATS\n  char *log_f = getenv(\"SYMSAN_LOG_FILE\");\n  if (log_f) {\n    data->log_fd = open(log_f, O_RDWR | O_CREAT | O_TRUNC, 0644);\n    if (data->log_fd < 0) {\n      PFATAL(\"Failed to create log file: %s\\n\", strerror(errno));\n    }\n  } else {\n    data->log_fd = 2; // stderr by default\n  }\n#endif\n\n  return data;\n}\n\nextern \"C\" void afl_custom_deinit(my_mutator_t *data) {\n  symsan_destroy();\n  delete data;\n}\n\n/// @brief the trace stage for symsan\n/// @param data the custom mutator state\n/// @param buf input buffer\n/// @param buf_size\n/// @return the number of solving tasks\nextern \"C\" u32 afl_custom_fuzz_count(my_mutator_t *data, const u8 *buf,\n                                     size_t buf_size) {\n\n  // check the input id to see if it's been run before\n  // we don't use the afl_custom_queue_new_entry() because we may not\n  // want to solve all the tasks\n  u32 input_id = data->afl->queue_cur->id;\n  u32 timeout = std::min(MIN_TIMEOUT, data->afl->fsrv.exec_tmout);\n  if (data->fuzzed_inputs.find(input_id) != 
data->fuzzed_inputs.end()) {\n    return 0;\n  }\n  data->fuzzed_inputs.insert(input_id);\n\n  // record the name of the current queue entry\n  data->cur_queue_entry = data->afl->queue_cur->fname;\n  DEBUGF(\"Fuzzing %s\\n\", data->cur_queue_entry);\n\n  // FIXME: should we use the afl->queue_cur->fname instead?\n  // write the buf to the file\n  lseek(data->out_fd, 0, SEEK_SET);\n  ck_write(data->out_fd, buf, buf_size, data->out_file);\n  fsync(data->out_fd);\n  if (ftruncate(data->out_fd, buf_size)) {\n    WARNF(\"Failed to truncate output file: %s\\n\", strerror(errno));\n    return 0;\n  }\n\n  // setup argv in case of initialized\n  if (unlikely(!data->argv)) {\n    int argc = 0;\n    while (data->afl->argv[argc]) { argc++; }\n    data->argv = (char **)calloc(argc + 1, sizeof(char *));\n    if (!data->argv) {\n      FATAL(\"Failed to alloc argv\\n\");\n    }\n    for (int i = 0; i < argc; i++) {\n      if (strstr(data->afl->argv[i], (char*)data->afl->tmp_dir)) {\n        DEBUGF(\"Replacing %s with %s\\n\", data->afl->argv[i], data->out_file);\n        data->argv[i] = data->out_file;\n      } else {\n        data->argv[i] = data->afl->argv[i];\n      }\n    }\n    data->argv[argc] = NULL;\n    // setup symsan launcher\n    symsan_set_input(data->afl->fsrv.use_stdin ? 
\"stdin\" : data->out_file);\n    symsan_set_args(argc, data->argv);\n    symsan_set_debug(DEBUG);\n    symsan_set_bounds_check(TraceBounds);\n    symsan_set_exit_on_memerror(ExitOnMemError);\n    symsan_set_solve_ub(SolveUB);\n    symsan_set_force_stdin(ForceStdin);\n  }\n\n  // launch the symsan child process\n  int ret = symsan_run(data->out_fd);\n  if (ret < 0) {\n    WARNF(\"Failed to start symsan bin: %s\\n\", strerror(errno));\n    return 0;\n  } else if (ret > 0) {\n    WARNF(\"symsan_run failed %d\\n\", ret);\n    return 0;\n  }\n\n  pipe_msg msg;\n  gep_msg gmsg;\n  memcmp_msg *mmsg;\n  dfsan_label_info *info;\n  size_t msg_size;\n  u32 num_tasks = 0;\n  u32 num_msgs = 0;\n  bool timedout = false;\n  struct timeval start, end;\n  gettimeofday(&start, NULL);\n\n  // clear all caches\n  std::vector<symsan::input_t> inputs;\n  inputs.push_back({buf, buf_size});\n  data->parser->restart(inputs);\n  reset_global_caches(buf_size);\n\n  while (symsan_read_event(&msg, sizeof(msg), timeout) == sizeof(msg)) {\n    // create solving tasks\n    switch (msg.msg_type) {\n      // conditional branch\n      case cond_type:\n        handle_cond(msg, data);\n        break;\n      case gep_type:\n        if (symsan_read_event(&gmsg, sizeof(gmsg), 0) != sizeof(gmsg)) {\n          WARNF(\"Failed to receive gep msg: %s\\n\", strerror(errno));\n          break;\n        }\n        // double check\n        if (msg.label != gmsg.index_label) {\n          WARNF(\"Incorrect gep msg: %d vs %d\\n\", msg.label, gmsg.index_label);\n          break;\n        }\n        handle_gep(gmsg, msg, data);\n        break;\n      case memcmp_type:\n        if (msg.label == 0 || msg.label >= MAX_LABEL) {\n          WARNF(\"Invalid memcmp label: %d\\n\", msg.label);\n          break;\n        }\n        info = get_label_info(msg.label);\n        // if both operands are symbolic, no content to be read\n        if (info->l1 != CONST_LABEL && info->l2 != CONST_LABEL)\n          break;\n        // 
flags = 0 means both operands are symbolic thus no content to read\n        // if (!msg.flags)\n        //  break;\n        msg_size = sizeof(memcmp_msg) + msg.result;\n        mmsg = (memcmp_msg*)malloc(msg_size);\n        if (symsan_read_event(mmsg, msg_size, 0) != msg_size) {\n          WARNF(\"Failed to receive memcmp msg: %s\\n\", strerror(errno));\n          free(mmsg);\n          break;\n        }\n        // double check\n        if (msg.label != mmsg->label) {\n          WARNF(\"Incorrect memcmp msg: %d vs %d\\n\", msg.label, mmsg->label);\n          free(mmsg);\n          break;\n        }\n        // save the content\n        data->parser->record_memcmp(msg.label, mmsg->content, msg.result);\n        free(mmsg);\n        break;\n      case fsize_type:\n        break;\n      case memerr_type:\n        WARNF(\"Memory error detected @%p, type = %d\\n\", (void*)msg.addr, msg.flags);\n        break;\n      default:\n        break;\n    }\n    // naive deadloop detection\n    num_msgs += 1;\n    if (unlikely((num_msgs & 0xffffe000) != 0)) {\n      gettimeofday(&end, NULL);\n      if ((end.tv_sec - start.tv_sec) * 10 > timeout) {\n        // allow 100x slowdown, sec * 1000 > ms * 100\n        WARNF(\"Possible deadloop, break\\n\");\n        timedout = true;\n        break;\n      }\n    }\n  }\n\n  if (timedout) {\n    // kill the symsan process\n    symsan_terminate();\n  }\n\n  // reinit solving state\n  data->cur_task = nullptr;\n\n  size_t max_stages = data->solvers.size();\n  // to be conservative, we return the maximum number of possible mutations\n  return (u32)(data->task_mgr->get_num_tasks() * max_stages);\n\n}\n\nstatic void print_stats(my_mutator_t *data) {\n  dprintf(data->log_fd,\n    \"Total branches: %zu,\\n\"\\\n    \"Total tasks: %zu,\\n\"\\\n    \"Solved tasks: %zu,\\n\"\\\n    \"Solved branches: %zu\\n\",\n    total_branches, total_tasks, solved_tasks, solved_branches);\n  dprintf(data->log_fd, \"Task size distribution:\\n\");\n  for (auto 
const& kv : task_size_dist) {\n    dprintf(data->log_fd, \"\\t %zu: %zu\\n\", kv.first, kv.second);\n  }\n  for (auto &solver : data->solvers) {\n    solver->print_stats(data->log_fd);\n  }\n}\n\nextern \"C\"\nsize_t afl_custom_fuzz(my_mutator_t *data, uint8_t *buf, size_t buf_size,\n                       u8 **out_buf, uint8_t *add_buf, size_t add_buf_size,\n                       size_t max_size) {\n  (void)(add_buf);\n  (void)(add_buf_size);\n  (void)(max_size);\n  if (buf_size > MAX_FILE) {\n    *out_buf = buf;\n    return 0;\n  }\n\n  // try to get a task if we don't already have one\n  // or if we've find a valid solution from the previous mutation\n  if (!data->cur_task || data->cur_mutation_state == MUTATION_VALIDATED) {\n    data->cur_task = data->task_mgr->get_next_task();\n    if (!data->cur_task) {\n      DEBUGF(\"No more tasks to solve\\n\");\n      data->cur_mutation_state = MUTATION_INVALID;\n      *out_buf = buf;\n#if PRINT_STATS\n      print_stats(data);\n#endif\n      return 0;\n    }\n    // reset the solver and state\n    data->cur_solver_index = 0;\n    data->cur_mutation_state = MUTATION_INVALID;\n  }\n\n  // check the previous mutation state\n  if (data->cur_mutation_state == MUTATION_IN_VALIDATION) {\n    // oops, not solve, move on to next solver\n    data->cur_solver_index++;\n    if (data->cur_solver_index >= data->solvers.size()) {\n      // if reached the max solver, move on to the next task\n      data->cur_task = data->task_mgr->get_next_task();\n      if (!data->cur_task) {\n        DEBUGF(\"No more tasks to solve\\n\");\n        data->cur_mutation_state = MUTATION_INVALID;\n        *out_buf = buf;\n#if PRINT_STATS\n        print_stats(data);\n#endif\n        return 0;\n      }\n      data->cur_solver_index = 0; // reset solver index\n    }\n  }\n\n  // default return values\n  size_t new_buf_size = 0;\n  *out_buf = buf;\n  auto &solver = data->solvers[data->cur_solver_index];\n  auto ret = solver->solve(data->cur_task, buf, 
buf_size,\n      data->output_buf, new_buf_size);\n  if (likely(ret == rgd::SOLVER_SAT)) {\n    DEBUGF(\"task solved\\n\");\n    data->cur_mutation_state = MUTATION_IN_VALIDATION;\n    *out_buf = data->output_buf;\n    if (SaveSolved) {\n      // save the solved task\n      char *solved_file = alloc_printf(\"%s/id_%zu\", data->out_dir, solved_tasks);\n      if (solved_file != NULL) {\n        int fd = open(solved_file, O_RDWR | O_CREAT | O_TRUNC, 0644);\n        if (fd < 0) {\n          WARNF(\"Failed to create solved file %s: %s\\n\", solved_file, strerror(errno));\n        } else {\n          lseek(fd, 0, SEEK_SET);\n          ck_write(fd, data->output_buf, new_buf_size, solved_file);\n          close(fd);\n        }\n        free(solved_file);\n      }\n    }\n    solved_tasks += 1;\n  } else if (ret == rgd::SOLVER_TIMEOUT) {\n    // if not solved, move on to next stage\n    data->cur_mutation_state = MUTATION_IN_VALIDATION;\n  } else if (ret == rgd::SOLVER_UNSAT) {\n    // at any stage if the task is deemed unsolvable, just skip it\n    DEBUGF(\"task not solvable\\n\");\n    data->cur_task->skip_next = true;\n    data->cur_task = nullptr;\n  } else {\n    WARNF(\"Unknown solver return value %d\\n\", ret);\n    *out_buf = NULL;\n    new_buf_size = 0;\n  }\n\n  return new_buf_size;\n}\n\n\n// FIXME: use new queue entry as feedback to see if the last mutation is successful\nextern \"C\"\nuint8_t afl_custom_queue_new_entry(my_mutator_t * data,\n                                   const uint8_t *filename_new_queue,\n                                   const uint8_t *filename_orig_queue) {\n  // if we're in validation state and the current queue entry is the same as\n  // mark the constraints as solved\n  DEBUGF(\"new queue entry: %s\\n\", filename_new_queue);\n  if (data->cur_queue_entry == filename_orig_queue &&\n      data->cur_mutation_state == MUTATION_IN_VALIDATION) {\n    data->cur_mutation_state = MUTATION_VALIDATED;\n    if (data->cur_task) {\n      
data->cur_task->skip_next = true;\n      solved_branches += 1;\n    }\n  }\n  return 0;\n}\n"
  },
  {
    "path": "driver/fgtest.cpp",
    "content": "#include \"defs.h\"\n#include \"debug.h\"\n#include \"version.h\"\n\n#include \"dfsan/dfsan.h\"\n\nextern \"C\" {\n#include \"launch.h\"\n}\n\n#include \"parse-z3.h\"\n\n#include <algorithm>\n#include <memory>\n#include <unordered_map>\n#include <unordered_set>\n#include <utility>\n#include <vector>\n\n#include <stdio.h>\n#include <stdlib.h>\n#include <string.h>\n#include <unistd.h>\n\n#include <sys/mman.h>\n#include <sys/stat.h>\n#include <sys/types.h>\n#include <fcntl.h>\n\nusing namespace __dfsan;\n\n#define OPTIMISTIC 1\n\n#undef AOUT\n# define AOUT(...)                                      \\\n  do {                                                  \\\n    printf(__VA_ARGS__);                                \\\n  } while(false)\n\n// for input\nstatic char *input_buf;\nstatic size_t input_size;\n\n// for output\nstatic const char* __output_dir = \".\";\nstatic uint32_t __instance_id = 0;\nstatic uint32_t __session_id = 0;\nstatic uint32_t __current_index = 0;\nstatic int __enum_gep = 0;  // GEP enumeration enabled by default\nstatic z3::context __z3_context;\n\n// z3parser\nsymsan::Z3ParserSolver *__z3_parser = nullptr;\n\nstatic void generate_input(symsan::Z3ParserSolver::solution_t &solutions) {\n  using op_t = symsan::Z3ParserSolver::solution_op_t;\n\n  // Build the new input in memory to handle INSERT/DELETE properly\n  std::vector<uint8_t> new_input(input_buf, input_buf + input_size);\n\n  // Sort solutions by offset in descending order so INSERT/DELETE don't\n  // invalidate subsequent offsets\n  std::vector<size_t> order(solutions.size());\n  for (size_t i = 0; i < order.size(); ++i) order[i] = i;\n  std::sort(order.begin(), order.end(), [&solutions](size_t a, size_t b) {\n    return solutions[a].offset > solutions[b].offset;\n  });\n\n  for (size_t idx : order) {\n    const auto& sol = solutions[idx];\n    switch (sol.op) {\n      case op_t::SET:\n        if (sol.offset < new_input.size()) {\n          AOUT(\"SET offset %d = %x\\n\", 
sol.offset, sol.val);\n          new_input[sol.offset] = sol.val;\n        }\n        break;\n\n      case op_t::INSERT:\n        if (sol.offset <= new_input.size()) {\n          AOUT(\"INSERT %zu bytes at offset %d\\n\", sol.data.size(), sol.offset);\n          new_input.insert(new_input.begin() + sol.offset,\n                          sol.data.begin(), sol.data.end());\n        }\n        break;\n\n      case op_t::DELETE:\n        if (sol.offset < new_input.size()) {\n          size_t del_len = std::min((size_t)sol.len,\n                                    new_input.size() - sol.offset);\n          AOUT(\"DELETE %zu bytes at offset %d\\n\", del_len, sol.offset);\n          new_input.erase(new_input.begin() + sol.offset,\n                         new_input.begin() + sol.offset + del_len);\n        }\n        break;\n    }\n  }\n\n  // Write the new input to file\n  char path[PATH_MAX];\n  snprintf(path, PATH_MAX, \"%s/id-%d-%d-%d\", __output_dir,\n           __instance_id, __session_id, __current_index++);\n  int fd = open(path, O_CREAT | O_WRONLY | O_TRUNC, S_IRUSR | S_IWUSR);\n  if (fd == -1) {\n    AOUT(\"failed to open new input file for write\");\n    return;\n  }\n\n  AOUT(\"generate #%d output (size: %zu -> %zu)\\n\",\n       __current_index - 1, input_size, new_input.size());\n\n  if (write(fd, new_input.data(), new_input.size()) == -1) {\n    AOUT(\"failed to write new input\\n\");\n  }\n\n  close(fd);\n}\n\nstatic void __solve_cond(dfsan_label label, uint8_t r, bool add_nested, void *addr) {\n\n  AOUT(\"solving label %d = %d, add_nested: %d\\n\", label, r, add_nested);\n  std::vector<uint64_t> tasks;\n  if (__z3_parser->parse_cond(label, r, add_nested, tasks)) {\n    AOUT(\"WARNING: failed to parse condition %d @%p\\n\", label, addr);\n    return;\n  }\n\n  for (auto id : tasks) {\n    // solve\n    symsan::Z3ParserSolver::solution_t solutions;\n    auto status = __z3_parser->solve_task(id, 30000U, solutions);  // 30 seconds\n    if (solutions.size() != 
0) {\n      AOUT(\"branch solved\\n\");\n      generate_input(solutions);\n    } else {\n      AOUT(\"branch not solvable @%p\\n\", addr);\n    }\n    solutions.clear();\n  }\n\n}\n\nstatic void __handle_gep(dfsan_label ptr_label, uptr ptr,\n                         dfsan_label index_label, int64_t index,\n                         uint64_t num_elems, uint64_t elem_size,\n                         int64_t current_offset, void* addr) {\n\n  AOUT(\"tainted GEP index: %ld = %d, ne: %ld, es: %ld, offset: %ld\\n\",\n      index, index_label, num_elems, elem_size, current_offset);\n\n  std::vector<uint64_t> tasks;\n  if (__z3_parser->parse_gep(ptr_label, ptr, index_label, index, num_elems,\n                             elem_size, current_offset, __enum_gep, tasks)) {\n    AOUT(\"WARNING: failed to parse gep %d @%p\\n\", index_label, addr);\n    return;\n  }\n\n  for (auto id : tasks) {\n    symsan::Z3ParserSolver::solution_t solutions;\n    auto status = __z3_parser->solve_task(id, 30000U, solutions);  // 30 seconds\n    if (solutions.size() != 0) {\n      AOUT(\"gep solved\\n\");\n      generate_input(solutions);\n    } else {\n      AOUT(\"gep not solvable @%p\\n\", addr);\n    }\n    solutions.clear();\n  }\n}\n\nint main(int argc, char* const argv[]) {\n\n  if (argc != 3) {\n    fprintf(stderr, \"Usage: %s target input\\n\", argv[0]);\n    exit(1);\n  }\n\n  char *program = argv[1];\n  char *input = argv[2];\n\n  int is_stdin = 0;\n  int solve_ub = 0;\n  int debug = 0;\n  char *options = getenv(\"TAINT_OPTIONS\");\n  if (options) {\n    // setup output dir\n    char *output = strstr(options, \"output_dir=\");\n    if (output) {\n      output += 11; // skip \"output_dir=\"\n      char *end = strchr(output, ':'); // try ':' first, then ' '\n      if (end == NULL) end = strchr(output, ' ');\n      size_t n = end == NULL? 
strlen(output) : (size_t)(end - output);\n      __output_dir = strndup(output, n);\n    }\n\n    // check if input is stdin\n    char *taint_file = strstr(options, \"taint_file=\");\n    if (taint_file) {\n      taint_file += strlen(\"taint_file=\"); // skip \"taint_file=\"\n      char *end = strchr(taint_file, ':');\n      if (end == NULL) end = strchr(taint_file, ' ');\n      size_t n = end == NULL? strlen(taint_file) : (size_t)(end - taint_file);\n      if (n == 5 && !strncmp(taint_file, \"stdin\", 5))\n        is_stdin = 1;\n    }\n\n    // check for debug\n    char *debug_opt = strstr(options, \"debug=\");\n    if (debug_opt) {\n      debug_opt += strlen(\"debug=\"); // skip \"debug=\"\n      if (strcmp(debug_opt, \"1\") == 0 || strcmp(debug_opt, \"true\") == 0)\n        debug = 1;\n    }\n\n    // check for session_id\n    char *session_opt = strstr(options, \"session_id=\");\n    if (session_opt) {\n      session_opt += strlen(\"session_id=\");\n      __session_id = atoi(session_opt);\n    }\n\n    // check if solve_ub is enabled\n    char *solve_ub_opt = strstr(options, \"solve_ub=\");\n    if (solve_ub_opt) {\n      solve_ub_opt += strlen(\"solve_ub=\"); // skip \"solve_ub=\"\n      if (strcmp(solve_ub_opt, \"1\") == 0 || strcmp(solve_ub_opt, \"true\") == 0)\n        solve_ub = 1;\n    }\n\n    // check if GEP enumeration is disabled\n    char *enum_gep_opt = strstr(options, \"enum_gep=\");\n    if (enum_gep_opt) {\n      enum_gep_opt += strlen(\"enum_gep=\"); // skip \"enum_gep=\"\n      if (strncmp(enum_gep_opt, \"0\", 1) == 0 || strncmp(enum_gep_opt, \"false\", 5) == 0)\n        __enum_gep = 0;\n    }\n  }\n\n  // load input file\n  struct stat st;\n  int input_fd = open(input, O_RDONLY);\n  if (input_fd == -1) {\n    fprintf(stderr, \"Failed to open input file: %s\\n\", strerror(errno));\n    exit(1);\n  }\n  fstat(input_fd, &st);\n  input_size = st.st_size;\n  input_buf = (char *)mmap(NULL, input_size, PROT_READ, MAP_PRIVATE, input_fd, 0);\n  if 
(input_buf == (void *)-1) {\n    fprintf(stderr, \"Failed to map input file: %s\\n\", strerror(errno));\n    exit(1);\n  }\n\n  // setup launcher\n  void *shm_base = symsan_init(program, uniontable_size);\n  if (shm_base == (void *)-1) {\n    fprintf(stderr, \"Failed to map shm: %s\\n\", strerror(errno));\n    exit(1);\n  }\n\n  if (symsan_set_input(is_stdin ? \"stdin\" : input) != 0) {\n    fprintf(stderr, \"Failed to set input\\n\");\n    exit(1);\n  }\n\n  char* args[3];\n  args[0] = program;\n  args[1] = input;\n  args[2] = NULL;\n  if (symsan_set_args(2, args) != 0) {\n    fprintf(stderr, \"Failed to set args\\n\");\n    exit(1);\n  }\n\n  symsan_set_debug(debug);\n  symsan_set_bounds_check(1);\n  symsan_set_solve_ub(solve_ub);\n\n  // launch the target\n  int ret = symsan_run(input_fd);\n  if (ret < 0) {\n    fprintf(stderr, \"Failed to launch target: %s\\n\", strerror(errno));\n    exit(1);\n  } else if (ret > 0) {\n    fprintf(stderr, \"SymSan launch error %d\\n\", ret);\n    exit(1);\n  }\n  close(input_fd);\n\n  // setup z3 parser\n  __z3_parser = new symsan::Z3ParserSolver(shm_base, uniontable_size, __z3_context);\n  std::vector<symsan::input_t> inputs;\n  inputs.push_back({(uint8_t*)input_buf, input_size});\n  if (__z3_parser->restart(inputs) != 0) {\n    fprintf(stderr, \"Failed to restart parser\\n\");\n    exit(1);\n  }\n\n  pipe_msg msg;\n  gep_msg gmsg;\n  size_t msg_size;\n  memcmp_msg *mmsg = nullptr;\n\n  while (symsan_read_event(&msg, sizeof(msg), 0) > 0) {\n    // solve constraints\n    switch (msg.msg_type) {\n      case cond_type:\n        __solve_cond(msg.label, msg.result, msg.flags & F_ADD_CONS, (void*)msg.addr);\n        break;\n      case gep_type:\n        if (symsan_read_event(&gmsg, sizeof(gmsg), 0) != sizeof(gmsg)) {\n          fprintf(stderr, \"Failed to receive gep msg: %s\\n\", strerror(errno));\n          break;\n        }\n        // double check\n        if (msg.label != gmsg.index_label) {\n          fprintf(stderr, 
\"Incorrect gep msg: %d vs %d\\n\", msg.label, gmsg.index_label);\n          break;\n        }\n        __handle_gep(gmsg.ptr_label, gmsg.ptr, gmsg.index_label, gmsg.index,\n                     gmsg.num_elems, gmsg.elem_size, gmsg.current_offset, (void*)msg.addr);\n        break;\n      case memcmp_type:\n        // flags = 0 means both operands are symbolic thus no content to read\n        if (!msg.flags)\n          break;\n        msg_size = sizeof(memcmp_msg) + msg.result;\n        mmsg = (memcmp_msg*)malloc(msg_size); // not freed until terminate\n        if (symsan_read_event(mmsg, msg_size, 0) != msg_size) {\n          fprintf(stderr, \"Failed to receive memcmp msg: %s\\n\", strerror(errno));\n          free(mmsg);\n          break;\n        }\n        // double check\n        if (msg.label != mmsg->label) {\n          fprintf(stderr, \"Incorrect memcmp msg: %d vs %d\\n\", msg.label, mmsg->label);\n          free(mmsg);\n          break;\n        }\n        // save the content\n        __z3_parser->record_memcmp(msg.label, mmsg->content, msg.result);\n        free(mmsg);\n        break;\n      case fsize_type:\n        break;\n      default:\n        break;\n    }\n  }\n\n  symsan_destroy();\n  exit(0);\n}\n\n"
  },
  {
    "path": "driver/harness-proxy.c",
    "content": "// Copyright 2021 Google LLC\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n// http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n#include <stdio.h>\n#include <stdint.h>\n#include <unistd.h>\n#include <string.h>\n#include <stdlib.h>\n#include <sys/stat.h>\n#include <fcntl.h>\n\nextern int LLVMFuzzerTestOneInput(const uint8_t *data, size_t size);\n__attribute__((weak)) int LLVMFuzzerInitialize(int *argc, char ***argv);\n__attribute__((weak)) void LLVMFuzzerCleanup(void);\n\nint main(int argc, char* argv[]) {\n\n    // Initialize the fuzzer if the function is available\n    if (LLVMFuzzerInitialize) {\n        LLVMFuzzerInitialize(&argc, &argv);\n    }\n\n    // open file\n    int fd = open(argv[1], O_RDONLY);\n    if (fd < 0) {\n        perror(\"open\");\n        return 1;\n    }\n    // get file size\n    struct stat st;\n    if (fstat(fd, &st) < 0) {\n        perror(\"fstat\");\n        close(fd);\n        return 1;\n    }\n    size_t fsize = st.st_size;\n\n    // read file contents\n    char *string = (char*)malloc(fsize);\n    if (read(fd, string, fsize) != fsize) {\n        perror(\"read\");\n        close(fd);\n        return 1;\n    }\n    close(fd);\n\n    // Now call into the harness\n    int retval = LLVMFuzzerTestOneInput((const uint8_t *)string, fsize);\n\n    free(string);\n\n    // Cleanup if the function is available\n    if (LLVMFuzzerCleanup) {\n        LLVMFuzzerCleanup();\n    }\n\n    return retval;\n}\n"
  },
  {
    "path": "driver/launcher/CMakeLists.txt",
    "content": "set(CMAKE_POSITION_INDEPENDENT_CODE ON)\nset(CMAKE_CXX_STANDARD 14)\n\nadd_library(launcher STATIC launch.c)\n"
  },
  {
    "path": "driver/launcher/launch.c",
    "content": "#include \"defs.h\"\n#include \"debug.h\"\n#include \"version.h\"\n#include \"launch.h\"\n\n#include <stdio.h>\n#include <stdlib.h>\n#include <string.h>\n#include <unistd.h>\n\n#include <sys/ipc.h>\n#include <sys/mman.h>\n#include <sys/select.h>\n#include <sys/shm.h>\n#include <sys/stat.h>\n#include <sys/time.h>\n#include <sys/types.h>\n#include <sys/wait.h>\n#include <sys/resource.h>\n#include <fcntl.h>\n\n#undef alloc_printf\n#define alloc_printf(_str...) ({ \\\n    char* _tmp; \\\n    s32 _len = snprintf(NULL, 0, _str); \\\n    if (_len < 0) FATAL(\"Whoa, snprintf() fails?!\"); \\\n    _tmp = (char*)malloc(_len + 1); \\\n    snprintf((char*)_tmp, _len + 1, _str); \\\n    _tmp; \\\n  })\n\nstruct symsan_config {\n  char *symsan_bin;\n  char *input_file;\n  char **argv;\n  char *shm_name;\n  int shm_fd;\n  void *label_info;\n  int pipefds[2];\n  char *symsan_env;\n  int symsan_pid;\n  size_t shm_size;\n\n  int is_input_file;\n  int is_input_sdtin;\n  int is_input_network;\n  int enable_debug;\n  int enable_bounds_check;\n  int enable_solve_ub;\n  int exit_on_memerror;\n  int trace_file_size;\n  int force_stdin;\n\n  int dev_null_fd;\n\n  int exit_status;\n  int is_killed;\n};\n\nstatic struct symsan_config g_config;\n\n__attribute__((visibility(\"default\")))\nvoid* symsan_init(const char *symsan_bin, const size_t uniontable_size) {\n\n  if (!symsan_bin) {\n    return (void *)-1;\n  }\n\n  g_config.symsan_bin = strdup(symsan_bin);\n  g_config.input_file = NULL;\n  g_config.argv = NULL;\n  g_config.shm_name = NULL;\n  g_config.shm_fd = -1;\n  g_config.label_info = NULL;\n  g_config.shm_size = uniontable_size;\n  g_config.pipefds[0] = -1;\n  g_config.pipefds[1] = -1;\n  g_config.symsan_env = NULL;\n  g_config.symsan_pid = -1;\n  g_config.is_input_file = 0;\n  g_config.is_input_sdtin = 0;\n  g_config.is_input_network = 0;\n  g_config.enable_debug = 0;\n  g_config.enable_bounds_check = 0;\n  g_config.enable_solve_ub = 0;\n  g_config.exit_on_memerror = 
1;\n  g_config.trace_file_size = 0;\n  g_config.force_stdin = 0;\n  g_config.dev_null_fd = -1;\n  g_config.exit_status = 0;\n  g_config.is_killed = 0;\n\n  // open /dev/null\n  g_config.dev_null_fd = open(\"/dev/null\", O_RDWR);\n  if (g_config.dev_null_fd == -1) {\n    return (void *)-1;\n  }\n\n  // create a new shm name\n  g_config.shm_name = alloc_printf(\"/symsan-union-table-%d\", getpid());\n  if (!g_config.shm_name) {\n    return (void *)-1;\n  }\n  // create shm\n  g_config.shm_fd = shm_open(g_config.shm_name, O_RDWR | O_CREAT, S_IRUSR | S_IWUSR);\n  if (g_config.shm_fd == -1) {\n    return (void *)-1;\n  }\n  // set the size of the shm\n  if (ftruncate(g_config.shm_fd, uniontable_size) == -1) {\n    return (void *)-1;\n  }\n  // clear O_CLOEXEC flag\n  fcntl(g_config.shm_fd, F_SETFD, fcntl(g_config.shm_fd, F_GETFD) & ~FD_CLOEXEC);\n  // mmap the shm\n  g_config.label_info = mmap(NULL, uniontable_size, PROT_READ, MAP_SHARED,\n      g_config.shm_fd, 0);\n\n  return g_config.label_info;\n}\n\n__attribute__((visibility(\"default\")))\nint symsan_set_input(const char *input) {\n  if (!input) {\n    return SYMSAN_INVALID_ARGS;\n  }\n\n  g_config.input_file = strdup(input);\n  if (!g_config.input_file) {\n    return SYMSAN_NO_MEMORY;\n  }\n\n  if (strcmp(input, \"stdin\") == 0) {\n    g_config.is_input_sdtin = 1;\n  } else if (strstr(input, \"tcp@\") == input) {\n    g_config.is_input_network = 1;\n  } else if (strstr(input, \"udp@\") == input) {\n    g_config.is_input_network = 1;\n  } else if (strstr(input, \"unix@\") == input) {\n    g_config.is_input_network = 1;\n  } else {\n    g_config.is_input_file = 1;\n  }\n\n  return 0;\n}\n\n__attribute__((visibility(\"default\")))\nint symsan_set_args(const int argc, char* const argv[]) {\n  if (argc < 1 || !argv) {\n    return SYMSAN_INVALID_ARGS;\n  }\n\n  g_config.argv = (char **)malloc(sizeof(char *) * (argc + 1));\n  if (!g_config.argv) {\n    return SYMSAN_NO_MEMORY;\n  }\n\n  int err = 0, i = 0;\n  for (;i < 
argc; i++) {\n    if (!argv[i]) {\n      err = SYMSAN_INVALID_ARGS;\n      goto error;\n    }\n\n    g_config.argv[i] = strdup(argv[i]);\n    if (!g_config.argv[i]) {\n      err = SYMSAN_NO_MEMORY;\n      goto error;\n    }\n  }\n  g_config.argv[argc] = NULL;\n\n  return 0;\n\nerror:\n  for (int j = 0; j < i; j++) {\n    free(g_config.argv[j]);\n  }\n  free(g_config.argv);\n  g_config.argv = NULL;\n  return err;\n}\n\n__attribute__((visibility(\"default\")))\nint symsan_set_debug(int enable) {\n  g_config.enable_debug = !!enable;\n  return 0;\n}\n\n__attribute__((visibility(\"default\")))\nint symsan_set_bounds_check(int enable) {\n  g_config.enable_bounds_check = !!enable;\n  return 0;\n}\n\n__attribute__((visibility(\"default\")))\nint symsan_set_solve_ub(int enable) {\n  g_config.enable_solve_ub = !!enable;\n  return 0;\n}\n\n__attribute__((visibility(\"default\")))\nint symsan_set_exit_on_memerror(int enable) {\n  g_config.exit_on_memerror = !!enable;\n  return 0;\n}\n\n__attribute__((visibility(\"default\")))\nint symsan_set_trace_file_size(int enable) {\n  g_config.trace_file_size = !!enable;\n  return 0;\n}\n\n__attribute__((visibility(\"default\")))\nint symsan_set_force_stdin(int enable) {\n  g_config.force_stdin = !!enable;\n  return 0;\n}\n\n__attribute__((visibility(\"default\")))\nint symsan_run(int fd) {\n  if (fd < 0) {\n    return SYMSAN_INVALID_ARGS;\n  }\n  if (!g_config.symsan_bin) {\n    return SYMSAN_MISSING_BIN;\n  }\n  if (!g_config.label_info) {\n    return SYMSAN_MISSING_SHM;\n  }\n  if (!g_config.input_file) {\n    return SYMSAN_MISSING_INPUT;\n  }\n  if (!g_config.argv) {\n    return SYMSAN_MISSING_ARGS;\n  }\n\n  if (g_config.is_input_network && !g_config.input_file) {\n    return SYMSAN_MISSING_INPUT;\n  }\n\n  // unlikely but double check\n  if (g_config.pipefds[0] != -1) {\n    close(g_config.pipefds[0]);\n  }\n  if (g_config.pipefds[1] != -1) {\n    close(g_config.pipefds[1]);\n  }\n  if (g_config.symsan_env == NULL) {\n    
free(g_config.symsan_env);\n  }\n\n  int ret = pipe(g_config.pipefds);\n  if (ret != 0) {\n    return SYMSAN_NO_MEMORY;\n  }\n\n  // fds and configs could have been changed, so always set up new ones\n  g_config.symsan_env = alloc_printf(\n      \"taint_file=\\\"%s\\\":shm_fd=%d:pipe_fd=%d:debug=%d:trace_bounds=%d:\"\n      \"solve_ub=%d:exit_on_memerror=%d:trace_fsize=%d:force_stdin=%d\",\n      g_config.input_file, g_config.shm_fd, g_config.pipefds[1],\n      g_config.enable_debug, g_config.enable_bounds_check,\n      g_config.enable_solve_ub, g_config.exit_on_memerror,\n      g_config.trace_file_size, g_config.force_stdin);\n  if (g_config.symsan_env == NULL) {\n    return SYMSAN_NO_MEMORY;\n  }\n\n  if (g_config.enable_debug) {\n    fprintf(stderr, \"SYMSAN_ENV: %s\\n\", g_config.symsan_env);\n  }\n\n  g_config.symsan_pid = fork();\n  if (g_config.symsan_pid == 0) {\n    // clear signal handlers and masks\n    sigset_t set;\n    sigemptyset(&set);\n    sigprocmask(SIG_SETMASK, &set, NULL);\n\n    // disable core dump as shadow mem is toooooo large\n    struct rlimit limit;\n    limit.rlim_cur = limit.rlim_max = 0;\n    setrlimit(RLIMIT_CORE, &limit);\n\n    close(g_config.pipefds[0]); // close the read fd\n    setenv(\"TAINT_OPTIONS\", (char*)g_config.symsan_env, 1);\n    unsetenv(\"LD_PRELOAD\"); // don't preload anything\n    if (g_config.is_input_sdtin) {\n      close(0);\n      lseek(fd, 0, SEEK_SET);\n      dup2(fd, 0);\n    }\n    if (!g_config.enable_debug) {\n      close(1);\n      close(2);\n      int dev_null_fd = open(\"/dev/null\", O_RDWR);\n      dup2(g_config.dev_null_fd, 1);\n      dup2(g_config.dev_null_fd, 2);\n    }\n    ret = execv(g_config.symsan_bin, g_config.argv);\n    return ret;\n  } else if (g_config.symsan_pid < 0) {\n    close(g_config.pipefds[0]);\n    close(g_config.pipefds[1]);\n    return g_config.symsan_pid;\n  }\n\n  free(g_config.symsan_env);\n  g_config.symsan_env = NULL;\n  close(g_config.pipefds[1]); // close the write fd\n 
 g_config.pipefds[1] = -1;\n  g_config.is_killed = 0; // reset kill flag\n\n  return 0;\n}\n\n__attribute__((visibility(\"default\")))\nssize_t symsan_read_event(void *buf, size_t size, unsigned int timeout) {\n  if (size == 0) {\n    return 0;\n  }\n\n  int ret = 1;\n\n  if (timeout) {\n    fd_set rfds;\n    struct timeval tv;\n\n    FD_ZERO(&rfds);\n    FD_SET(g_config.pipefds[0], &rfds);\n\n    tv.tv_sec = (timeout / 1000);\n    tv.tv_usec = (timeout % 1000) * 1000;\n\n    ret = select(g_config.pipefds[0] + 1, &rfds, NULL, NULL, &tv);\n  }\n\n  ssize_t n = -1;\n  if (ret > 0) { // no timeout or select okay\n    n = read(g_config.pipefds[0], buf, size);\n  } else {\n    // time out or error on select\n    kill(g_config.symsan_pid, SIGKILL);\n    g_config.is_killed = 1;\n  }\n\n  if (n != size) {\n    // error or EOF\n    waitpid(g_config.symsan_pid, &g_config.exit_status, 0);\n    g_config.symsan_pid = -1;\n    close(g_config.pipefds[0]); // close the read fd\n    g_config.pipefds[0] = -1;\n  }\n\n  return n;\n}\n\n__attribute__((visibility(\"default\")))\nint symsan_terminate() {\n  if (g_config.symsan_pid == -1) {\n    // already terminated\n    return 0;\n  } else if (g_config.symsan_pid > 0) {\n    kill(g_config.symsan_pid, SIGKILL);\n    g_config.is_killed = 1;\n    waitpid(g_config.symsan_pid, &g_config.exit_status, 0);\n    g_config.symsan_pid = -1;\n    close(g_config.pipefds[0]);\n    return 0;\n  } else {\n    return -1;\n  }\n}\n\n__attribute__((visibility(\"default\")))\nint symsan_get_exit_status(int *status) {\n  if (!status) {\n    return -1;\n  }\n\n  *status = g_config.exit_status;\n  return g_config.is_killed;\n}\n\n__attribute__((visibility(\"default\")))\nvoid symsan_destroy() {\n  symsan_terminate();\n\n  if (g_config.label_info != NULL) {\n    munmap(g_config.label_info, g_config.shm_size);\n    g_config.label_info = NULL;\n  }\n\n  if (g_config.dev_null_fd != -1) {\n    close(g_config.dev_null_fd);\n    g_config.dev_null_fd = -1;\n  }\n\n  
if (g_config.shm_fd != -1) {\n    close(g_config.shm_fd);\n    g_config.shm_fd = -1;\n  }\n\n  if (g_config.shm_name != NULL) {\n    shm_unlink(g_config.shm_name);\n    free(g_config.shm_name);\n    g_config.shm_name = NULL;\n  }\n\n  if (g_config.input_file != NULL) {\n    free(g_config.input_file);\n    g_config.input_file = NULL;\n  }\n\n  if (g_config.argv != NULL) {\n    for (int i = 0; g_config.argv[i]; i++) {\n      free(g_config.argv[i]);\n    }\n    free(g_config.argv);\n    g_config.argv = NULL;\n  }\n\n  if (g_config.symsan_env != NULL) {\n    free(g_config.symsan_env);\n    g_config.symsan_env = NULL;\n  }\n\n  if (g_config.symsan_bin != NULL) {\n    free(g_config.symsan_bin);\n    g_config.symsan_bin = NULL;\n  }\n\n  if (g_config.pipefds[0] != -1) {\n    close(g_config.pipefds[0]);\n    g_config.pipefds[0] = -1;\n  }\n\n  if (g_config.pipefds[1] != -1) {\n    close(g_config.pipefds[1]);\n    g_config.pipefds[1] = -1;\n  }\n}\n"
  },
  {
    "path": "include/alloc_inl.h",
    "content": "/*\n   american fuzzy lop - error-checking, memory-zeroing alloc routines\n   ------------------------------------------------------------------\n\n   Written and maintained by Michal Zalewski <lcamtuf@google.com>\n\n   Copyright 2013, 2014, 2015 Google Inc. All rights reserved.\n\n   Licensed under the Apache License, Version 2.0 (the \"License\");\n   you may not use this file except in compliance with the License.\n   You may obtain a copy of the License at:\n\n     http://www.apache.org/licenses/LICENSE-2.0\n\n   This allocator is not designed to resist malicious attackers (the canaries\n   are small and predictable), but provides a robust and portable way to detect\n   use-after-free, off-by-one writes, stale pointers, and so on.\n\n */\n\n#ifndef _HAVE_ALLOC_INL_H\n#define _HAVE_ALLOC_INL_H\n\n#include <stdio.h>\n#include <stdlib.h>\n#include <string.h>\n\n#include \"defs.h\"\n//#include \"types.h\"\n#include \"debug.h\"\n\n#define MAX_ALLOC           0x40000000\n\n/* User-facing macro to sprintf() to a dynamically allocated buffer. */\n\n#define alloc_printf(_str...) ({ \\\n    char* _tmp; \\\n    s32 _len = snprintf(NULL, 0, _str); \\\n    if (_len < 0) FATAL(\"Whoa, snprintf() fails?!\"); \\\n    _tmp = ck_alloc(_len + 1); \\\n    snprintf((char*)_tmp, _len + 1, _str); \\\n    _tmp; \\\n  })\n\n/* Macro to enforce allocation limits as a last-resort defense against\n   integer overflows. */\n\n#define ALLOC_CHECK_SIZE(_s) do { \\\n    if ((_s) > MAX_ALLOC) \\\n      ABORT(\"Bad alloc request: %u bytes\", (_s)); \\\n  } while (0)\n\n/* Macro to check malloc() failures and the like. */\n\n#define ALLOC_CHECK_RESULT(_r, _s) do { \\\n    if (!(_r)) \\\n      ABORT(\"Out of memory: can't allocate %u bytes\", (_s)); \\\n  } while (0)\n\n/* Magic tokens used to mark used / freed chunks. 
*/\n\n#define ALLOC_MAGIC_C1  0xFF00FF00 /* Used head (dword)  */\n#define ALLOC_MAGIC_F   0xFE00FE00 /* Freed head (dword) */\n#define ALLOC_MAGIC_C2  0xF0       /* Used tail (byte)   */\n\n/* Positions of guard tokens in relation to the user-visible pointer. */\n\n#define ALLOC_C1(_ptr)  (((u32*)(_ptr))[-2])\n#define ALLOC_S(_ptr)   (((u32*)(_ptr))[-1])\n#define ALLOC_C2(_ptr)  (((u8*)(_ptr))[ALLOC_S(_ptr)])\n\n#define ALLOC_OFF_HEAD  8\n#define ALLOC_OFF_TOTAL (ALLOC_OFF_HEAD + 1)\n\n/* Allocator increments for ck_realloc_block(). */\n\n#define ALLOC_BLK_INC    256\n\n/* Sanity-checking macros for pointers. */\n\n#define CHECK_PTR(_p) do { \\\n    if (_p) { \\\n      if (ALLOC_C1(_p) ^ ALLOC_MAGIC_C1) {\\\n        if (ALLOC_C1(_p) == ALLOC_MAGIC_F) \\\n          ABORT(\"Use after free.\"); \\\n        else ABORT(\"Corrupted head alloc canary.\"); \\\n      } \\\n      if (ALLOC_C2(_p) ^ ALLOC_MAGIC_C2) \\\n        ABORT(\"Corrupted tail alloc canary.\"); \\\n    } \\\n  } while (0)\n\n#define CHECK_PTR_EXPR(_p) ({ \\\n    typeof (_p) _tmp = (_p); \\\n    CHECK_PTR(_tmp); \\\n    _tmp; \\\n  })\n\n\n/* Allocate a buffer, explicitly not zeroing it. Returns NULL for zero-sized\n   requests. */\n\nstatic inline void* DFL_ck_alloc_nozero(u32 size) {\n\n  void* ret;\n\n  if (!size) return NULL;\n\n  ALLOC_CHECK_SIZE(size);\n  ret = malloc(size + ALLOC_OFF_TOTAL);\n  ALLOC_CHECK_RESULT(ret, size);\n\n  ret += ALLOC_OFF_HEAD;\n\n  ALLOC_C1(ret) = ALLOC_MAGIC_C1;\n  ALLOC_S(ret)  = size;\n  ALLOC_C2(ret) = ALLOC_MAGIC_C2;\n\n  return ret;\n\n}\n\n\n/* Allocate a buffer, returning zeroed memory. */\n\nstatic inline void* DFL_ck_alloc(u32 size) {\n\n  void* mem;\n\n  if (!size) return NULL;\n  mem = DFL_ck_alloc_nozero(size);\n\n  return memset(mem, 0, size);\n\n}\n\n\n/* Free memory, checking for double free and corrupted heap. When DEBUG_BUILD\n   is set, the old memory will be also clobbered with 0xFF. 
*/\n\nstatic inline void DFL_ck_free(void* mem) {\n\n  if (!mem) return;\n\n  CHECK_PTR(mem);\n\n#ifdef DEBUG_BUILD\n\n  /* Catch pointer issues sooner. */\n  memset(mem, 0xFF, ALLOC_S(mem));\n\n#endif /* DEBUG_BUILD */\n\n  ALLOC_C1(mem) = ALLOC_MAGIC_F;\n\n  free(mem - ALLOC_OFF_HEAD);\n\n}\n\n\n/* Re-allocate a buffer, checking for issues and zeroing any newly-added tail.\n   With DEBUG_BUILD, the buffer is always reallocated to a new addresses and the\n   old memory is clobbered with 0xFF. */\n\nstatic inline void* DFL_ck_realloc(void* orig, u32 size) {\n\n  void* ret;\n  u32   old_size = 0;\n\n  if (!size) {\n\n    DFL_ck_free(orig);\n    return NULL;\n\n  }\n\n  if (orig) {\n\n    CHECK_PTR(orig);\n\n#ifndef DEBUG_BUILD\n    ALLOC_C1(orig) = ALLOC_MAGIC_F;\n#endif /* !DEBUG_BUILD */\n\n    old_size  = ALLOC_S(orig);\n    orig     -= ALLOC_OFF_HEAD;\n\n    ALLOC_CHECK_SIZE(old_size);\n\n  }\n\n  ALLOC_CHECK_SIZE(size);\n\n#ifndef DEBUG_BUILD\n\n  ret = realloc(orig, size + ALLOC_OFF_TOTAL);\n  ALLOC_CHECK_RESULT(ret, size);\n\n#else\n\n  /* Catch pointer issues sooner: force relocation and make sure that the\n     original buffer is wiped. */\n\n  ret = malloc(size + ALLOC_OFF_TOTAL);\n  ALLOC_CHECK_RESULT(ret, size);\n\n  if (orig) {\n\n    memcpy(ret + ALLOC_OFF_HEAD, orig + ALLOC_OFF_HEAD, MIN(size, old_size));\n    memset(orig + ALLOC_OFF_HEAD, 0xFF, old_size);\n\n    ALLOC_C1(orig + ALLOC_OFF_HEAD) = ALLOC_MAGIC_F;\n\n    free(orig);\n\n  }\n\n#endif /* ^!DEBUG_BUILD */\n\n  ret += ALLOC_OFF_HEAD;\n\n  ALLOC_C1(ret) = ALLOC_MAGIC_C1;\n  ALLOC_S(ret)  = size;\n  ALLOC_C2(ret) = ALLOC_MAGIC_C2;\n\n  if (size > old_size)\n    memset(ret + old_size, 0, size - old_size);\n\n  return ret;\n\n}\n\n\n/* Re-allocate a buffer with ALLOC_BLK_INC increments (used to speed up\n   repeated small reallocs without complicating the user code). 
*/\n\nstatic inline void* DFL_ck_realloc_block(void* orig, u32 size) {\n\n#ifndef DEBUG_BUILD\n\n  if (orig) {\n\n    CHECK_PTR(orig);\n\n    if (ALLOC_S(orig) >= size) return orig;\n\n    size += ALLOC_BLK_INC;\n\n  }\n\n#endif /* !DEBUG_BUILD */\n\n  return DFL_ck_realloc(orig, size);\n\n}\n\n\n/* Create a buffer with a copy of a string. Returns NULL for NULL inputs. */\n\nstatic inline char* DFL_ck_strdup(char* str) {\n\n  void* ret;\n  u32   size;\n\n  if (!str) return NULL;\n\n  size = strlen((char*)str) + 1;\n\n  ALLOC_CHECK_SIZE(size);\n  ret = malloc(size + ALLOC_OFF_TOTAL);\n  ALLOC_CHECK_RESULT(ret, size);\n\n  ret += ALLOC_OFF_HEAD;\n\n  ALLOC_C1(ret) = ALLOC_MAGIC_C1;\n  ALLOC_S(ret)  = size;\n  ALLOC_C2(ret) = ALLOC_MAGIC_C2;\n\n  return memcpy(ret, str, size);\n\n}\n\n\n/* Create a buffer with a copy of a memory block. Returns NULL for zero-sized\n   or NULL inputs. */\n\nstatic inline void* DFL_ck_memdup(void* mem, u32 size) {\n\n  void* ret;\n\n  if (!mem || !size) return NULL;\n\n  ALLOC_CHECK_SIZE(size);\n  ret = malloc(size + ALLOC_OFF_TOTAL);\n  ALLOC_CHECK_RESULT(ret, size);\n  \n  ret += ALLOC_OFF_HEAD;\n\n  ALLOC_C1(ret) = ALLOC_MAGIC_C1;\n  ALLOC_S(ret)  = size;\n  ALLOC_C2(ret) = ALLOC_MAGIC_C2;\n\n  return memcpy(ret, mem, size);\n\n}\n\n\n/* Create a buffer with a block of text, appending a NUL terminator at the end.\n   Returns NULL for zero-sized or NULL inputs. */\n\nstatic inline u8* DFL_ck_memdup_str(u8* mem, u32 size) {\n\n  u8* ret;\n\n  if (!mem || !size) return NULL;\n\n  ALLOC_CHECK_SIZE(size);\n  ret = malloc(size + ALLOC_OFF_TOTAL + 1);\n  ALLOC_CHECK_RESULT(ret, size);\n  \n  ret += ALLOC_OFF_HEAD;\n\n  ALLOC_C1(ret) = ALLOC_MAGIC_C1;\n  ALLOC_S(ret)  = size;\n  ALLOC_C2(ret) = ALLOC_MAGIC_C2;\n\n  memcpy(ret, mem, size);\n  ret[size] = 0;\n\n  return ret;\n\n}\n\n\n#ifndef DEBUG_BUILD\n\n/* In non-debug mode, we just do straightforward aliasing of the above functions\n   to user-visible names such as ck_alloc(). 
*/\n\n#define ck_alloc          DFL_ck_alloc\n#define ck_alloc_nozero   DFL_ck_alloc_nozero\n#define ck_realloc        DFL_ck_realloc\n#define ck_realloc_block  DFL_ck_realloc_block\n#define ck_strdup         DFL_ck_strdup\n#define ck_memdup         DFL_ck_memdup\n#define ck_memdup_str     DFL_ck_memdup_str\n#define ck_free           DFL_ck_free\n\n#define alloc_report()\n\n#else\n\n/* In debugging mode, we also track allocations to detect memory leaks, and the\n   flow goes through one more layer of indirection. */\n\n/* Alloc tracking data structures: */\n\n#define ALLOC_BUCKETS     4096\n\nstruct TRK_obj {\n  void *ptr;\n  char *file, *func;\n  u32  line;\n};\n\n#ifdef AFL_MAIN\n\nstruct TRK_obj* TRK[ALLOC_BUCKETS];\nu32 TRK_cnt[ALLOC_BUCKETS];\n\n#  define alloc_report() TRK_report()\n\n#else\n\nextern struct TRK_obj* TRK[ALLOC_BUCKETS];\nextern u32 TRK_cnt[ALLOC_BUCKETS];\n\n#  define alloc_report()\n\n#endif /* ^AFL_MAIN */\n\n/* Bucket-assigning function for a given pointer: */\n\n#define TRKH(_ptr) (((((u32)(_ptr)) >> 16) ^ ((u32)(_ptr))) % ALLOC_BUCKETS)\n\n\n/* Add a new entry to the list of allocated objects. */\n\nstatic inline void TRK_alloc_buf(void* ptr, const char* file, const char* func,\n                                 u32 line) {\n\n  u32 i, bucket;\n\n  if (!ptr) return;\n\n  bucket = TRKH(ptr);\n\n  /* Find a free slot in the list of entries for that bucket. */\n\n  for (i = 0; i < TRK_cnt[bucket]; i++)\n\n    if (!TRK[bucket][i].ptr) {\n\n      TRK[bucket][i].ptr  = ptr;\n      TRK[bucket][i].file = (char*)file;\n      TRK[bucket][i].func = (char*)func;\n      TRK[bucket][i].line = line;\n      return;\n\n    }\n\n  /* No space available - allocate more. 
*/\n\n  TRK[bucket] = DFL_ck_realloc_block(TRK[bucket],\n    (TRK_cnt[bucket] + 1) * sizeof(struct TRK_obj));\n\n  TRK[bucket][i].ptr  = ptr;\n  TRK[bucket][i].file = (char*)file;\n  TRK[bucket][i].func = (char*)func;\n  TRK[bucket][i].line = line;\n\n  TRK_cnt[bucket]++;\n\n}\n\n\n/* Remove entry from the list of allocated objects. */\n\nstatic inline void TRK_free_buf(void* ptr, const char* file, const char* func,\n                                u32 line) {\n\n  u32 i, bucket;\n\n  if (!ptr) return;\n\n  bucket = TRKH(ptr);\n\n  /* Find the element on the list... */\n\n  for (i = 0; i < TRK_cnt[bucket]; i++)\n\n    if (TRK[bucket][i].ptr == ptr) {\n\n      TRK[bucket][i].ptr = 0;\n      return;\n\n    }\n\n  WARNF(\"ALLOC: Attempt to free non-allocated memory in %s (%s:%u)\",\n        func, file, line);\n\n}\n\n\n/* Do a final report on all non-deallocated objects. */\n\nstatic inline void TRK_report(void) {\n\n  u32 i, bucket;\n\n  fflush(0);\n\n  for (bucket = 0; bucket < ALLOC_BUCKETS; bucket++)\n    for (i = 0; i < TRK_cnt[bucket]; i++)\n      if (TRK[bucket][i].ptr)\n        WARNF(\"ALLOC: Memory never freed, created in %s (%s:%u)\",\n              TRK[bucket][i].func, TRK[bucket][i].file, TRK[bucket][i].line);\n\n}\n\n\n/* Simple wrappers for non-debugging functions: */\n\nstatic inline void* TRK_ck_alloc(u32 size, const char* file, const char* func,\n                                 u32 line) {\n\n  void* ret = DFL_ck_alloc(size);\n  TRK_alloc_buf(ret, file, func, line);\n  return ret;\n\n}\n\n\nstatic inline void* TRK_ck_realloc(void* orig, u32 size, const char* file,\n                                   const char* func, u32 line) {\n\n  void* ret = DFL_ck_realloc(orig, size);\n  TRK_free_buf(orig, file, func, line);\n  TRK_alloc_buf(ret, file, func, line);\n  return ret;\n\n}\n\n\nstatic inline void* TRK_ck_realloc_block(void* orig, u32 size, const char* file,\n                                         const char* func, u32 line) {\n\n  void* ret = 
DFL_ck_realloc_block(orig, size);\n  TRK_free_buf(orig, file, func, line);\n  TRK_alloc_buf(ret, file, func, line);\n  return ret;\n\n}\n\n\nstatic inline void* TRK_ck_strdup(u8* str, const char* file, const char* func,\n                                  u32 line) {\n\n  void* ret = DFL_ck_strdup(str);\n  TRK_alloc_buf(ret, file, func, line);\n  return ret;\n\n}\n\n\nstatic inline void* TRK_ck_memdup(void* mem, u32 size, const char* file,\n                                  const char* func, u32 line) {\n\n  void* ret = DFL_ck_memdup(mem, size);\n  TRK_alloc_buf(ret, file, func, line);\n  return ret;\n\n}\n\n\nstatic inline void* TRK_ck_memdup_str(void* mem, u32 size, const char* file,\n                                      const char* func, u32 line) {\n\n  void* ret = DFL_ck_memdup_str(mem, size);\n  TRK_alloc_buf(ret, file, func, line);\n  return ret;\n\n}\n\n\nstatic inline void TRK_ck_free(void* ptr, const char* file,\n                                const char* func, u32 line) {\n\n  TRK_free_buf(ptr, file, func, line);\n  DFL_ck_free(ptr);\n\n}\n\n/* Aliasing user-facing names to tracking functions: */\n\n#define ck_alloc(_p1) \\\n  TRK_ck_alloc(_p1, __FILE__, __FUNCTION__, __LINE__)\n\n#define ck_alloc_nozero(_p1) \\\n  TRK_ck_alloc(_p1, __FILE__, __FUNCTION__, __LINE__)\n\n#define ck_realloc(_p1, _p2) \\\n  TRK_ck_realloc(_p1, _p2, __FILE__, __FUNCTION__, __LINE__)\n\n#define ck_realloc_block(_p1, _p2) \\\n  TRK_ck_realloc_block(_p1, _p2, __FILE__, __FUNCTION__, __LINE__)\n\n#define ck_strdup(_p1) \\\n  TRK_ck_strdup(_p1, __FILE__, __FUNCTION__, __LINE__)\n\n#define ck_memdup(_p1, _p2) \\\n  TRK_ck_memdup(_p1, _p2, __FILE__, __FUNCTION__, __LINE__)\n\n#define ck_memdup_str(_p1, _p2) \\\n  TRK_ck_memdup_str(_p1, _p2, __FILE__, __FUNCTION__, __LINE__)\n\n#define ck_free(_p1) \\\n  TRK_ck_free(_p1, __FILE__, __FUNCTION__, __LINE__)\n\n#endif /* ^!DEBUG_BUILD */\n\n#endif /* ! _HAVE_ALLOC_INL_H */\n"
  },
  {
    "path": "include/ast.h",
    "content": "#pragma once\n\n#include <stdint.h>\n#include <stdexcept>\n#include <string>\n#include <vector>\n\nnamespace rgd {\n  enum AstKind {\n    Bool, // 0\n    Constant, // 1\n    Read, // 2\n    Concat, // 3\n    Extract, // 4\n\n    ZExt, // 5\n    SExt, // 6\n\n    // Arithmetic\n    Add, // 7\n    Sub, // 8\n    Mul, // 9\n    UDiv, // 10\n    SDiv, // 11\n    URem, // 12\n    SRem, // 13\n    Neg,  // 14\n\n    // Bit\n    Not, // 15\n    And, // 16\n    Or, // 17\n    Xor, // 18\n    Shl, // 19\n    LShr, // 20\n    AShr, // 21\n\n    // Compare\n    Equal, // 22\n    Distinct, // 23\n    Ult, // 24\n    Ule, // 25\n    Ugt, // 26\n    Uge, // 27\n    Slt, // 28\n    Sle, // 29\n    Sgt, // 30\n    Sge, // 31\n\n    // Logical\n    LOr, // 32\n    LAnd, // 33\n    LNot, // 34\n\n    // Special\n    Ite, // 35\n    Load, // 36    to be worked with TT-Fuzzer\n    Memcmp, //37\n    MemcmpN, // 38\n\n    // Last\n    LastOp\n  };\n\n  static const char* AstKindName[] = {\n    \"Bool\",\n    \"Constant\",\n    \"Read\",\n    \"Concat\",\n    \"Extract\",\n    \"ZExt\",\n    \"SExt\",\n    \"Add\",\n    \"Sub\",\n    \"Mul\",\n    \"UDiv\",\n    \"SDiv\",\n    \"URem\",\n    \"SRem\",\n    \"Neg\",\n    \"Not\",\n    \"And\",\n    \"Or\",\n    \"Xor\",\n    \"Shl\",\n    \"LShr\",\n    \"AShr\",\n    \"Equal\",\n    \"Distinct\",\n    \"Ult\",\n    \"Ule\",\n    \"Ugt\",\n    \"Uge\",\n    \"Slt\",\n    \"Sle\",\n    \"Sgt\",\n    \"Sge\",\n    \"LOr\",\n    \"LAnd\",\n    \"LNot\",\n    \"Ite\",\n    \"Load\",\n    \"Memcmp\",\n    \"MemcmpN\",\n  };\n\n  static inline bool isRelationalKind(uint16_t kind) {\n    if (kind >= Equal && kind <= Sge)\n      return true;\n    else\n      return false;\n  }\n\n  static inline bool isBinaryOperation(uint16_t kind) {\n    if (kind >= Add && kind <= AShr && kind != Neg && kind != Not)\n      return true;\n    else\n      return false;\n  }\n\n  static inline uint16_t negate_cmp(uint16_t kind) {\n    switch (kind) 
{\n      case Equal: return Distinct;\n      case Distinct: return Equal;\n      case Ult: return Uge;\n      case Ule: return Ugt;\n      case Ugt: return Ule;\n      case Uge: return Ult;\n      case Slt: return Sge;\n      case Sle: return Sgt;\n      case Sgt: return Sle;\n      case Sge: return Slt;\n      default: return Bool;\n    }\n  }\n\n  static inline bool isSignedCmp(uint16_t kind) {\n    if (kind >= Slt && kind <= Sge)\n      return true;\n    else\n      return false;\n  }\n\n  class AstNode {\n  public:\n    AstNode(size_t size=32) : child0_(0), child1_(0), kind_(0), bits_(0), index_(0),\n      boolvalue_(0), is_root_(1), label_(0), hash_(0) {\n      root_ = new std::vector<AstNode>(); // only allocate if is root\n      root_->reserve(size + 1); // default capacity, +1 for dummy root\n      root_->emplace_back(AstNode(root_)); // add a dummy root\n    }\n    AstNode(std::vector<AstNode> *r) : root_(r), child0_(0), child1_(0),\n      kind_(0), bits_(0), index_(0), boolvalue_(0), is_root_(0), label_(0),\n      hash_(0) {} // don't allocate if not root\n    ~AstNode() { if (is_root_) delete root_; }\n\n    inline void CopyFrom(const AstNode& other) {\n      if (this->root_ == other.root_) {\n        // don't change is_root_ flag\n        child0_ = other.child0_;\n        child1_ = other.child1_;\n        kind_ = other.kind_;\n        bits_ = other.bits_;\n        index_ = other.index_;\n        boolvalue_ = other.boolvalue_;\n        label_ = other.label_;\n        hash_ = other.hash_;\n      } else {\n        RecursiveCopyFrom(other);\n      }\n    }\n\n    inline uint32_t children_size() const {\n      return (!!child0_) + (!!child1_);\n    }\n\n    inline const AstNode& children(uint32_t i) const {\n      if (i >= 2) throw std::out_of_range(\"children index out of range\");\n      return i == 0 ? 
root_->at(child0_) : root_->at(child1_);\n    }\n\n    inline AstNode* mutable_children(uint32_t i) {\n      if (i >= 2) throw std::out_of_range(\"children index out of range\");\n      return i == 0 ? &root_->at(child0_) : &root_->at(child1_);\n    }\n\n    AstNode* add_children() {\n      size_t size = root_->size();\n      // assert(size < root_->capacity() && \"cannot resize\");\n      if (size >= root_->capacity()) return nullptr;\n      if (child0_ == 0) child0_ = size;\n      else if (child1_ == 0) child1_ = size;\n      else return nullptr; //assert(false && \"too many children\");\n      root_->emplace_back(AstNode(root_));\n      return &root_->back();\n    }\n\n    inline void clear_children() { child0_ = child1_ = 0; }\n    inline void clear_children(uint32_t i) {\n      if (i >= 2) throw std::out_of_range(\"children index out of range\");\n      if (i == 1) child1_ = 0;\n      else child0_ = child1_; // pop child1 to child0\n    }\n\n    inline uint16_t kind() const { return kind_; }\n    inline void set_kind(uint16_t kind) { kind_ = kind; }\n    inline uint16_t bits() const { return bits_; }\n    inline void set_bits(uint16_t bits) { bits_ = bits; }\n    inline uint32_t index() const { return index_; }\n    inline void set_index(uint32_t index) { index_ = index; }\n    inline uint8_t boolvalue() const { return boolvalue_; }\n    inline void set_boolvalue(uint8_t value) { boolvalue_ = value ? 
1 : 0; }\n    inline uint32_t label() const { return label_; }\n    inline void set_label(uint32_t label) { label_ = label; }\n    inline uint32_t hash() const { return hash_; }\n    inline void set_hash(uint32_t hash) { hash_ = hash; }\n  private:\n    std::vector<AstNode> *root_; // root of the AST\n    uint32_t child0_;\n    uint32_t child1_;\n    uint16_t kind_;\n    uint16_t bits_;\n    uint32_t index_ : 30;  //used by read expr for index and extract expr\n    uint8_t boolvalue_ : 1;  //used by bool expr\n    uint8_t is_root_ : 1; // true if this is the root of the AST\n    uint32_t label_;  //for expression dedup\n    uint32_t hash_;  //for node dedup\n\n    void RecursiveCopyFrom(const AstNode &other) {\n      // copy children\n      if (other.child0_) {\n        if (this->child0_ == 0) {\n          child0_ = root_->size();\n          root_->emplace_back(AstNode(root_));\n        }\n        root_->at(child0_).RecursiveCopyFrom(other.children(0));\n      } else {\n        child0_ = 0;\n      }\n      if (other.child1_) {\n        if (this->child1_ == 0) {\n          child1_ = root_->size();\n          root_->emplace_back(AstNode(root_));\n        }\n        root_->at(child1_).RecursiveCopyFrom(other.children(1));\n      } else {\n        child1_ = 0;\n      }\n      // copy other fields\n      kind_ = other.kind_;\n      bits_ = other.bits_;\n      index_ = other.index_;\n      boolvalue_ = other.boolvalue_;\n      label_ = other.label_;\n      hash_ = other.hash_;\n    }\n  };\n\n  static bool isEqualAstRecursive(const AstNode& lhs, const AstNode& rhs) {\n    \n    // number of operands and size of the operands must match\n    const int children_size = lhs.children_size();\n    if (children_size != rhs.children_size()) return false;\n    if (lhs.bits() != rhs.bits()) return false;\n    \n    if (lhs.kind() != rhs.kind()) {\n      // to maximize the reuse of JIT'ed functions, jigsaw does not\n      // care about which relational operator is used, as long as\n 
     // they are both relational operators\n      if (isRelationalKind(lhs.kind()) && isRelationalKind(rhs.kind())) {\n        // do nothing, fall through to compare operands\n      } else {\n        return false;\n      }\n    } else if (lhs.hash() != rhs.hash()) {\n      // if the kind is the same, then hash has to match\n      return false;\n    }\n    // compare each operand\n    for (int i = 0; i < children_size; i++) {\n      if (!isEqualAstRecursive(lhs.children(i), rhs.children(i)))\n        return false;\n    }\n    return true;\n  }\n\n  static inline bool isEqualAst(const AstNode& lhs, const AstNode& rhs) {\n    return isEqualAstRecursive(lhs, rhs);\n  }\n\n  static inline uint32_t xxhash(uint32_t h1, uint32_t h2, uint32_t h3) {\n    const uint32_t PRIME32_1 = 2654435761U;\n    const uint32_t PRIME32_2 = 2246822519U;\n    const uint32_t PRIME32_3 = 3266489917U;\n    const uint32_t PRIME32_4 =  668265263U;\n    const uint32_t PRIME32_5 =  374761393U;\n\n#define XXH_rotl32(x,r) ((x << r) | (x >> (32 - r)))\n    uint32_t h32 = PRIME32_5;\n    h32 += h1 * PRIME32_3;\n    h32  = XXH_rotl32(h32, 17) * PRIME32_4;\n    h32 += h2 * PRIME32_3;\n    h32  = XXH_rotl32(h32, 17) * PRIME32_4;\n    h32 += h3 * PRIME32_3;\n    h32  = XXH_rotl32(h32, 17) * PRIME32_4;\n #undef XXH_rotl32\n\n    h32 ^= h32 >> 15;\n    h32 *= PRIME32_2;\n    h32 ^= h32 >> 13;\n    h32 *= PRIME32_3;\n    h32 ^= h32 >> 16;\n\n    return h32;\n  }\n\n  static inline void buf_to_hex_string(const uint8_t *buf, unsigned length,\n                                       std::string &str) {\n    const char hex_table[16] = {\n        '0', '1', '2', '3', '4', '5', '6', '7', '8', '9',\n        'a', 'b', 'c', 'd', 'e', 'f' };\n    \n    str.clear();\n    for (unsigned i = 0; i < length; ++i) {\n      uint8_t val = buf[i];\n      str.push_back(hex_table[val >> 4]);\n      str.push_back(hex_table[val & 0xf]);\n    }\n  }\n\n}; // namespace rgd\n"
  },
  {
    "path": "include/cov.h",
    "content": "#pragma once\n\n#include <stdint.h>\n#include <vector>\n#include <unordered_map>\n#include <memory>\n#include <utility>\n\nnamespace rgd {\n\nstruct BranchContext {\n  void *addr;\n  bool direction;\n};\n\nstruct HybridBranchContext : public BranchContext {\n  uint32_t id;\n};\n\nstruct ContextAwareBranchContext : public BranchContext {\n  uint32_t context;\n};\n\nstruct LoopAwareBranchContext : public BranchContext {\n  uint32_t loop_counter;\n};\n\nstruct HistoryAwareBranchContext : public BranchContext {\n  uint32_t history;\n};\n\nstruct FullBranchContext : public HybridBranchContext,\n                          public ContextAwareBranchContext,\n                          public LoopAwareBranchContext,\n                          public HistoryAwareBranchContext {\n};\n\nclass CovManager {\npublic:\n  virtual ~CovManager() {}\n  virtual const std::shared_ptr<BranchContext> // don't want the saved context to be modified\n    add_branch(void *addr, uint32_t id, bool direction, uint32_t context, bool is_loop_header, bool is_loop_exit) = 0;\n  virtual bool\n    is_branch_interesting(const std::shared_ptr<BranchContext> context) = 0;\n};\n\nclass EdgeCovManager : public CovManager {\nprivate:\n  using BranchTargets = std::pair<bool, bool>;\n  std::unordered_map<void*, BranchTargets> branches;\n  std::shared_ptr<BranchContext> _ctx;\n\npublic:\n  EdgeCovManager() { _ctx = std::make_shared<BranchContext>(); }\n\n  const std::shared_ptr<BranchContext>\n  add_branch(void *addr, uint32_t id, bool direction, uint32_t context, bool is_loop_header, bool is_loop_exit) override {\n    auto &itr = branches[addr];\n    itr.first |= direction? true : false;\n    itr.second |= direction? 
false : true;\n    _ctx->addr = addr;\n    _ctx->direction = direction;\n    return _ctx;\n  }\n\n  bool is_branch_interesting(const std::shared_ptr<BranchContext> context) override {\n    auto itr = branches.find(context->addr);\n    // assert(itr != branches.end());\n    // interesting iff the opposite direction has not been covered yet\n    if (context->direction) {\n      return itr->second.second == false;\n    } else {\n      return itr->second.first == false;\n    }\n  }\n};\n\n}; // namespace rgd"
  },
  {
    "path": "include/debug.h",
    "content": "/*\n   american fuzzy lop - debug / error handling macros\n   --------------------------------------------------\n\n   Written and maintained by Michal Zalewski <lcamtuf@google.com>\n\n   Copyright 2013, 2014, 2015, 2016 Google Inc. All rights reserved.\n\n   Licensed under the Apache License, Version 2.0 (the \"License\");\n   you may not use this file except in compliance with the License.\n   You may obtain a copy of the License at:\n\n     http://www.apache.org/licenses/LICENSE-2.0\n\n */\n\n#ifndef _HAVE_DEBUG_H\n#define _HAVE_DEBUG_H\n\n#include <errno.h>\n\n#include \"defs.h\"\n\n/*******************\n * Terminal colors *\n *******************/\n\n#ifdef USE_COLOR\n\n#  define cBLK \"\\x1b[0;30m\"\n#  define cRED \"\\x1b[0;31m\"\n#  define cGRN \"\\x1b[0;32m\"\n#  define cBRN \"\\x1b[0;33m\"\n#  define cBLU \"\\x1b[0;34m\"\n#  define cMGN \"\\x1b[0;35m\"\n#  define cCYA \"\\x1b[0;36m\"\n#  define cLGR \"\\x1b[0;37m\"\n#  define cGRA \"\\x1b[1;90m\"\n#  define cLRD \"\\x1b[1;91m\"\n#  define cLGN \"\\x1b[1;92m\"\n#  define cYEL \"\\x1b[1;93m\"\n#  define cLBL \"\\x1b[1;94m\"\n#  define cPIN \"\\x1b[1;95m\"\n#  define cLCY \"\\x1b[1;96m\"\n#  define cBRI \"\\x1b[1;97m\"\n#  define cRST \"\\x1b[0m\"\n\n#  define bgBLK \"\\x1b[40m\"\n#  define bgRED \"\\x1b[41m\"\n#  define bgGRN \"\\x1b[42m\"\n#  define bgBRN \"\\x1b[43m\"\n#  define bgBLU \"\\x1b[44m\"\n#  define bgMGN \"\\x1b[45m\"\n#  define bgCYA \"\\x1b[46m\"\n#  define bgLGR \"\\x1b[47m\"\n#  define bgGRA \"\\x1b[100m\"\n#  define bgLRD \"\\x1b[101m\"\n#  define bgLGN \"\\x1b[102m\"\n#  define bgYEL \"\\x1b[103m\"\n#  define bgLBL \"\\x1b[104m\"\n#  define bgPIN \"\\x1b[105m\"\n#  define bgLCY \"\\x1b[106m\"\n#  define bgBRI \"\\x1b[107m\"\n\n#else\n\n#  define cBLK \"\"\n#  define cRED \"\"\n#  define cGRN \"\"\n#  define cBRN \"\"\n#  define cBLU \"\"\n#  define cMGN \"\"\n#  define cCYA \"\"\n#  define cLGR \"\"\n#  define cGRA \"\"\n#  define cLRD \"\"\n#  define cLGN \"\"\n#  define 
cYEL \"\"\n#  define cLBL \"\"\n#  define cPIN \"\"\n#  define cLCY \"\"\n#  define cBRI \"\"\n#  define cRST \"\"\n\n#  define bgBLK \"\"\n#  define bgRED \"\"\n#  define bgGRN \"\"\n#  define bgBRN \"\"\n#  define bgBLU \"\"\n#  define bgMGN \"\"\n#  define bgCYA \"\"\n#  define bgLGR \"\"\n#  define bgGRA \"\"\n#  define bgLRD \"\"\n#  define bgLGN \"\"\n#  define bgYEL \"\"\n#  define bgLBL \"\"\n#  define bgPIN \"\"\n#  define bgLCY \"\"\n#  define bgBRI \"\"\n\n#endif /* ^USE_COLOR */\n\n/*************************\n * Box drawing sequences *\n *************************/\n\n#ifdef FANCY_BOXES\n\n#  define SET_G1   \"\\x1b)0\"       /* Set G1 for box drawing    */\n#  define RESET_G1 \"\\x1b)B\"       /* Reset G1 to ASCII         */\n#  define bSTART   \"\\x0e\"         /* Enter G1 drawing mode     */\n#  define bSTOP    \"\\x0f\"         /* Leave G1 drawing mode     */\n#  define bH       \"q\"            /* Horizontal line           */\n#  define bV       \"x\"            /* Vertical line             */\n#  define bLT      \"l\"            /* Left top corner           */\n#  define bRT      \"k\"            /* Right top corner          */\n#  define bLB      \"m\"            /* Left bottom corner        */\n#  define bRB      \"j\"            /* Right bottom corner       */\n#  define bX       \"n\"            /* Cross                     */\n#  define bVR      \"t\"            /* Vertical, branch right    */\n#  define bVL      \"u\"            /* Vertical, branch left     */\n#  define bHT      \"v\"            /* Horizontal, branch top    */\n#  define bHB      \"w\"            /* Horizontal, branch bottom */\n\n#else\n\n#  define SET_G1   \"\"\n#  define RESET_G1 \"\"\n#  define bSTART   \"\"\n#  define bSTOP    \"\"\n#  define bH       \"-\"\n#  define bV       \"|\"\n#  define bLT      \"+\"\n#  define bRT      \"+\"\n#  define bLB      \"+\"\n#  define bRB      \"+\"\n#  define bX       \"+\"\n#  define bVR      \"+\"\n#  define bVL      \"+\"\n#  
define bHT      \"+\"\n#  define bHB      \"+\"\n\n#endif /* ^FANCY_BOXES */\n\n/***********************\n * Misc terminal codes *\n ***********************/\n\n#define TERM_HOME     \"\\x1b[H\"\n#define TERM_CLEAR    TERM_HOME \"\\x1b[2J\"\n#define cEOL          \"\\x1b[0K\"\n#define CURSOR_HIDE   \"\\x1b[?25l\"\n#define CURSOR_SHOW   \"\\x1b[?25h\"\n\n/************************\n * Debug & error macros *\n ************************/\n\n/* Just print stuff to the appropriate stream. */\n\n#ifdef MESSAGES_TO_STDOUT\n#  define SAYF(x...)    printf(x)\n#else \n#  define SAYF(x...)    fprintf(stderr, x)\n#endif /* ^MESSAGES_TO_STDOUT */\n\n/* Show a prefixed warning. */\n\n#define WARNF(x...) do { \\\n    SAYF(cYEL \"[!] \" cBRI \"WARNING: \" cRST x); \\\n    SAYF(cRST \"\\n\"); \\\n  } while (0)\n\n/* Show a prefixed \"doing something\" message. */\n\n#define ACTF(x...) do { \\\n    SAYF(cLBL \"[*] \" cRST x); \\\n    SAYF(cRST \"\\n\"); \\\n  } while (0)\n\n/* Show a prefixed \"success\" message. */\n\n#define OKF(x...) do { \\\n    SAYF(cLGN \"[+] \" cRST x); \\\n    SAYF(cRST \"\\n\"); \\\n  } while (0)\n\n/* Show a prefixed fatal error message (not used in afl). */\n\n#define BADF(x...) do { \\\n    SAYF(cLRD \"\\n[-] \" cRST x); \\\n    SAYF(cRST \"\\n\"); \\\n  } while (0)\n\n/* Die with a verbose non-OS fatal error message. */\n\n#define FATAL(x...) do { \\\n    SAYF(bSTOP RESET_G1 CURSOR_SHOW cRST cLRD \"\\n[-] PROGRAM ABORT : \" \\\n         cBRI x); \\\n    SAYF(cLRD \"\\n         Location : \" cRST \"%s(), %s:%u\\n\\n\", \\\n         __FUNCTION__, __FILE__, __LINE__); \\\n    exit(1); \\\n  } while (0)\n\n/* Die by calling abort() to provide a core dump. */\n\n#define ABORT(x...) 
do { \\\n    SAYF(bSTOP RESET_G1 CURSOR_SHOW cRST cLRD \"\\n[-] PROGRAM ABORT : \" \\\n         cBRI x); \\\n    SAYF(cLRD \"\\n    Stop location : \" cRST \"%s(), %s:%u\\n\\n\", \\\n         __FUNCTION__, __FILE__, __LINE__); \\\n    abort(); \\\n  } while (0)\n\n/* Die while also including the output of perror(). */\n\n#define PFATAL(x...) do { \\\n    fflush(stdout); \\\n    SAYF(bSTOP RESET_G1 CURSOR_SHOW cRST cLRD \"\\n[-]  SYSTEM ERROR : \" \\\n         cBRI x); \\\n    SAYF(cLRD \"\\n    Stop location : \" cRST \"%s(), %s:%u\\n\", \\\n         __FUNCTION__, __FILE__, __LINE__); \\\n    SAYF(cLRD \"       OS message : \" cRST \"%s\\n\", strerror(errno)); \\\n    exit(1); \\\n  } while (0)\n\n/* Die with FAULT() or PFAULT() depending on the value of res (used to\n   interpret different failure modes for read(), write(), etc). */\n\n#define RPFATAL(res, x...) do { \\\n    if (res < 0) PFATAL(x); else FATAL(x); \\\n  } while (0)\n\n/* Error-checking versions of read() and write() that call RPFATAL() as\n   appropriate. */\n\n#define ck_write(fd, buf, len, fn) do { \\\n    u32 _len = (len); \\\n    s32 _res = write(fd, buf, _len); \\\n    if (_res != _len) RPFATAL(_res, \"Short write to %s\", fn); \\\n  } while (0)\n\n#define ck_read(fd, buf, len, fn) do { \\\n    u32 _len = (len); \\\n    s32 _res = read(fd, buf, _len); \\\n    if (_res != _len) RPFATAL(_res, \"Short read from %s\", fn); \\\n  } while (0)\n\n#endif /* ! _HAVE_DEBUG_H */\n"
  },
  {
    "path": "include/defs.h",
    "content": "#ifndef _HAVE_DEFS_H\n#define _HAVE_DEFS_H\n\n#ifdef DEBUG_INFO\n// #define DEBUG_PRINTF printf\n#define DEBUG_PRINTF(...)                                                      \\\n  do {                                                                         \\\n    printf(__VA_ARGS__);                                                       \\\n  } while (0)\n#else\n#define DEBUG_PRINTF(...)                                                      \\\n  do {                                                                         \\\n  } while (0)\n#endif\n\n#ifndef MIN\n#define MIN(_a, _b) ((_a) > (_b) ? (_b) : (_a))\n#define MAX(_a, _b) ((_a) > (_b) ? (_a) : (_b))\n#endif /* !MIN */\n\n#ifndef RRR\n#define RRR(x) (random() % (x))\n#endif\n\n#include <stdint.h>\n#include <stdlib.h>\n\ntypedef uint32_t dfsan_label;\n\ntypedef uint8_t u8;\ntypedef uint16_t u16;\ntypedef uint32_t u32;\n#ifdef __x86_64__\ntypedef unsigned long long u64;\ntypedef long long s64;\n#else\ntypedef uint64_t u64;\ntypedef int64_t s64;\n#endif\ntypedef int8_t s8;\ntypedef int16_t s16;\ntypedef int32_t s32;\n\n#endif /* ! _HAVE_DEFS_H */\n"
  },
  {
    "path": "include/launch.h",
    "content": "#ifndef SYMSAN_LAUNCH_H\n#define SYMSAN_LAUNCH_H\n\n#include <stdint.h>\n\n#define SYMSAN_INVALID_ARGS 1;\n#define SYMSAN_NO_MEMORY 2;\n#define SYMSAN_MISSING_BIN 3;\n#define SYMSAN_MISSING_SHM 4;\n#define SYMSAN_MISSING_INPUT 5;\n#define SYMSAN_MISSING_ARGS 6;\n\n/// @brief initialize symsan launcher\n/// @param symsan_bin: path to symsan binary\n/// @param uniontable_size: size of union table\n/// @return pointer to the mapped union table\nvoid* symsan_init(const char *symsan_bin, size_t uniontable_size);\n\n/// @brief set the input file for the target binary\n/// @param input: \"stdin\" or \"file_path\" or \"protocol@host:port\"\n/// @return success or error code\nint symsan_set_input(const char *input);\n\n/// @brief set the arguments for the target binary\n/// @param argc: number of arguments\n/// @param argv: array of arguments\n/// @return success or error code\nint symsan_set_args(const int argc, char* const argv[]);\n\n/// @brief set the debug mode for the target binary\nint symsan_set_debug(int enable);\n\n/// @brief set the bounds check mode for the target binary\nint symsan_set_bounds_check(int enable);\n\n/// @brief set the solve UB mode for the target binary\nint symsan_set_solve_ub(int enable);\n\n/// @brief set the exit on memory error mode for the target binary\nint symsan_set_exit_on_memerror(int enable);\n\n/// @brief set the trace file size mode for the target binary\nint symsan_set_trace_file_size(int enable);\n\n/// @brief set the force stdin mode for the target binary\nint symsan_set_force_stdin(int enable);\n\n/// @brief run the target binary with the input file descriptor\n/// @param fd: input file descriptor, only used if input is \"stdin\"\n/// @return < 0 on syscall error, > 0 on setup error, 0 on success\nint symsan_run(int fd);\n\n/// @brief read event from target binary, will perform cleanup on timeout and EOF\n/// @param buf: buffer to read into\n/// @param size: size of buffer\n/// @param timeout: timeout in 
milliseconds, 0 for no timeout\n/// @return -1 on error, otherwise number of bytes read\nssize_t symsan_read_event(void *buf, size_t size, unsigned int timeout);\n\n/// @brief terminate target binary\nint symsan_terminate();\n\n/// @brief retrieve exit status\nint symsan_get_exit_status(int *status);\n\n/// @brief teardown shared men\nvoid symsan_destroy();\n\n#endif /* !SYMSAN_LAUNCH_H */\n"
  },
  {
    "path": "include/parse-rgd.h",
    "content": "#pragma once\n\n#include \"parse.h\"\n\n#include \"task.h\"\n#include \"union_find.h\"\n\n#include \"boost/dynamic_bitset.hpp\"\n\nnamespace rgd {\n\nclass RGDAstParser : public symsan::ASTParser<SearchTask> {\npublic:\n  RGDAstParser() = delete;\n  RGDAstParser(void *base, size_t size, bool solve_nested = false, size_t max_ast_size = 200)\n    : symsan::ASTParser<SearchTask>(base, size),\n      solve_nested_(solve_nested), max_ast_size_(max_ast_size) {}\n  ~RGDAstParser() {}\n\n  int restart(std::vector<symsan::input_t> &inputs) override;\n  int parse_cond(dfsan_label label, bool result, bool add_nested,\n                 std::vector<uint64_t> &tasks) override;\n  int parse_gep(dfsan_label ptr_label, uptr ptr,\n                dfsan_label index_label, int64_t index,\n                uint64_t num_elems, uint64_t elem_size,\n                int64_t current_offset, bool enum_index,\n                std::vector<uint64_t> &tasks) override;\n\n  int add_constraints(dfsan_label label, uint64_t result) override;\n\nprotected:\n  const bool solve_nested_;\n  const size_t max_ast_size_;\n\nprivate:\n  enum ast_node_t {\n    NONE_CMP_NODE = 0,\n    CMP_NODE = 1,\n    INVALID_NODE = 2,\n    CONCRETIZE_NODE = 4,\n  };\n\n  using expr_t = std::shared_ptr<rgd::AstNode>;\n  using constraint_t = std::shared_ptr<rgd::Constraint>;\n  using clause_t = std::vector<const rgd::AstNode*>;\n  using formula_t = std::vector<clause_t>;\n\n  // caches\n  std::vector<symsan::input_t> inputs_cache; // input cache\n  std::unordered_map<dfsan_label, expr_t> root_expr_cache; // label -> root expr\n  std::unordered_map<dfsan_label, constraint_t> constraint_cache; // label -> constraint\n  std::vector<uint32_t> ast_size_cache; // label -> size of the AST\n  std::vector<uint8_t> nested_cmp_cache; // label -> nested comparison\n  std::unordered_map<dfsan_label, uint8_t> concretize_node; // label -> concretize node\n\n  // dependencies tracking\n  size_t input_size_; // record the whole 
input size\n  using input_dep_t = boost::dynamic_bitset<>;\n  std::vector<input_dep_t> branch_to_inputs; // label -> flattened input dependencies\n  // <input_id, offset> will be flattened to bit \\sigma_{i=0}^{input_id}{size_of(input_i)} + offset\n  inline size_t input_to_dep_idx(uint32_t input_id, uint32_t offset) {\n    size_t idx = 0;\n    for (uint32_t i = 0; i < input_id; ++i) {\n      idx += inputs_cache[i].second;\n    }\n    return idx + offset;\n  }\n  UnionFind data_flow_deps;\n  std::vector<std::vector<expr_t> > input_to_branches;\n\n  [[nodiscard]] expr_t get_root_expr(dfsan_label label);\n  [[nodiscard]] bool scan_labels(dfsan_label label);\n  [[nodiscard]] int find_roots(dfsan_label label, AstNode *ret,\n                               std::unordered_set<dfsan_label> &subroots);\n  inline dfsan_label strip_zext(dfsan_label label);\n  [[nodiscard]] int to_nnf(bool expected_r, rgd::AstNode *node);\n  void to_dnf(const rgd::AstNode *node, formula_t &formula);\n  [[nodiscard]] task_t construct_task(const clause_t &clause);\n  [[nodiscard]] constraint_t parse_constraint(dfsan_label label);\n  [[nodiscard]] bool do_uta_rel(dfsan_label label, rgd::AstNode *ret,\n                                constraint_t constraint,\n                                std::unordered_set<dfsan_label> &visited);\n  uint32_t map_arg(uint32_t input_id, uint32_t offset, uint32_t length,\n                   constraint_t constraint);\n\n  bool save_constraint(expr_t expr, bool result);\n  inline void add_nested_constraint(task_t task, const clause_t &nested_caluse);\n};\n\n}; // namespace rgd\n"
  },
  {
    "path": "include/parse-z3.h",
    "content": "#pragma once\n\n#include \"parse.h\"\n\n#include <z3++.h>\n\nnamespace symsan {\n\nusing z3_task_t = std::vector<z3::expr>;\nclass Z3AstParser : public ASTParser<z3_task_t> {\npublic:\n  Z3AstParser() = delete;\n  Z3AstParser(void *base, size_t size, z3::context &context);\n  ~Z3AstParser() {\n    for (Z3_ast ast : expr_cache_) {\n      if (ast != nullptr) {\n        Z3_dec_ref(context_, ast); // decrement reference count\n      }\n    }\n  }\n\n  int restart(std::vector<input_t> &inputs) override;\n  int parse_cond(dfsan_label label, bool result, bool add_nested,\n                 std::vector<uint64_t> &tasks) override;\n  int parse_gep(dfsan_label ptr_label, uptr ptr,\n                dfsan_label index_label, int64_t index,\n                uint64_t num_elems, uint64_t elem_size,\n                int64_t current_offset, bool enum_index,\n                std::vector<uint64_t> &tasks) override;\n\n  int add_constraints(dfsan_label label, uint64_t result) override;\n\nprotected:\n  z3::context &context_;\n  const char* input_name_format;\n  const char* atoi_name_format;\n  const char* strlen_name_format;\n\n  // String ranges for null-byte post-processing (input_id -> list of (start, end))\n  std::unordered_map<uint32_t, std::vector<std::pair<uint32_t, uint32_t>>> string_ranges_;\n\n  // String info cache: label -> (input_id, offset, length)\n  struct string_info_t {\n    uint32_t input_id;\n    uint32_t offset;\n    uint32_t length;\n  };\n  std::unordered_map<dfsan_label, string_info_t> string_info_cache_;\n\nprivate:\n  // Original input cache\n  std::vector<input_t> inputs_cache_;\n\n  // fsize flag\n  bool has_fsize;\n\n  // input deps\n  using input_dep_set_t = std::unordered_set<offset_t, offset_hash>;\n\n  // caches\n  std::vector<uint32_t> tsize_cache_;\n  std::vector<input_dep_set_t> deps_cache_;\n  std::vector<Z3_ast> expr_cache_;\n  std::vector<uint64_t> value_cache_;\n  static const size_t SIZE_INCREMENT = 2048;\n\n  // dependencies\n  
struct expr_hash {\n    std::size_t operator()(const z3::expr &expr) const {\n      return expr.hash();\n    }\n  };\n  struct expr_equal {\n    bool operator()(const z3::expr &lhs, const z3::expr &rhs) const {\n      return lhs.id() == rhs.id();\n    }\n  };\n  using expr_set_t = std::unordered_set<z3::expr, expr_hash, expr_equal>;\n  struct branch_dependency {\n    expr_set_t expr_deps;\n    input_dep_set_t input_deps;\n  };\n  using branch_dep_t = std::unique_ptr<struct branch_dependency>;\n  using offset_dep_t = std::vector<branch_dep_t>;\n  std::vector<offset_dep_t> branch_deps_;\n\n  inline struct branch_dependency* get_branch_dep(offset_t off) {\n    auto &offset_deps = branch_deps_.at(off.first);\n    return offset_deps.at(off.second).get();\n  }\n\n  inline void set_branch_dep(offset_t off, branch_dep_t dep) {\n    auto &offset_deps = branch_deps_.at(off.first);\n    if (off.second >= offset_deps.size()) {\n      offset_deps.resize(off.second + 1);\n    }\n    offset_deps[off.second] = std::move(dep);\n  }\n\n  inline void cache_expr(dfsan_label label, z3::expr const &e) {\n    if (label != expr_cache_.size()) {\n      // fprintf(stderr, \"expected label %zu, got %u\\n\",\n      //         expr_cache_.size(), label);\n      throw z3::exception(\"missing or adding too many expressions\");\n    }\n    Z3_ast ast = e;\n    Z3_inc_ref(context_, ast); // increment reference count\n    expr_cache_.emplace_back(ast);\n  }\n\n  inline z3::expr get_cached_expr(dfsan_label label, input_dep_set_t &deps) {\n    if (label >= expr_cache_.size()) {\n      throw z3::exception(\"invalid label\");\n    }\n    Z3_ast ast = expr_cache_[label];\n    if (ast == nullptr) {\n      throw z3::exception(\"cannot find cached expression\");\n    }\n    deps.insert(deps_cache_[label].begin(), deps_cache_[label].end());\n    return z3::expr(context_, ast);\n  }\n\n  inline void dump_value_cache(dfsan_label label);\n\n  z3::expr read_concrete(dfsan_label label, uint16_t size);\n  
z3::expr serialize(dfsan_label label, input_dep_set_t &deps);\n  inline void collect_more_deps(input_dep_set_t &deps);\n  inline size_t add_nested_constraints(input_dep_set_t &deps, z3_task_t *task);\n  inline void save_constraint(z3::expr expr, input_dep_set_t &inputs);\n  void construct_index_tasks(z3::expr &index, uint64_t curr,\n                             uint64_t lb, uint64_t ub, uint64_t step,\n                             z3_task_t &nested, std::vector<uint64_t> &tasks);\n\n  // String theory helpers for strchr/strstr\n  z3::expr build_string_from_label(dfsan_label content_label, input_dep_set_t &deps);\n  z3::expr get_byte_expr(uint32_t input, uint32_t offset, input_dep_set_t &deps);\n  bool label_contains_indexof(dfsan_label label);\n};\n\nclass Z3ParserSolver : public Z3AstParser {\npublic:\n  Z3ParserSolver() = delete;\n  Z3ParserSolver(void *base, size_t size, z3::context &context)\n      : Z3AstParser(base, size, context) {}\n  ~Z3ParserSolver() {}\n\n  // Solution operation types\n  enum class solution_op_t : uint8_t {\n    SET,     // Set byte at offset to val\n    INSERT,  // Insert bytes at offset (shifts following bytes right)\n    DELETE   // Delete len bytes starting at offset (shifts following bytes left)\n  };\n\n  struct solution_val {\n    solution_op_t op;\n    uint32_t id;       // input id\n    uint32_t offset;   // position in file\n    union {\n      uint8_t val;     // for SET: the byte value\n      uint32_t len;    // for DELETE: number of bytes to delete\n    };\n    std::vector<uint8_t> data; // for INSERT: bytes to insert\n\n    // Constructors for convenience\n    // SET: set single byte at offset\n    solution_val(uint32_t id, uint32_t offset, uint8_t val)\n        : op(solution_op_t::SET), id(id), offset(offset), val(val) {}\n\n    // INSERT: insert bytes at offset\n    solution_val(uint32_t id, uint32_t offset, std::vector<uint8_t> data)\n        : op(solution_op_t::INSERT), id(id), offset(offset), data(std::move(data)) 
{}\n\n    // DELETE: delete len bytes at offset\n    solution_val(solution_op_t op, uint32_t id, uint32_t offset, uint32_t len)\n        : op(op), id(id), offset(offset), len(len) {}\n  };\n\n  enum solving_status {\n    invalid_task = 1,\n    opt_sat = 2,\n    opt_unsat = 3,\n    opt_timeout = 4,\n    nested_sat = 5,\n    opt_sat_nested_unsat = 6,\n    opt_sat_nested_timeout = 7,\n    unknown_error,\n  };\n\n  using solution_t = std::vector<struct solution_val>;\n  solving_status solve_task(uint64_t task_id, unsigned timeout, solution_t &solutions);\n\nprivate:\n  void generate_solution(z3::model &m, solution_t &solutions);\n\n};\n\n};"
  },
  {
    "path": "include/parse.h",
    "content": "#pragma once\n\n#include \"dfsan/dfsan.h\"\n\n#include <stdint.h>\n#include <string.h>\n\n#include <memory>\n#include <stdexcept>\n#include <string>\n#include <tuple>\n#include <vector>\n#include <unordered_map>\n#include <unordered_set>\n#include <utility>\n\nnamespace symsan {\n\nusing input_t = std::pair<const uint8_t*, size_t>;\nusing offset_t = std::pair<uint32_t, uint32_t>;\nstruct offset_hash {\n  std::size_t operator()(const offset_t &off) const {\n    uint64_t key = off.first;\n    key <<= 32;\n    key |= off.second;\n    return std::hash<uint64_t>{}(key);\n  }\n};\n\ntemplate <class T>\nclass ASTParser {\npublic:\n  ASTParser() = delete;\n  ASTParser(void *base, size_t size)\n    : base_(static_cast<dfsan_label_info*>(base)),\n      size_(size / sizeof(dfsan_label_info)),\n      prev_task_id_(0) {}\n  virtual ~ASTParser() {}\n\n  virtual int restart(std::vector<input_t> &inputs) {\n    (void)inputs;\n    memcmp_cache_.clear();\n    return 0;\n  }\n  /// @brief Parse a conditional branch\n  /// @param label the label of the condition\n  /// @param result the result of the condition\n  /// @param add_nested whether to add nested constraints\n  /// @param tasks the tasks to be added\n  /// @return 0 on success, -1 on failure\n  virtual int parse_cond(dfsan_label label, bool result, bool add_nested,\n                         std::vector<uint64_t> &tasks) = 0;\n  /// @brief Parse a GEP instruction with symbolic index\n  /// @param ptr_label symbol label of the pointer (e.g., bounds info)\n  /// @param ptr actual pointer value\n  /// @param index_label symbolic label of the index\n  /// @param index actual index value\n  /// @param num_elems number of elements if ptr is an array\n  /// @param elem_size size of each element\n  /// @param current_offset current offset from previous GEP\n  /// @param enum_index whether to enumerate all possible indices\n  /// @param tasks tasks to be added\n  /// @return 0 on success, -1 on failure\n  virtual int 
parse_gep(dfsan_label ptr_label, uptr ptr,\n                        dfsan_label index_label, int64_t index,\n                        uint64_t num_elems, uint64_t elem_size,\n                        int64_t current_offset, bool enum_index,\n                        std::vector<uint64_t> &tasks) = 0;\n\n  /// @brief Add a constraint, typically from symbolic offset\n  /// @param label symbolic label of the constraint\n  /// @param result concrete value of the constraint\n  /// @return 0 on success, -1 on failure\n  virtual int add_constraints(dfsan_label label, uint64_t result) = 0;\n\n  virtual int record_memcmp(dfsan_label label, uint8_t* buf, size_t size) {\n    auto content = std::make_unique<uint8_t[]>(size);\n    memcpy(content.get(), buf, size);\n    memcmp_cache_.insert({label, std::move(content)});\n    return 0;\n  };\n\n  // use shared_ptr to auto-free task\n  virtual std::shared_ptr<T> retrieve_task(uint64_t id) {\n    auto it = tasks_.find(id);\n    if (it == tasks_.end()) {\n      return nullptr;\n    }\n    auto tmp = std::move(it->second);\n    tasks_.erase(it);\n    return tmp;\n  }\n\nprotected:\n  inline dfsan_label_info* get_label_info(dfsan_label label) {\n    if (label >= size_) {\n      throw std::out_of_range(\"label too large \" + std::to_string(label));\n    }\n    return &base_[label];\n  }\n\n  inline uint64_t save_task(std::shared_ptr<T> task) {\n    uint64_t tid = prev_task_id_++;\n    tasks_.insert({tid, task});\n    return tid;\n  }\n\n  dfsan_label_info *base_;\n  size_t size_;\n  uint64_t prev_task_id_;\n  std::unordered_map<uint64_t, std::shared_ptr<T>> tasks_;\n  std::unordered_map<dfsan_label, std::unique_ptr<uint8_t[]>> memcmp_cache_;\n};\n\n}; // namespace symsan\n"
  },
  {
    "path": "include/solver.h",
    "content": "#pragma once\n\n#include \"task.h\"\n\n#include <stdint.h>\n#include <z3++.h>\n\n#include <vector>\n#include <unordered_map>\n#include <utility>\n#include <memory>\n#include <atomic>\n\nnamespace rgd {\n\nenum solver_result_t {\n  SOLVER_ERROR,\n  SOLVER_SAT,\n  SOLVER_UNSAT,\n  SOLVER_TIMEOUT,\n};\n\nclass Solver {\npublic:\n  virtual ~Solver() {};\n  virtual solver_result_t solve(std::shared_ptr<SearchTask> task,\n                                const uint8_t *in_buf, size_t in_size,\n                                uint8_t *out_buf, size_t &out_size) = 0;\n  virtual void print_stats(int fd) = 0;\n};\n\nclass Z3Solver : public Solver {\npublic:\n  Z3Solver();\n  solver_result_t solve(std::shared_ptr<SearchTask> task,\n                        const uint8_t *in_buf, size_t in_size,\n                        uint8_t *out_buf, size_t &out_size) override;\n  void print_stats(int fd) override {} ;\nprivate:\n  z3::expr serialize_rel(uint32_t comparison,\n                         const AstNode* node,\n                         const std::vector<std::pair<bool, uint64_t>> &input_args,\n                         std::unordered_map<uint32_t,z3::expr> &expr_cache);\n\n  z3::expr serialize(const AstNode* node,\n                     const std::vector<std::pair<bool, uint64_t>> &input_args,\n                     std::unordered_map<uint32_t,z3::expr> &expr_cache);\n\n  z3::context &context_;\n  z3::solver solver_;\n};\n\nclass JITSolver : public Solver {\npublic:\n  JITSolver();\n  solver_result_t solve(std::shared_ptr<SearchTask> task,\n                        const uint8_t *in_buf, size_t in_size,\n                        uint8_t *out_buf, size_t &out_size) override;\n  void print_stats(int fd) override;\nprivate:\n  std::atomic_ulong uuid;\n  std::atomic_ulong cache_hits;\n  std::atomic_ulong cache_misses;\n  std::atomic_ulong num_timeout;\n  std::atomic_ulong num_solved;\n  std::atomic_ulong process_time;\n  std::atomic_ulong jit_time;\n  std::atomic_ulong 
solving_time;\n};\n\nclass I2SSolver : public Solver {\npublic:\n  I2SSolver();\n  solver_result_t solve(std::shared_ptr<SearchTask> task,\n                        const uint8_t *in_buf, size_t in_size,\n                        uint8_t *out_buf, size_t &out_size) override;\n  void print_stats(int fd) override {};\nprivate:\n  uint64_t matches;\n  uint64_t mismatches;\n  std::bitset<rgd::LastOp> binop_mask;\n\n  solver_result_t solve_icmp(std::shared_ptr<const Constraint> const& c,\n                             std::unique_ptr<ConsMeta> const& cm,\n                             uint32_t comparison,\n                             const uint8_t *in_buf, size_t in_size,\n                             uint8_t *out_buf, size_t &out_size);\n  solver_result_t solve_memcmp(std::shared_ptr<const Constraint> const& c,\n                               std::unique_ptr<ConsMeta> const& cm,\n                               const uint8_t *in_buf, size_t in_size,\n                               uint8_t *out_buf, size_t &out_size);\n};\n\n}; // namespace rgd\n"
  },
  {
    "path": "include/task.h",
    "content": "#pragma once\n\n#include <stdint.h>\n\n#include <bitset>\n#include <cassert>\n#include <map>\n#include <memory>\n#include <stdexcept>\n#include <tuple>\n#include <unordered_map>\n#include <vector>\n\n#include \"ast.h\"\n#include \"cov.h\"\n\nnamespace rgd {\n\n// JIT'ed function for each relational constraint\ntypedef void(*test_fn_type)(uint64_t*);\n\n// the first two slots of the arguments for reseved for the left and right operands\nstatic const int RET_OFFSET = 2;\n\nstruct Constraint {\n  Constraint() = delete;\n  Constraint(int ast_size): fn(nullptr), const_num(0) {\n    ast = std::make_shared<AstNode>(ast_size);\n  }\n  Constraint(const Constraint&) = default; // XXX: okay to use default?\n  const AstNode *get_root() const { return const_cast<const AstNode*>(ast.get()); }\n\n  // JIT'ed function for a comparison expression\n  test_fn_type fn;\n  // the AST\n  std::shared_ptr<AstNode> ast;\n\n  // During constraint collection, (symbolic) input bytes are recorded\n  // as offsets from the beginning of the input.  However, the JIT'ed\n  // function consumes inputs as an input array.  
So, when building the\n  // function, we need to map the offset to the idx in input array,\n  // which is stored in local_map.\n  std::map<size_t, uint32_t> local_map;\n  // if const {false, const value}, if symbolic {true, index in the inputs}\n  // during local search, we use a single global array (to avoid memory\n  // allocation and free) to prepare the inputs, so we need to know where\n  // to load the input values into the input array.\n  std::vector<std::pair<bool, uint64_t>> input_args;\n  // map the offset to iv (initial value)\n  std::unordered_map<uint32_t, uint8_t> inputs;\n  // shape information about the input (e.g., 1, 2, 4, 8 bytes)\n  std::unordered_map<uint32_t, uint32_t> shapes;\n  // special infomation for atoi: offset -> (result_length, base, str_length)\n  std::unordered_map<uint32_t, std::tuple<uint32_t, uint32_t, uint32_t>> atoi_info;\n  // record the involved operations\n  std::bitset<rgd::LastOp> ops;\n  // number of constant in the input array\n  uint32_t const_num;\n  // recorded comparison operands\n  uint64_t op1, op2;\n};\n\nstruct ConsMeta {\n  // per-constraint arg mapping, so we can share the constraints\n  std::vector<std::pair<bool, uint64_t>> input_args;\n  // per-constraint relational operator, so we can share the AST\n  uint32_t comparison;\n  // input2state inference related\n  std::vector<std::pair<size_t, uint32_t>> i2s_candidates;\n  uint64_t op1, op2;\n};\n\nclass SearchTask {\npublic:\n  SearchTask(): scratch_args(nullptr), max_const_num_(0),\n      stopped(false), attempts(0), solved(false), skip_next(false),\n      base_task(nullptr) {}\n  SearchTask(const SearchTask&) = delete;\n  ~SearchTask() { if (scratch_args) free(scratch_args); }\n  inline bool has_finalized() const { return scratch_args != nullptr; }\n\n  using constraint_t = std::shared_ptr<const Constraint>;\n  using consmeta_t = std::unique_ptr<ConsMeta>;\n  using input_t = std::pair<uint32_t, uint8_t>;\n\n  inline bool empty() const {\n    return 
constraints_.empty();\n  }\n\n  inline size_t size() const {\n    return constraints_.size();\n  }\n\n  inline void add_constraint(constraint_t constraint, uint32_t comparison) {\n    if (has_finalized()) {\n      throw std::runtime_error(\"Cannot add constraints after finalization\");\n    }\n    constraints_.push_back(constraint);\n    comparisons_.push_back(comparison);\n  }\n\n  inline const constraint_t& constraints(size_t i) const {\n    return constraints_.at(i);\n  }\n\n  inline const uint32_t comparisons(size_t i) const {\n    return comparisons_.at(i);\n  }\n\n  inline consmeta_t& consmetas(size_t i) {\n    // consmeta can be changed, but the vector cannot\n    return consmetas_.at(i);\n  }\n\n  inline size_t inputs_size() const {\n    return inputs_.size();\n  }\n\n  inline auto const& inputs() const {\n    return inputs_;\n  }\n\n  inline const uint32_t shapes(uint32_t offset) const {\n    auto itr = shapes_.find(offset);\n    if (itr == shapes_.end()) {\n      throw std::runtime_error(\"Cannot find shape information\");\n    }\n    return itr->second;\n  }\n\n  inline auto const& atoi_info() const {\n    return atoi_info_;\n  }\n\n  inline auto const& cmap(uint32_t index) const {\n    if (index >= inputs_.size()) {\n      throw std::out_of_range(\"index out of range\");\n    }\n    auto itr = cmap_.find(index);\n    if (itr == cmap_.end()) {\n      throw std::runtime_error(\"Cannot find constraint mapping\");\n    }\n    return itr->second;\n  }\n\nprivate:\n  // varaible that should not change after finalization\n\n  // constraints, could be shared, strictly read-only\n  std::vector<constraint_t> constraints_;\n  // temporary storage for the comparison operation\n  std::vector<uint32_t> comparisons_;\n  // per-constraint mutable metadata\n  std::vector<consmeta_t> consmetas_;\n\n  // inputs as pairs of <offset (from the beginning of the input, and value>\n  std::vector<std::pair<uint32_t, uint8_t>> inputs_;\n  // shape information at each offset\n  
std::unordered_map<uint32_t, uint32_t> shapes_;\n  // aggreated atoi info\n  std::unordered_map<uint32_t, std::tuple<uint32_t, uint32_t, uint32_t>> atoi_info_;\n  // max number of constants in the input array\n  uint32_t max_const_num_;\n  // record constraints that use a certain input byte\n  std::unordered_map<uint32_t, std::vector<size_t>> cmap_;\n\npublic:\n  // scratching area for solving the task\n\n  // the input array used for all JIT'ed functions\n  // all input bytes are extended to 64 bits\n  uint64_t* scratch_args;\n\n  // intermediate states for the search\n  std::vector<uint64_t> min_distances; // current best\n  std::vector<uint64_t> distances; // general scratch\n  std::vector<uint64_t> plus_distances; // used in partial derivation\n  std::vector<uint64_t> minus_distances; // used in partial derivation\n\n  // statistics\n  uint64_t start; //start time\n  bool stopped;\n  int attempts;\n\n  // solutions\n  bool solved;\n  std::unordered_map<size_t, uint8_t> solution;\n\n  // base task\n  std::shared_ptr<SearchTask> base_task;\n  bool skip_next; // FIXME: an ugly hack to skip the next task\n\n  void finalize() {\n    // aggregate the contraints, map each input byte to a constraint to\n    // an index in the \"global\" input array (i.e., the scratch_args)\n    std::unordered_map<uint32_t, uint32_t> sym_map;\n    uint32_t gidx = 0;\n    size_t num_const = constraints_.size();\n    for (size_t i = 0; i < num_const; i++) {\n      auto const& constraint = constraints_[i];\n      std::unique_ptr<ConsMeta> cm = std::make_unique<ConsMeta>();\n      cm->input_args = constraint->input_args;\n      cm->comparison = comparisons_[i];\n      uint32_t last_offset = -1;\n      uint32_t size = 0;\n      for (const auto& [offset, lidx] : constraint->local_map) {\n        auto gitr = sym_map.find(offset);\n        if (gitr == sym_map.end()) {\n          gidx = inputs_.size();\n          sym_map[offset] = gidx;\n          inputs_.push_back(std::make_pair(offset, 
constraint->inputs.at(offset)));\n          shapes_[offset] = constraint->shapes.at(offset);\n        } else {\n          gidx = gitr->second;\n        }\n        // record input to constraint mapping\n        // skip memcmp constraints\n        if (cm->comparison != rgd::Memcmp && cm->comparison != rgd::MemcmpN) {\n          auto slot = cmap_.find(gidx);\n          if (slot != cmap_.end()) {\n            slot->second.push_back(i);\n          } else {\n            cmap_.emplace(std::make_pair(gidx, std::vector<size_t>{i}));\n          }\n        }\n        // save the mapping between the local index (i.e., where the JIT'ed\n        // function is going to read the input from) and the global index\n        // (i.e., where the current value corresponding to the input byte\n        // is stored in MutInput)\n        cm->input_args[lidx].second = gidx;\n\n        // check if the input bytes are consecutive\n        // using std::map ensures that the offsets (keys) are sorted\n        if (last_offset != -1 && last_offset + 1 != offset) {\n          // a new set of consecutive input bytes, save the info\n          // and resset\n          cm->i2s_candidates.push_back({last_offset + 1 - size, size});\n          size = 0;\n        }\n        last_offset = offset;\n        size++;\n      }\n      // save the last set of consecutive input bytes\n      cm->i2s_candidates.push_back({last_offset + 1 - size, size});\n\n      // process atoi\n      for (const auto& [offset, info] : constraint->atoi_info) {\n        // check dependencies\n        uint32_t length = std::get<2>(info);\n        for (auto j = 0; j < length; ++j) {\n          auto ditr = cmap_.find(offset + j);\n          if (ditr != cmap_.end()) {\n            fprintf(stderr, \"atoi bytes (%d) used in other constraints\\n\", offset + j);\n          }\n        }\n        auto itr = atoi_info_.find(offset);\n        if (itr != atoi_info_.end()) {\n          fprintf(stderr, \"atoi bytes (%d) already exists\\n\", 
offset);\n          assert(info == itr->second);\n        }\n        atoi_info_[offset] = info;\n      }\n\n      // update the number of required constants in the input array\n      if (max_const_num_ < constraint->const_num)\n        max_const_num_ = constraint->const_num;\n\n      // insert the constraint metadata\n      consmetas_.push_back(std::move(cm));\n    }\n\n    // fill the gap in cmap_\n    for (size_t i = 0; i < inputs_.size(); i++) {\n      auto slot = cmap_.find(i);\n      if (slot == cmap_.end()) {\n        cmap_.emplace(std::make_pair(i, std::vector<size_t>{}));\n      }\n    }\n\n    // allocate the input array, reserver 2 for comparison operands a,b\n    scratch_args = (uint64_t*)aligned_alloc(sizeof(*scratch_args),\n        (2 + inputs_.size() + max_const_num_ + 1) * sizeof(*scratch_args));\n    min_distances.resize(num_const, 0);\n    distances.resize(num_const, 0);\n    plus_distances.resize(num_const, 0);\n    minus_distances.resize(num_const, 0);\n  }\n\n  void load_hint() { // load hint from base task\n    if (!base_task || !base_task->solved) return;\n    for (auto itr = inputs_.begin(), e = inputs_.end(); itr != e; itr++) {\n      auto got = base_task->solution.find(itr->first);\n      if (got != base_task->solution.end())\n        itr->second = got->second;\n    }\n  }\n\n};\n\nusing task_t = std::shared_ptr<rgd::SearchTask>;\n\n}; // namespace rgd\n"
  },
  {
    "path": "include/task_mgr.h",
    "content": "#pragma once\n\n#include \"task.h\"\n\n#include <deque>\n#include <memory>\n\nnamespace rgd {\n\nclass TaskManager {\npublic:\n  virtual ~TaskManager() {}\n  virtual bool add_task(std::shared_ptr<BranchContext> ctx, std::shared_ptr<SearchTask> task) = 0;\n  virtual std::shared_ptr<SearchTask> get_next_task() = 0;\n  virtual size_t get_num_tasks() = 0;\n};\n\nclass FIFOTaskManager : public TaskManager {\npublic:\n  bool add_task(std::shared_ptr<BranchContext> ctx, std::shared_ptr<SearchTask> task) override {\n    (void)ctx;\n    tasks.push_back(std::move(task));\n    return true;\n  }\n\n  std::shared_ptr<SearchTask> get_next_task() override {\n    if (tasks.empty()) return nullptr;\n    auto task = std::move(tasks.front());\n    tasks.pop_front();\n    return task;\n  }\n\n  size_t get_num_tasks() override {\n    return tasks.size();\n  }\n\nprivate:\n  std::deque<task_t> tasks;\n};\n\n};  // namespace rgd\n"
  },
  {
    "path": "include/union_find.h",
    "content": "#pragma once\n\n#include <stdint.h>\n#include <vector>\n#include <unordered_set>\n\nnamespace rgd {\n\n// disjoint set data structure\nclass UnionFind {\npublic:\n  static const size_t INVALID = (size_t)-1;\n\n  UnionFind() : size_(0) {};\n  UnionFind(size_t size) {\n    reset(size);\n  };\n\n  void reset(size_t size) {\n    size_ = size;\n    parent.resize(size);\n    next.resize(size);\n    rank.resize(size);\n    for (size_t i = 0; i < size; ++i) {\n      parent[i] = i;\n      next[i] = i;\n      rank[i] = 0;\n    }\n  }\n\n  // find the root of the set containing x\n  size_t find(size_t x) {\n    if (x >= size_) return INVALID;\n\n    size_t p = parent[x];\n    while (x != p) {\n      size_t gp = parent[p];\n      parent[x] = gp;\n      x = p;\n      p = gp;\n    }\n    return x;\n  }\n\n  // merge the sets containing x and y, return new root\n  size_t merge(size_t x, size_t y) {\n    if (x >= size_) return INVALID;\n    if (y >= size_) return INVALID;\n\n    size_t x_root = find(x);\n    size_t y_root = find(y);\n    if (x_root == y_root) return x_root;\n\n    // merge link list\n    size_t x_next = next[x_root];\n    next[x_root] = next[y_root];\n    next[y_root] = x_next;\n\n    if (rank[x_root] < rank[y_root]) {\n      parent[x_root] = y_root;\n      return y_root;\n    } else if (rank[x_root] > rank[y_root]) {\n      parent[y_root] = x_root;\n      return x_root;\n    } else {\n      parent[y_root] = x_root;\n      rank[x_root]++;\n      return x_root;\n    }\n  }\n\n  // get the set containing x\n  size_t get_set(size_t x, std::unordered_set<size_t> &set) {\n    if (x >= size_) return INVALID;\n    size_t temp = x;\n    set.clear();\n    set.insert(temp);\n    while (next[temp] != x) {\n      temp = next[temp];\n      set.insert(temp);\n    }\n    return set.size();\n  }\n\nprivate:\n  size_t size_;\n  std::vector<size_t> parent;\n  std::vector<size_t> next;\n  std::vector<size_t> rank;\n};\n\n};"
  },
  {
    "path": "include/version.h",
    "content": "\n#ifndef _ANGORA_LLVM_VERSION_H\n#define _ANGORA_LLVM_VERSION_H\n\n#define LLVM_VERSION(major, minor) ((major)*100 + (minor))\n#define LLVM_VERSION_CODE LLVM_VERSION(LLVM_VERSION_MAJOR, LLVM_VERSION_MINOR)\n\n#if LLVM_VERSION_CODE >= LLVM_VERSION(5, 0)\n#define LLVM_ATTRIBUTE_LIST AttributeList\n\n#define LLVM_NEW_ALLOCINST(ty, name, insertp)                                  \\\n  (new AllocaInst(ty, getDataLayout().getAllocaAddrSpace(), name, insertp))\n\n#define LLVM_REMOVE_ATTRIBUTE(func, attr, attrbuilder)                         \\\n  func->removeAttributes(attr, attrbuilder)\n\n#else\n\n#define LLVM_ATTRIBUTE_LIST AttributeSet\n\n#define LLVM_NEW_ALLOCINST(ty, name, insertp)                                  \\\n  (new AllocaInst(ty, name, insertp))\n\n#define LLVM_REMOVE_ATTRIBUTE(func, attr, attrbuilder)                         \\\n  func->removeAttributes(                                                      \\\n      attr, LLVM_ATTRIBUTE_LIST::get(func->getContext(), attr, attrbuilder))\n\n#endif\n\n#if LLVM_VERSION_CODE >= LLVM_VERSION(6, 0)\n\n#define SCL_INSECTION(scl, section, prefix, query, category)                   \\\n  scl->inSection(section, prefix, query, category)\n\n#define LLVM_ADD_PARAM_ATTR(func, argno, attr) func->addParamAttr(argno, attr)\n\n#else\n\n#define SCL_INSECTION(scl, section, prefix, query, category)                   \\\n  scl->inSection(prefix, query, category)\n\n#define LLVM_ADD_PARAM_ATTR(func, argno, attr)                                 \\\n  func->addAttribute(argno + 1, attr)\n\n#endif\n\n#endif\n"
  },
  {
    "path": "instrumentation/CMakeLists.txt",
    "content": "set (CMAKE_CXX_STANDARD 14)\nset (CMAKE_CXX_FLAGS \"${CMAKE_CXX_FLAGS} -g\")\n# fix pass bug: https://github.com/sampsyo/llvm-pass-skeleton/issues/7#issuecomment-401834287\nset (CMAKE_MODULE_LINKER_FLAGS \"${CMAKE_CXX_LINK_FLAGS} -Wl,-znodelete\")\nif(APPLE)\n    # User teor2345 reports that this is required to make things work on MacOS X.\n    set (CMAKE_MODULE_LINK_FLAGS \"${CMAKE_CXX_LINK_FLAGS} -Wl,-flat_namespace -Wl,-undefined,suppress\")\nendif(APPLE)\n\ninclude(AddLLVM)\nadd_llvm_pass_plugin(TaintPass TaintPass.cpp)\ninstall (TARGETS TaintPass DESTINATION ${SYMSAN_LIB_DIR})\n"
  },
  {
    "path": "instrumentation/TaintPass.cpp",
    "content": "//===- Taint.cpp - dynamic taint analysis --------------------------------===//\n//\n// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n// See https://llvm.org/LICENSE.txt for license information.\n// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n//\n//===----------------------------------------------------------------------===//\n//\n/// \\file\n/// This file is a part of Taint, a specialized taint analysis for symbolic\n/// execution.\n//\n//===----------------------------------------------------------------------===//\n\n//#include \"defs.h\"\n#include \"version.h\"\n\n#include \"llvm/ADT/DenseMap.h\"\n#include \"llvm/ADT/DenseSet.h\"\n#include \"llvm/ADT/DepthFirstIterator.h\"\n#include \"llvm/ADT/None.h\"\n#include \"llvm/ADT/SmallPtrSet.h\"\n#include \"llvm/ADT/SmallVector.h\"\n#include \"llvm/ADT/StringExtras.h\"\n#include \"llvm/ADT/StringRef.h\"\n#include \"llvm/ADT/Triple.h\"\n#include \"llvm/ADT/iterator.h\"\n#include \"llvm/Analysis/ValueTracking.h\"\n#include \"llvm/IR/Argument.h\"\n#include \"llvm/IR/Attributes.h\"\n#include \"llvm/IR/BasicBlock.h\"\n#include \"llvm/IR/Constant.h\"\n#include \"llvm/IR/Constants.h\"\n#include \"llvm/IR/DataLayout.h\"\n#include \"llvm/IR/DerivedTypes.h\"\n#include \"llvm/IR/Dominators.h\"\n#include \"llvm/IR/Function.h\"\n#include \"llvm/IR/GlobalAlias.h\"\n#include \"llvm/IR/GlobalValue.h\"\n#include \"llvm/IR/GlobalVariable.h\"\n#include \"llvm/IR/IRBuilder.h\"\n#include \"llvm/IR/InlineAsm.h\"\n#include \"llvm/IR/InstVisitor.h\"\n#include \"llvm/IR/InstrTypes.h\"\n#include \"llvm/IR/Instruction.h\"\n#include \"llvm/IR/Instructions.h\"\n#include \"llvm/IR/IntrinsicInst.h\"\n#include \"llvm/IR/LLVMContext.h\"\n#include \"llvm/IR/LegacyPassManager.h\"\n#include \"llvm/IR/MDBuilder.h\"\n#include \"llvm/IR/Module.h\"\n#include \"llvm/IR/PassManager.h\"\n#include \"llvm/IR/Type.h\"\n#include \"llvm/IR/User.h\"\n#include \"llvm/IR/Value.h\"\n#include 
\"llvm/InitializePasses.h\"\n#include \"llvm/Pass.h\"\n#include \"llvm/Passes/OptimizationLevel.h\"\n#include \"llvm/Passes/PassBuilder.h\"\n#include \"llvm/Passes/PassPlugin.h\"\n#include \"llvm/Support/Alignment.h\"\n#include \"llvm/Support/Casting.h\"\n#include \"llvm/Support/CommandLine.h\"\n#include \"llvm/Support/DJB.h\"\n#include \"llvm/Support/ErrorHandling.h\"\n#include \"llvm/Support/SpecialCaseList.h\"\n#include \"llvm/Support/VirtualFileSystem.h\"\n#include \"llvm/Transforms/Instrumentation.h\"\n#include \"llvm/Transforms/IPO/PassManagerBuilder.h\"\n#include \"llvm/Transforms/Utils/BasicBlockUtils.h\"\n#include \"llvm/Transforms/Utils/Local.h\"\n#include <algorithm>\n#include <cassert>\n#include <cstddef>\n#include <cstdint>\n#include <functional>\n#include <iterator>\n#include <memory>\n#include <set>\n#include <string>\n#include <utility>\n#include <vector>\n\nusing namespace llvm;\n\n// This must be consistent with ShadowWidthBits.\nstatic const Align ShadowTLSAlignment = Align(4);\n\n// The size of TLS variables. These constants must be kept in sync with the ones\n// in dfsan.cpp.\nstatic const unsigned ArgTLSSize = 800;\nstatic const unsigned RetvalTLSSize = 800;\n\n// The -taint-preserve-alignment flag controls whether this pass assumes that\n// alignment requirements provided by the input IR are correct.  For example,\n// if the input IR contains a load with alignment 8, this flag will cause\n// the shadow load to have alignment 16.  This flag is disabled by default as\n// we have unfortunately encountered too much code (including Clang itself;\n// see PR14291) which performs misaligned access.\nstatic cl::opt<bool> ClPreserveAlignment(\n    \"taint-preserve-alignment\",\n    cl::desc(\"respect alignment requirements provided by input IR\"), cl::Hidden,\n    cl::init(false));\n\n// The ABI list files control how shadow parameters are passed. 
The pass treats\n// every function labelled \"uninstrumented\" in the ABI list file as conforming\n// to the \"native\" (i.e. unsanitized) ABI.  Unless the ABI list contains\n// additional annotations for those functions, a call to one of those functions\n// will produce a warning message, as the labelling behaviour of the function is\n// unknown. The other supported annotations for uninstrumented functions are\n// \"functional\" and \"discard\", which are described below under\n// Taint::WrapperKind.\n// Functions will often be labelled with both \"uninstrumented\" and one of\n// \"functional\" or \"discard\". This will leave the function unchanged by this\n// pass, and create a wrapper function that will call the original.\n//\n// Instrumented functions can also be annotated as \"force_zero_labels\", which\n// will make all shadow and return values set zero labels.\n// Functions should never be labelled with both \"force_zero_labels\" and\n// \"uninstrumented\" or any of the unistrumented wrapper kinds.\nstatic cl::list<std::string> ClABIListFiles(\n    \"taint-abilist\",\n    cl::desc(\"File listing native ABI functions and how the pass treats them\"),\n    cl::Hidden);\n\n// Controls whether the pass includes or ignores the labels of pointers in load\n// instructions.\nstatic cl::opt<bool> ClCombinePointerLabelsOnLoad(\n    \"taint-combine-pointer-labels-on-load\",\n    cl::desc(\"Combine the label of the pointer with the label of the data when \"\n             \"loading from memory.\"),\n    cl::Hidden, cl::init(false));\n\n// Controls whether the pass includes or ignores the labels of pointers in\n// stores instructions.\nstatic cl::opt<bool> ClCombinePointerLabelsOnStore(\n    \"taint-combine-pointer-labels-on-store\",\n    cl::desc(\"Combine the label of the pointer with the label of the data when \"\n             \"storing in memory.\"),\n    cl::Hidden, cl::init(false));\n\nstatic cl::opt<bool> ClDebugNonzeroLabels(\n    \"taint-debug-nonzero-labels\",\n  
  cl::desc(\"Insert calls to __dfsan_nonzero_label on observing a parameter, \"\n             \"load or return with a nonzero label\"),\n    cl::Hidden);\n\nstatic cl::opt<bool> ClIgnorePersonalityRoutine(\n    \"taint-ignore-personality-routine\",\n    cl::desc(\"If a personality routine is marked uninstrumented from the ABI \"\n             \"list, do not create a wrapper for it.\"),\n    cl::Hidden, cl::init(false));\n\n// SYMSAN specific flags, invoke a callback function to trace GEP events\nstatic cl::opt<bool> ClTraceGEPOffset(\n    \"taint-trace-gep\",\n    cl::desc(\"Trace GEP offset for solving.\"),\n    cl::Hidden, cl::init(true));\n\n// Experimental feature, trace floating point operations\nstatic cl::opt<bool> ClTraceFP(\n    \"taint-trace-float-pointer\",\n    cl::desc(\"Propagate taint for floating pointer instructions.\"),\n    cl::Hidden, cl::init(false));\n\nstatic cl::opt<bool> ClTraceLoop(\n    \"taint-trace-loop\",\n    cl::desc(\"Trace loop entering and exiting.\"),\n    cl::Hidden, cl::init(true));\n\n// SYMSAN specific flags, enable memory safety checks (both spatial and temporal)\nstatic cl::opt<bool> ClTraceBound(\n    \"taint-trace-bound\",\n    cl::desc(\"Trace buffer bound info.\"),\n    cl::Hidden, cl::init(true));\n\n// SYMSAN specific flags, enable generating solving tasks for undefined behaviour\nstatic cl::opt<bool> ClSolveUB(\n    \"taint-solve-ub\",\n    cl::desc(\"Solve undefined behaviours.\"),\n    cl::Hidden, cl::init(false));\n\n// SYMSAN specific flags, only send events for annotated basic blocks\nstatic cl::opt<bool> ClTraceAnnotatedBB(\n    \"taint-trace-annotated-bb\",\n    cl::desc(\"Only trace annotated basic blocks.\"),\n    cl::Hidden, cl::init(false));\n\nstatic StringRef getGlobalTypeString(const GlobalValue &G) {\n  // Types of GlobalVariables are always pointer types.\n  Type *GType = G.getValueType();\n  // For now we support excluding struct types only.\n  if (StructType *SGType = dyn_cast<StructType>(GType)) 
{\n    if (!SGType->isLiteral())\n      return SGType->getName();\n  }\n  return \"<unknown type>\";\n}\n\nnamespace {\n\n// Memory map parameters used in application-to-shadow address calculation.\n// Offset = (Addr & ~AndMask) ^ XorMask\n// Shadow = ShadowBase + Offset * ShadowWidthBytes\nstruct MemoryMapParams {\n  uint64_t AndMask;\n  uint64_t XorMask;\n  uint64_t ShadowBase;\n};\n\n} // end anonymous namespace\n\n// x86_64 Linux\n// NOLINTNEXTLINE(readability-identifier-naming)\nstatic const MemoryMapParams Linux_X86_64_MemoryMapParams = {\n    0x700000000000, // AndMask (keep old style)\n    0,              // XorMask (not used)\n    0,              // ShadowBase (not used)\n};\n\nnamespace {\n\nclass TaintABIList {\n  std::unique_ptr<SpecialCaseList> SCL;\n\n public:\n  TaintABIList() = default;\n\n  void set(std::unique_ptr<SpecialCaseList> List) { SCL = std::move(List); }\n\n  /// Returns whether either this function or its source file are listed in the\n  /// given category.\n  bool isIn(const Function &F, StringRef Category) const {\n    return isIn(*F.getParent(), Category) ||\n           SCL->inSection(\"taint\", \"fun\", F.getName(), Category);\n  }\n\n  /// Returns whether this global alias is listed in the given category.\n  ///\n  /// If GA aliases a function, the alias's name is matched as a function name\n  /// would be.  
Similarly, aliases of globals are matched like globals.\n  bool isIn(const GlobalAlias &GA, StringRef Category) const {\n    if (isIn(*GA.getParent(), Category))\n      return true;\n\n    if (isa<FunctionType>(GA.getValueType()))\n      return SCL->inSection(\"taint\", \"fun\", GA.getName(), Category);\n\n    return SCL->inSection(\"taint\", \"global\", GA.getName(), Category) ||\n           SCL->inSection(\"dataflow\", \"type\", getGlobalTypeString(GA),\n                          Category);\n  }\n\n  /// Returns whether this module is listed in the given category.\n  bool isIn(const Module &M, StringRef Category) const {\n    return SCL->inSection(\"taint\", \"src\", M.getModuleIdentifier(), Category);\n  }\n};\n\n/// TransformedFunction is used to express the result of transforming one\n/// function type into another.  This struct is immutable.  It holds metadata\n/// useful for updating calls of the old function to the new type.\nstruct TransformedFunction {\n  TransformedFunction(FunctionType* OriginalType,\n                      FunctionType* TransformedType,\n                      std::vector<unsigned> ArgumentIndexMapping)\n      : OriginalType(OriginalType),\n        TransformedType(TransformedType),\n        ArgumentIndexMapping(ArgumentIndexMapping) {}\n\n  // Disallow copies.\n  TransformedFunction(const TransformedFunction &) = delete;\n  TransformedFunction &operator=(const TransformedFunction &) = delete;\n\n  // Allow moves.\n  TransformedFunction(TransformedFunction &&) = default;\n  TransformedFunction &operator=(TransformedFunction &&) = default;\n\n  /// Type of the function before the transformation.\n  FunctionType *OriginalType;\n\n  /// Type of the function after the transformation.\n  FunctionType *TransformedType;\n\n  /// Transforming a function may change the position of arguments.  This\n  /// member records the mapping from each argument's old position to its new\n  /// position.  Argument positions are zero-indexed.  
If the transformation\n  /// from F to F' made the first argument of F into the third argument of F',\n  /// then ArgumentIndexMapping[0] will equal 2.\n  std::vector<unsigned> ArgumentIndexMapping;\n};\n\n/// Given function attributes from a call site for the original function,\n/// return function attributes appropriate for a call to the transformed\n/// function.\nAttributeList\nTransformFunctionAttributes(const TransformedFunction& TransformedFunction,\n                            LLVMContext& Ctx, AttributeList CallSiteAttrs) {\n\n  // Construct a vector of AttributeSet for each function argument.\n  std::vector<llvm::AttributeSet> ArgumentAttributes(\n      TransformedFunction.TransformedType->getNumParams());\n\n  // Copy attributes from the parameter of the original function to the\n  // transformed version.  'ArgumentIndexMapping' holds the mapping from\n  // old argument position to new.\n  for (unsigned I = 0, IE = TransformedFunction.ArgumentIndexMapping.size();\n       I < IE; ++I) {\n    unsigned TransformedIndex = TransformedFunction.ArgumentIndexMapping[I];\n    ArgumentAttributes[TransformedIndex] = CallSiteAttrs.getParamAttrs(I);\n  }\n\n  // Copy annotations on varargs arguments.\n  for (unsigned I = TransformedFunction.OriginalType->getNumParams(),\n                IE = CallSiteAttrs.getNumAttrSets();\n       I < IE; ++I) {\n    ArgumentAttributes.push_back(CallSiteAttrs.getParamAttrs(I));\n  }\n\n  return AttributeList::get(Ctx, CallSiteAttrs.getFnAttrs(),\n                            CallSiteAttrs.getRetAttrs(),\n                            llvm::makeArrayRef(ArgumentAttributes));\n}\n\nclass Taint {\n  friend struct TaintFunction;\n  friend class TaintVisitor;\n\n  enum { ShadowWidthBits  = 32, ShadowWidthBytes = ShadowWidthBits / 8 };\n\n  /// How should calls to uninstrumented functions be handled?\n  enum WrapperKind {\n    /// This function is present in an uninstrumented form but we don't know\n    /// how it should be handled.  
Print a warning and call the function anyway.\n    /// Don't label the return value.\n    WK_Warning,\n\n    /// This function does not write to (user-accessible) memory, and its return\n    /// value is unlabelled.\n    WK_Discard,\n\n    /// This function does not write to (user-accessible) memory, and the label\n    /// of its return value is the union of the label of its arguments.\n    WK_Functional,\n\n    /// Instead of calling the function, a custom wrapper __dfsw_F is called,\n    /// where F is the name of the function.  This function may wrap the\n    /// original function or provide its own implementation.  This is similar to\n    /// the IA_Args ABI, except that IA_Args uses a struct return type to\n    /// pass the return value shadow in a register, while WK_Custom uses an\n    /// extra pointer argument to return the shadow.  This allows the wrapped\n    /// form of the function type to be expressed in C.\n    WK_Custom,\n\n    /// Special cases for memcmp, strcmp, strncmp like functions\n    WK_Memcmp,\n    WK_Strcmp,\n    WK_Strncmp,\n    WK_Strchr,    // strchr/memchr - find first char occurrence\n    WK_Strrchr,   // strrchr/memrchr - find last char occurrence\n    WK_Strstr,    // strstr/memmem - find substring\n    WK_Prefixof,  // prefix check (e.g., g_str_has_prefix)\n    WK_Suffixof,  // suffix check (e.g., g_str_has_suffix)\n    WK_Strcat,    // strcat/strncat - string concatenation\n    WK_Strsub,    // substr(s, start, len) - substring from start with len\n  };\n\n  Module *Mod;\n  LLVMContext *Ctx;\n  IntegerType *Int8Ty;\n  IntegerType *Int16Ty;\n  IntegerType *Int32Ty;\n  IntegerType *Int64Ty;\n  /// The shadow type for all primitive types and vector types.\n  IntegerType *PrimitiveShadowTy;\n  PointerType *PrimitiveShadowPtrTy;\n  IntegerType *IntptrTy;\n  PointerType *VoidPtrTy;\n  ConstantInt *ZeroPrimitiveShadow;\n  ConstantInt *UninitializedPrimitiveShadow;\n  ConstantInt *ShadowPtrAndMask;\n  ConstantInt *ShadowPtrXorMask;\n  
ConstantInt *ShadowPtrBase;\n  ConstantInt *ShadowPtrMul;\n  Constant *ArgTLS;\n  Constant *RetvalTLS;\n  FunctionType *TaintUnionFnTy;\n  FunctionType *TaintUnionLoadFnTy;\n  FunctionType *TaintUnionStoreFnTy;\n  FunctionType *TaintUnimplementedFnTy;\n  FunctionType *TaintSetLabelFnTy;\n  FunctionType *TaintNonzeroLabelFnTy;\n  FunctionType *TaintVarargWrapperFnTy;\n  FunctionType *TaintTraceCmpFnTy;\n  FunctionType *TaintTraceCondFnTy;\n  FunctionType *TaintTraceLoopFnTy;\n  FunctionType *TaintTraceSwitchEndFnTy;\n  FunctionType *TaintTraceSelectFnTy;\n  FunctionType *TaintTraceIndirectCallFnTy;\n  FunctionType *TaintTraceGEPFnTy;\n  FunctionType *TaintTraceGEPPtrFnTy;\n  FunctionType *TaintPushStackFrameFnTy;\n  FunctionType *TaintPopStackFrameFnTy;\n  FunctionType *TaintTraceAllocaFnTy;\n  FunctionType *TaintCheckBoundsFnTy;\n  FunctionType *TaintSolveBoundsFnTy;\n  FunctionType *TaintSolveSizeFnTy;\n  FunctionType *TaintTraceGlobalFnTy;\n  FunctionType *TaintDebugFnTy;\n  FunctionCallee TaintUnionFn;\n  FunctionCallee TaintCheckedUnionFn;\n  FunctionCallee TaintUnionLoadFn;\n  FunctionCallee TaintUnionStoreFn;\n  FunctionCallee TaintUnimplementedFn;\n  FunctionCallee TaintSetLabelFn;\n  FunctionCallee TaintNonzeroLabelFn;\n  FunctionCallee TaintVarargWrapperFn;\n  FunctionCallee TaintTraceCmpFn;\n  FunctionCallee TaintTraceCondFn;\n  FunctionCallee TaintTraceLoopFn;\n  FunctionCallee TaintTraceSwitchEndFn;\n  FunctionCallee TaintTraceSelectFn;\n  FunctionCallee TaintTraceIndirectCallFn;\n  FunctionCallee TaintTraceGEPFn;\n  FunctionCallee TaintTraceGEPPtrFn;\n  FunctionCallee TaintPushStackFrameFn;\n  FunctionCallee TaintPopStackFrameFn;\n  FunctionCallee TaintTraceAllocaFn;\n  FunctionCallee TaintCheckBoundsFn;\n  FunctionCallee TaintSolveBoundsFn;\n  FunctionCallee TaintSolveSizeFn;\n  FunctionCallee TaintTraceGlobalFn;\n  FunctionCallee TaintDebugFn;\n  SmallPtrSet<Value *, 16> TaintRuntimeFunctions;\n  Constant *CallStack;\n  MDNode *ColdCallWeights;\n  
TaintABIList ABIList;\n  DenseMap<Value *, Function *> UnwrappedFnMap;\n  AttributeMask ReadOnlyNoneAttrs;\n\n  /// Memory map parameters used in calculation mapping application addresses\n  /// to shadow addresses and origin addresses.\n  const MemoryMapParams *MapParams;\n\n  Value *getShadowOffset(Value *Addr, IRBuilder<> &IRB);\n  Value *getShadowAddress(Value *Addr, IRBuilder<> &IRB);\n  bool isInstrumented(const Function *F);\n  bool isInstrumented(const GlobalAlias *GA);\n  FunctionType *getArgsFunctionType(FunctionType *T);\n  bool isForceZeroLabels(const Function *F);\n  FunctionType *getTrampolineFunctionType(FunctionType *T);\n  TransformedFunction getCustomFunctionType(FunctionType *T);\n  WrapperKind getWrapperKind(Function *F);\n  void addGlobalNameSuffix(GlobalValue *GV);\n  Function *buildWrapperFunction(Function *F, StringRef NewFName,\n                                 GlobalValue::LinkageTypes NewFLink,\n                                 FunctionType *NewFT);\n  Constant *getOrBuildTrampolineFunction(FunctionType *FT, StringRef FName);\n\n  void addContextRecording(Function &F);\n  void addFrameTracing(Function &F);\n  uint32_t getInstructionId(Instruction *Inst);\n  const uint32_t InvalidInstructionId = -1;\n\n  void initializeRuntimeFunctions(Module &M);\n  void initializeCallbackFunctions(Module &M);\n  bool initializeModule(Module &M);\n\n  /// Returns a zero constant with the shadow type of OrigTy.\n  ///\n  /// getZeroShadow({T1,T2,...}) = {getZeroShadow(T1),getZeroShadow(T2,...}\n  /// getZeroShadow([n x T]) = [n x getZeroShadow(T)]\n  /// getZeroShadow(other type) = i16(0)\n  ///\n  /// Note that a zero shadow is always i16(0) when shouldTrackFieldsAndIndices\n  /// returns false.\n  Constant *getZeroShadow(Type *OrigTy);\n  /// Returns a zero constant with the shadow type of V's type.\n  Constant *getZeroShadow(Value *V);\n\n  /// Checks if V is a zero shadow.\n  bool isZeroShadow(Value *V);\n\n  /// Returns the shadow type of OrigTy.\n  
///\n  /// getShadowTy({T1,T2,...}) = {getShadowTy(T1),getShadowTy(T2),...}\n  /// getShadowTy([n x T]) = [n x getShadowTy(T)]\n  /// getShadowTy(other type) = i16\n  ///\n  /// Note that a shadow type is always i16 when shouldTrackFieldsAndIndices\n  /// returns false.\n  Type *getShadowTy(Type *OrigTy);\n  /// Returns the shadow type of of V's type.\n  Type *getShadowTy(Value *V);\n\n  /// Returns an uninitialized shadow value with the shadow type of OrigTy.\n  Constant *getUninitializedShadow(Type *OrigTy);\n\npublic:\n  Taint(const std::vector<std::string> &ABIListFiles);\n\n  bool runImpl(Module &M);\n};\n\nstruct TaintFunction {\n  Taint &TT;\n  Function *F;\n  DominatorTree DT;\n  LoopInfo *LI;\n  bool IsNativeABI;\n  bool IsForceZeroLabels;\n  Value *ArgTLSPtr = nullptr;\n  Value *RetvalTLSPtr = nullptr;\n  AllocaInst *LabelReturnAlloca = nullptr;\n  DenseMap<Value *, Value *> ValShadowMap;\n  DenseMap<AllocaInst *, AllocaInst *> AllocaShadowMap;\n\n  struct PHIFixupElement {\n    PHINode *Phi;\n    PHINode *ShadowPhi;\n  };\n  std::vector<PHIFixupElement> PHIFixups;\n\n  DenseSet<Instruction *> SkipInsts;\n  std::vector<Value *> NonZeroChecks;\n  bool AvoidNewBlocks;\n  std::hash<std::string> HashFn;\n\n  struct CachedShadow {\n    BasicBlock *Block; // The block where Shadow is defined.\n    Value *Shadow;\n  };\n  /// Maps a value to its latest shadow value in terms of domination tree.\n  DenseMap<std::pair<Value *, Value *>, CachedShadow> CachedShadows;\n  /// Maps a value to its latest collapsed shadow value it was converted to in\n  /// terms of domination tree. When ClDebugNonzeroLabels is on, this cache is\n  /// used at a post process where CFG blocks are split. 
So it does not cache\n  /// BasicBlock like CachedShadows, but uses domination between values.\n  DenseMap<Value *, Value *> CachedCollapsedShadows;\n  DenseMap<Value *, std::set<Value *>> ShadowElements;\n\n  TaintFunction(Taint &TT, Function *F, bool IsNativeABI,\n                bool IsForceZeroLabels)\n      : TT(TT), F(F), IsNativeABI(IsNativeABI),\n        IsForceZeroLabels(IsForceZeroLabels) {\n    DT.recalculate(*F);\n    LI = new LoopInfo(DT);\n    // initialize the pseudo-random number generator with the function name\n    srandom(std::hash<std::string>{}(F->getName().str()));\n  }\n\n  ~TaintFunction() { delete LI; }\n\n  /// Computes the shadow address for a given function argument.\n  ///\n  /// Shadow = ArgTLS+ArgOffset.\n  Value *getArgTLS(Type *T, unsigned ArgOffset, IRBuilder<> &IRB);\n\n  /// Computes the shadow address for a retval.\n  Value *getRetvalTLS(Type *T, IRBuilder<> &IRB);\n\n  Value *getShadow(Value *V);\n  void setShadow(Instruction *I, Value *Shadow);\n\n  /// Returns the shadow value of a global variable GV.\n  Value *getShadowForGlobal(GlobalVariable *GV, IRBuilder<> &IRB);\n\n  // Op Shadow\n  Value *combineShadows(Value *V1, Value *V2,\n                        uint16_t op, Instruction *Pos);\n  Value *combineBinaryOperatorShadows(BinaryOperator *BO, uint8_t op);\n  Value *combineCastInstShadows(CastInst *CI, uint8_t op);\n  Value *combineCmpInstShadows(CmpInst *CI, uint8_t op);\n  void visitCmpInst(CmpInst *I);\n  void visitCondition(Value *Cond, Instruction *I);\n  void visitSwitchInst(SwitchInst *I);\n  Value *visitSelectInst(Value *Cond, Value *TS, Value *FS, SelectInst *I);\n  void visitGEPInst(GetElementPtrInst *I);\n  Value *visitAllocaInst(AllocaInst *I, Value *ArraySize, Type *ElTy);\n  void checkBounds(Value *Ptr, Value *Size, Instruction *Pos);\n  void solveBounds(Value *Ptr, Value *Size, Instruction *Pos);\n\n  /// XXX: because we never collapse taint labels for aggregate types,\n  ///      we also do not expand taint 
labels from an aggreated primitive\n  ///      shadow value. Instead, we always load the label for each\n  ///      primitive field.\n  ///\n  /// Load all primitive subtypes of T, returning the aggrate shadow value.\n  ///\n  /// LS({T1,T2, ...}, Addr) = {LS(T1, SubAdrr),LS(T2, SubAddr),...}\n  /// LS([n x T], Addr) = [n x LS(T, SubAddr)]\n  /// LS(other types, Addr) = LS(PS, Addr)\n  Value *loadShadow(Type *T, Value *Addr, uint64_t Size, Align Alignment,\n                    Instruction *Pos);\n\n  /// XXX: we do not union taint labels for aggregate types before store;\n  ///      instead, we store each privimitive field individually.\n  ///\n  /// Store all primitive subtypes of T, using the aggrate shadow value.\n  ///\n  /// SS(Addr, {T1,T2, ...}) = SS(SubAddr, T1), SS(SubAddr, T2), ...\n  /// SS(Addr, [T1,T2,...]) = SS(SubAddr, T1), SS(SubAddr, T2), ...\n  /// SS(Addr, PS) = SS(Addr, PS)\n  void storeShadow(Value *Addr, Type *T, uint64_t Size, Align Alignment,\n                   Value *Shadow, Instruction *Pos);\n\n  Align getShadowAlign(Align InstAlignment);\n\nprivate:\n  /// Loads a primitive shadow label\n  Value *loadPrimitiveShadow(Value *Addr, uint64_t Size, uint64_t Align,\n                             IRBuilder<> &IRB);\n  /// Loads shadow recursively for aggregate types\n  void loadShadowRecursive(Value *Shadow, SmallVector<unsigned, 4> &Indices,\n                           Type *SubTy, Value *Addr, uint64_t Size,\n                           uint64_t Align, IRBuilder<> &IRB);\n  /// Stores an aggregate shadow label\n  void storeShadowRecursive(Value *Shadow, SmallVector<unsigned, 4> &Indices,\n                            Type *SubShadowTy, Value *ShadowAddr, uint64_t Size,\n                            uint64_t Align, IRBuilder<> &IRB);\n  /// Returns the shadow value of an argument A.\n  Value *getShadowForTLSArgument(Argument *A);\n\n  static const uint8_t TrueBranchLoopLatch = 0x8;\n  static const uint8_t FalseBranchLoopLatch = 0x4;\n  static 
const uint8_t TrueBranchLoopExit = 0x2;\n  static const uint8_t FalseBranchLoopExit = 0x1;\n  static const uint8_t LoopExitBranch = TrueBranchLoopExit | FalseBranchLoopExit;\n};\n\nclass TaintVisitor : public InstVisitor<TaintVisitor> {\npublic:\n  TaintFunction &TF;\n\n  TaintVisitor(TaintFunction &TF) : TF(TF) {}\n\n  const DataLayout &getDataLayout() const {\n    return TF.F->getParent()->getDataLayout();\n  }\n\n  //void visitUnaryOperator(UnaryOperator &UO);\n  void visitBinaryOperator(BinaryOperator &BO);\n  void visitCastInst(CastInst &CI);\n  void visitCmpInst(CmpInst &CI);\n  void visitLandingPadInst(LandingPadInst &LPI);\n  void visitGetElementPtrInst(GetElementPtrInst &GEPI);\n  void visitLoadInst(LoadInst &LI);\n  void visitStoreInst(StoreInst &SI);\n  void visitAtomicRMWInst(AtomicRMWInst &I);\n  //void visitAtomicCmpXchgInst(AtomicCmpXchgInst &I);\n  void visitReturnInst(ReturnInst &RI);\n  void visitCallBase(CallBase &CB);\n  void visitPHINode(PHINode &PN);\n  void visitExtractElementInst(ExtractElementInst &I);\n  void visitInsertElementInst(InsertElementInst &I);\n  void visitShuffleVectorInst(ShuffleVectorInst &I);\n  void visitExtractValueInst(ExtractValueInst &I);\n  void visitInsertValueInst(InsertValueInst &I);\n  void visitAllocaInst(AllocaInst &I);\n  void visitSelectInst(SelectInst &I);\n  void visitMemSetInst(MemSetInst &I);\n  void visitMemTransferInst(MemTransferInst &I);\n  void visitBranchInst(BranchInst &BR);\n  void visitSwitchInst(SwitchInst &SW);\n\nprivate:\n  //void visitCASOrRMW(Align InstAlignment, Instruction &I);\n\n  // Returns false when this is an invoke of a custom function.\n  bool visitWrappedCallBase(Function *F, CallBase &CB);\n\n  void addShadowArguments(Function *F, CallBase &CB, std::vector<Value *> &Args,\n                          IRBuilder<> &IRB);\n\n  void visitIntrinsicCallBase(Function *F, CallBase &CB);\n};\n\n} // end anonymous namespace\n\nTaint::Taint(\n    const std::vector<std::string> &ABIListFiles) 
{\n  std::vector<std::string> AllABIListFiles(std::move(ABIListFiles));\n  llvm::append_range(AllABIListFiles, ClABIListFiles);\n  // FIXME: should we propagate vfs::FileSystem to this constructor?\n  ABIList.set(\n      SpecialCaseList::createOrDie(AllABIListFiles, *vfs::getRealFileSystem()));\n}\n\nFunctionType *Taint::getArgsFunctionType(FunctionType *T) {\n  SmallVector<Type *, 4> ArgTypes(T->param_begin(), T->param_end());\n  // we keep the shadow type consistent with the arg type so we don't\n  // need to collapse or expand the shadow\n  for (unsigned i = 0, ie = T->getNumParams(); i != ie; ++i) {\n    Type* param_type = T->getParamType(i);\n    ArgTypes.push_back(getShadowTy(param_type));\n  }\n  // ArgTypes.append(T->getNumParams(), PrimitiveShadowTy);\n  if (T->isVarArg()) // FIXME: vararg\n    ArgTypes.push_back(PrimitiveShadowPtrTy);\n  Type *RetType = T->getReturnType();\n  if (!RetType->isVoidTy())\n    RetType = StructType::get(RetType, getShadowTy(RetType));\n  return FunctionType::get(RetType, ArgTypes, T->isVarArg());\n}\n\nFunctionType *Taint::getTrampolineFunctionType(FunctionType *T) {\n  assert(!T->isVarArg());\n  SmallVector<Type *, 4> ArgTypes;\n  ArgTypes.push_back(T->getPointerTo());\n  ArgTypes.append(T->param_begin(), T->param_end());\n  // we keep the shadow type consistent with the arg type so we don't\n  // need to collapse or expand the shadow\n  for (unsigned i = 0, ie = T->getNumParams(); i != ie; ++i) {\n    Type* param_type = T->getParamType(i);\n    ArgTypes.push_back(getShadowTy(param_type));\n  }\n  // ArgTypes.append(T->getNumParams(), PrimitiveShadowTy);\n  Type *RetType = T->getReturnType();\n  if (!RetType->isVoidTy())\n    // ArgTypes.push_back(PrimitiveShadowPtrTy);\n    ArgTypes.push_back(PointerType::getUnqual(getShadowTy(RetType)));\n  return FunctionType::get(T->getReturnType(), ArgTypes, false);\n}\n\nTransformedFunction Taint::getCustomFunctionType(FunctionType *T) {\n  SmallVector<Type *, 4> ArgTypes;\n\n  // Some 
parameters of the custom function being constructed are\n  // parameters of T.  Record the mapping from parameters of T to\n  // parameters of the custom function, so that parameter attributes\n  // at call sites can be updated.\n  std::vector<unsigned> ArgumentIndexMapping;\n  for (unsigned I = 0, E = T->getNumParams(); I != E; ++I) {\n    Type* ParamType = T->getParamType(I);\n    FunctionType *FT;\n    if (isa<PointerType>(ParamType) &&\n        (FT = dyn_cast<FunctionType>(ParamType->getPointerElementType()))) {\n      ArgumentIndexMapping.push_back(ArgTypes.size());\n      ArgTypes.push_back(getTrampolineFunctionType(FT)->getPointerTo());\n      ArgTypes.push_back(Type::getInt8PtrTy(*Ctx));\n    } else {\n      ArgumentIndexMapping.push_back(ArgTypes.size());\n      ArgTypes.push_back(ParamType);\n    }\n  }\n  for (unsigned i = 0, e = T->getNumParams(); i != e; ++i) {\n    // we keep the shadow type consistent with the arg type so we don't\n    // need to collapse or expand the shadow\n    Type* param_type = T->getParamType(i);\n    ArgTypes.push_back(getShadowTy(param_type));\n    // ArgTypes.push_back(PrimitiveShadowTy);\n  }\n  if (T->isVarArg()) // FIXME: vararg\n    ArgTypes.push_back(PrimitiveShadowPtrTy);\n  Type *RetType = T->getReturnType();\n  if (!RetType->isVoidTy())\n    ArgTypes.push_back(PointerType::getUnqual(getShadowTy(RetType)));\n    // ArgTypes.push_back(PrimitiveShadowPtrTy);\n  return TransformedFunction(\n      T, FunctionType::get(T->getReturnType(), ArgTypes, T->isVarArg()),\n      ArgumentIndexMapping);\n}\n\nbool Taint::isZeroShadow(Value *V) {\n  Type *T = V->getType();\n  if (!isa<ArrayType>(T) && !isa<StructType>(T)) {\n    if (const ConstantInt *CI = dyn_cast<ConstantInt>(V))\n      return CI->isZero();\n    return false;\n  }\n\n  return isa<ConstantAggregateZero>(V);\n}\n\nConstant *Taint::getUninitializedShadow(Type *OrigTy) {\n  if (!isa<ArrayType>(OrigTy) && !isa<StructType>(OrigTy))\n    return 
UninitializedPrimitiveShadow;\n  Type *ShadowTy = getShadowTy(OrigTy);\n  if (ArrayType *AT = dyn_cast<ArrayType>(ShadowTy)) {\n    SmallVector<Constant *, 4> Elements(AT->getNumElements(),\n                                        getUninitializedShadow(AT->getElementType()));\n    return ConstantArray::get(AT, Elements);\n  } else if (StructType *ST = dyn_cast<StructType>(ShadowTy)) {\n    SmallVector<Constant *, 4> Elements(ST->getNumElements());\n    for (unsigned I = 0, N = ST->getNumElements(); I < N; ++I)\n      Elements[I] = getUninitializedShadow(ST->getElementType(I));\n    return ConstantStruct::get(ST, Elements);\n  }\n  llvm_unreachable(\"Unexpected type for uninitialized shadow\");\n}\n\nConstant *Taint::getZeroShadow(Type *OrigTy) {\n  if (!isa<ArrayType>(OrigTy) && !isa<StructType>(OrigTy))\n    return ZeroPrimitiveShadow;\n  Type *ShadowTy = getShadowTy(OrigTy);\n  return ConstantAggregateZero::get(ShadowTy);\n}\n\nConstant *Taint::getZeroShadow(Value *V) {\n  return getZeroShadow(V->getType());\n}\n\nType *Taint::getShadowTy(Type *OrigTy) {\n  if (!OrigTy->isSized())\n    return PrimitiveShadowTy;\n  if (isa<IntegerType>(OrigTy))\n    return PrimitiveShadowTy;\n  if (isa<VectorType>(OrigTy))\n    return PrimitiveShadowTy;\n  if (ArrayType *AT = dyn_cast<ArrayType>(OrigTy))\n    return ArrayType::get(getShadowTy(AT->getElementType()),\n                          AT->getNumElements());\n  if (StructType *ST = dyn_cast<StructType>(OrigTy)) {\n    SmallVector<Type *, 4> Elements;\n    for (unsigned I = 0, N = ST->getNumElements(); I < N; ++I)\n      Elements.push_back(getShadowTy(ST->getElementType(I)));\n    return StructType::get(*Ctx, Elements);\n  }\n  return PrimitiveShadowTy;\n}\n\nType *Taint::getShadowTy(Value *V) {\n  return getShadowTy(V->getType());\n}\n\nuint32_t Taint::getInstructionId(Instruction *Inst) {\n  // check if there is a bbid annotation\n  if (MDNode *BBID = Inst->getMetadata(\"bbid\")) {\n    auto C = 
dyn_cast<ConstantAsMetadata>(BBID->getOperand(0));\n    if (ConstantInt *CI = dyn_cast<ConstantInt>(C->getValue())) {\n      uint64_t BBIDValue = CI->getZExtValue();\n      assert(BBIDValue < UINT32_MAX &&\n             \"bbid value is too large for 32-bit hash\");\n      return static_cast<uint32_t>(BBIDValue);\n    }\n  }\n  if (ClTraceAnnotatedBB && Inst->isTerminator())\n    return InvalidInstructionId;\n\n  // otherwise, fallback to hash\n  static uint32_t unamed = 0;\n  auto SourceInfo = Mod->getSourceFileName();\n  DILocation *Loc = Inst->getDebugLoc();\n  if (Loc) {\n    auto Line = Loc->getLine();\n    auto Col = Loc->getColumn();\n    SourceInfo += \":\" + std::to_string(Line) + \":\" + std::to_string(Col);\n  } else {\n    SourceInfo += \"unamed:\" + std::to_string(unamed++);\n  }\n\n  return djbHash(SourceInfo);\n}\n\nvoid Taint::addContextRecording(Function &F) {\n  // Most code from Angora\n  BasicBlock *BB = &F.getEntryBlock();\n  assert(pred_begin(BB) == pred_end(BB) &&\n         \"Assume that entry block has no predecessors\");\n\n  // Add ctx ^ hash(fun_name) at the beginning of a function\n  IRBuilder<> IRB(&*(BB->getFirstInsertionPt()));\n\n  // Strip dfs$ prefix\n  auto FName = F.getName();\n  if (FName.startswith(\"dfs\")) {\n    size_t pos = FName.find_first_of('$');\n    FName = FName.drop_front(pos + 1);\n  }\n  // add source file name for static function\n  if (!F.hasExternalLinkage()) {\n    FName = StringRef(Mod->getSourceFileName() + \"::\" + FName.str());\n  }\n  uint32_t hash = djbHash(FName);\n\n  ConstantInt *CID = ConstantInt::get(Int32Ty, hash);\n  LoadInst *LCS = IRB.CreateLoad(Int32Ty, CallStack);\n  LCS->setMetadata(Mod->getMDKindID(\"nosanitize\"), MDNode::get(*Ctx, None));\n  Value *NCS = IRB.CreateXor(LCS, CID);\n  StoreInst *SCS = IRB.CreateStore(NCS, CallStack);\n  SCS->setMetadata(Mod->getMDKindID(\"nosanitize\"), MDNode::get(*Ctx, None));\n\n  // Recover ctx at the end of a function\n  for (auto FI = F.begin(), FE = 
F.end(); FI != FE; FI++) {\n    BasicBlock *BB = &*FI;\n    Instruction *Inst = BB->getTerminator();\n    if (isa<ReturnInst>(Inst) || isa<ResumeInst>(Inst)) {\n      IRB.SetInsertPoint(Inst);\n      SCS = IRB.CreateStore(LCS, CallStack);\n      SCS->setMetadata(Mod->getMDKindID(\"nosanitize\"), MDNode::get(*Ctx, None));\n    }\n  }\n}\n\nvoid Taint::addFrameTracing(Function &F) {\n  BasicBlock *BB = &F.getEntryBlock();\n  assert(pred_begin(BB) == pred_end(BB) &&\n         \"Assume that entry block has no predecessors\");\n\n  IRBuilder<> IRB(&*(BB->getFirstInsertionPt()));\n  IRB.CreateCall(TaintPushStackFrameFn);\n\n  // Recover ctx at the end of a function\n  for (auto FI = F.begin(), FE = F.end(); FI != FE; FI++) {\n    BasicBlock *BB = &*FI;\n    Instruction *Inst = BB->getTerminator();\n    if (isa<ReturnInst>(Inst) || isa<ResumeInst>(Inst)) {\n      IRB.SetInsertPoint(Inst);\n      IRB.CreateCall(TaintPopStackFrameFn);\n    }\n  }\n}\n\nbool Taint::initializeModule(Module &M) {\n  Triple TargetTriple(M.getTargetTriple());\n  const DataLayout &DL = M.getDataLayout();\n\n  if (TargetTriple.getOS() != Triple::Linux)\n    report_fatal_error(\"unsupported operating system\");\n  switch (TargetTriple.getArch()) {\n  case Triple::x86_64:\n    MapParams = &Linux_X86_64_MemoryMapParams;\n    break;\n  default:\n    report_fatal_error(\"unsupported architecture\");\n  }\n\n  Mod = &M;\n  Ctx = &M.getContext();\n  Int8Ty = IntegerType::get(*Ctx, 8);\n  Int16Ty = IntegerType::get(*Ctx, 16);\n  Int32Ty = IntegerType::get(*Ctx, 32);\n  Int64Ty = IntegerType::get(*Ctx, 64);\n  PrimitiveShadowTy = IntegerType::get(*Ctx, ShadowWidthBits);\n  PrimitiveShadowPtrTy = PointerType::getUnqual(PrimitiveShadowTy);\n  IntptrTy = DL.getIntPtrType(*Ctx);\n  VoidPtrTy = PointerType::getUnqual(Int8Ty);\n  ZeroPrimitiveShadow = ConstantInt::getSigned(PrimitiveShadowTy, 0);\n  UninitializedPrimitiveShadow = ConstantInt::getSigned(PrimitiveShadowTy, -1);\n  ShadowPtrMul = 
ConstantInt::get(IntptrTy, ShadowWidthBytes);\n  ShadowPtrAndMask = ShadowPtrXorMask = ShadowPtrBase = nullptr;\n  if (MapParams->AndMask != 0)\n    ShadowPtrAndMask = ConstantInt::get(IntptrTy, ~MapParams->AndMask);\n  if (MapParams->XorMask != 0)\n    ShadowPtrXorMask = ConstantInt::get(IntptrTy, MapParams->XorMask);\n  if (MapParams->ShadowBase != 0)\n    ShadowPtrBase = ConstantInt::get(IntptrTy, MapParams->ShadowBase);\n\n  Type *TaintUnionArgs[6] = { PrimitiveShadowTy, PrimitiveShadowTy,\n      Int16Ty, Int16Ty, Int64Ty, Int64Ty};\n  TaintUnionFnTy = FunctionType::get(\n      PrimitiveShadowTy, TaintUnionArgs, /*isVarArg=*/ false);\n  Type *TaintUnionLoadArgs[3] = { PrimitiveShadowPtrTy, IntptrTy, Int64Ty };\n  TaintUnionLoadFnTy = FunctionType::get(\n      PrimitiveShadowTy, TaintUnionLoadArgs, /*isVarArg=*/ false);\n  Type *TaintUnionStoreArgs[4] = { PrimitiveShadowTy, PrimitiveShadowPtrTy,\n      IntptrTy, Int64Ty };\n  TaintUnionStoreFnTy = FunctionType::get(\n      Type::getVoidTy(*Ctx), TaintUnionStoreArgs, /*isVarArg=*/ false);\n  TaintUnimplementedFnTy = FunctionType::get(\n      Type::getVoidTy(*Ctx), Type::getInt8PtrTy(*Ctx), /*isVarArg=*/false);\n  Type *TaintSetLabelArgs[3] = { PrimitiveShadowTy, Type::getInt8PtrTy(*Ctx),\n      IntptrTy };\n  TaintSetLabelFnTy = FunctionType::get(Type::getVoidTy(*Ctx),\n                                        TaintSetLabelArgs, /*isVarArg=*/false);\n  TaintNonzeroLabelFnTy = FunctionType::get(\n      Type::getVoidTy(*Ctx), None, /*isVarArg=*/false);\n  TaintVarargWrapperFnTy = FunctionType::get(\n      Type::getVoidTy(*Ctx), Type::getInt8PtrTy(*Ctx), /*isVarArg=*/false);\n  Type *TaintTraceCmpArgs[7] = { PrimitiveShadowTy, PrimitiveShadowTy,\n      Int32Ty, Int32Ty, Int64Ty, Int64Ty, Int32Ty };\n  TaintTraceCmpFnTy = FunctionType::get(\n      Type::getVoidTy(*Ctx), TaintTraceCmpArgs, false);\n  Type *TaintTraceCondArgs[4] = { PrimitiveShadowTy, IntegerType::get(*Ctx, 1),\n      Int8Ty, Int32Ty };\n  
TaintTraceCondFnTy = FunctionType::get(\n      Type::getVoidTy(*Ctx), TaintTraceCondArgs, false);\n  TaintTraceLoopFnTy = FunctionType::get(\n      Type::getVoidTy(*Ctx), { Int32Ty, Int32Ty }, false);\n  TaintTraceSwitchEndFnTy = FunctionType::get(\n      Type::getVoidTy(*Ctx), { Int32Ty }, false);\n  Type *TaintTraceSelectArgs[] = { PrimitiveShadowTy, PrimitiveShadowTy,\n      PrimitiveShadowTy, Int8Ty, Int8Ty, Int8Ty, Int32Ty };\n  TaintTraceSelectFnTy = FunctionType::get(\n      PrimitiveShadowTy, TaintTraceSelectArgs, false);\n  TaintTraceIndirectCallFnTy = FunctionType::get(\n      Type::getVoidTy(*Ctx), { PrimitiveShadowTy }, false);\n  Type *TaintTraceGEPArgs[8] = { PrimitiveShadowTy, Int64Ty, PrimitiveShadowTy,\n      Int64Ty, Int64Ty, Int64Ty, Int64Ty, Int32Ty };\n  TaintTraceGEPFnTy = FunctionType::get(\n      Type::getVoidTy(*Ctx), TaintTraceGEPArgs, false);\n  // __taint_trace_gep_ptr(base_label, offset) -> new_label\n  TaintTraceGEPPtrFnTy = FunctionType::get(\n      Type::getVoidTy(*Ctx), { PrimitiveShadowTy, VoidPtrTy, VoidPtrTy }, false);\n  TaintPushStackFrameFnTy = FunctionType::get(\n      Type::getVoidTy(*Ctx), {}, false);\n  TaintPopStackFrameFnTy = FunctionType::get(\n      Type::getVoidTy(*Ctx), {}, false);\n  Type *TaintTraceAllocaArgs[4] =\n      { PrimitiveShadowTy, Int64Ty, Int64Ty, Int64Ty };\n  TaintTraceAllocaFnTy = FunctionType::get(\n      PrimitiveShadowTy, TaintTraceAllocaArgs, false);\n  TaintCheckBoundsFnTy = FunctionType::get(\n      Type::getVoidTy(*Ctx),\n      { PrimitiveShadowTy, Int64Ty, PrimitiveShadowTy, Int64Ty }, false);\n  TaintSolveBoundsFnTy = FunctionType::get(\n      Type::getVoidTy(*Ctx), TaintTraceGEPArgs, false); // use the same args as GEP\n  TaintSolveSizeFnTy = FunctionType::get(\n      Type::getVoidTy(*Ctx),\n      { PrimitiveShadowTy, Int64Ty, PrimitiveShadowTy, Int64Ty, Int32Ty }, false);\n  TaintTraceGlobalFnTy = FunctionType::get(\n      PrimitiveShadowTy, { Int64Ty, Int64Ty }, false);\n\n  
TaintDebugFnTy = FunctionType::get(Type::getVoidTy(*Ctx),\n      {PrimitiveShadowTy, PrimitiveShadowTy, PrimitiveShadowTy,\n       PrimitiveShadowTy, PrimitiveShadowTy}, false);\n\n  ColdCallWeights = MDBuilder(*Ctx).createBranchWeights(1, 1000);\n  return true;\n}\n\nbool Taint::isInstrumented(const Function *F) {\n  return !ABIList.isIn(*F, \"uninstrumented\");\n}\n\nbool Taint::isInstrumented(const GlobalAlias *GA) {\n  return !ABIList.isIn(*GA, \"uninstrumented\");\n}\n\nbool Taint::isForceZeroLabels(const Function *F) {\n  return ABIList.isIn(*F, \"force_zero_labels\");\n}\n\nTaint::WrapperKind Taint::getWrapperKind(Function *F) {\n  // priority custom\n  if (ABIList.isIn(*F, \"custom\"))\n    return WK_Custom;\n  if (ABIList.isIn(*F, \"memcmp\"))\n    return WK_Memcmp;\n  if (ABIList.isIn(*F, \"strcmp\"))\n    return WK_Strcmp;\n  if (ABIList.isIn(*F, \"strncmp\"))\n    return WK_Strncmp;\n  if (ABIList.isIn(*F, \"strchr\"))\n    return WK_Strchr;\n  if (ABIList.isIn(*F, \"strrchr\"))\n    return WK_Strrchr;\n  if (ABIList.isIn(*F, \"strstr\"))\n    return WK_Strstr;\n  if (ABIList.isIn(*F, \"prefixof\"))\n    return WK_Prefixof;\n  if (ABIList.isIn(*F, \"suffixof\"))\n    return WK_Suffixof;\n  if (ABIList.isIn(*F, \"strcat\"))\n    return WK_Strcat;\n  if (ABIList.isIn(*F, \"strsub\"))\n    return WK_Strsub;\n  if (ABIList.isIn(*F, \"functional\"))\n    return WK_Functional;\n  if (ABIList.isIn(*F, \"discard\"))\n    return WK_Discard;\n\n  return WK_Warning;\n}\n\nvoid Taint::addGlobalNameSuffix(GlobalValue *GV) {\n  std::string GVName = std::string(GV->getName()), Suffix = \".taint\";\n  GV->setName(GVName + Suffix);\n\n  // Try to change the name of the function in module inline asm.  
We only do\n  // this for specific asm directives, currently only \".symver\", to try to avoid\n  // corrupting asm which happens to contain the symbol name as a substring.\n  // Note that the substitution for .symver assumes that the versioned symbol\n  // also has an instrumented name.\n  std::string Asm = GV->getParent()->getModuleInlineAsm();\n  std::string SearchStr = \".symver \" + GVName + \",\";\n  size_t Pos = Asm.find(SearchStr);\n  if (Pos != std::string::npos) {\n    Asm.replace(Pos, SearchStr.size(), \".symver \" + GVName + Suffix + \",\");\n    Pos = Asm.find(\"@\");\n\n    if (Pos == std::string::npos)\n      report_fatal_error(Twine(\"unsupported .symver: \", Asm));\n\n    Asm.replace(Pos, 1, Suffix + \"@\");\n    GV->getParent()->setModuleInlineAsm(Asm);\n  }\n}\n\nFunction *\nTaint::buildWrapperFunction(Function *F, StringRef NewFName,\n                            GlobalValue::LinkageTypes NewFLink,\n                            FunctionType *NewFT) {\n  FunctionType *FT = F->getFunctionType();\n  Function *NewF = Function::Create(NewFT, NewFLink, F->getAddressSpace(),\n                                    NewFName, F->getParent());\n  NewF->copyAttributesFrom(F);\n  NewF->removeRetAttrs(\n      AttributeFuncs::typeIncompatible(NewFT->getReturnType()));\n\n  BasicBlock *BB = BasicBlock::Create(*Ctx, \"entry\", NewF);\n  if (F->isVarArg() && getWrapperKind(F) != WK_Custom) {\n    // keep the invocation if custom (e.g., open)\n    NewF->removeFnAttr(\"split-stack\");\n    CallInst::Create(TaintVarargWrapperFn,\n                     IRBuilder<>(BB).CreateGlobalStringPtr(F->getName()), \"\",\n                     BB);\n    new UnreachableInst(*Ctx, BB);\n  } else {\n    auto ArgIt = pointer_iterator<Argument *>(NewF->arg_begin());\n    std::vector<Value *> Args(ArgIt, ArgIt + FT->getNumParams());\n\n    CallInst *CI = CallInst::Create(F, Args, \"\", BB);\n    if (FT->getReturnType()->isVoidTy())\n      ReturnInst::Create(*Ctx, BB);\n    else\n      
ReturnInst::Create(*Ctx, CI, BB);\n  }\n\n  return NewF;\n}\n\nConstant *Taint::getOrBuildTrampolineFunction(FunctionType *FT,\n                                              StringRef FName) {\n  FunctionType *FTT = getTrampolineFunctionType(FT);\n  FunctionCallee C = Mod->getOrInsertFunction(FName, FTT);\n  Function *F = dyn_cast<Function>(C.getCallee());\n  if (F && F->isDeclaration()) {\n    F->setLinkage(GlobalValue::LinkOnceODRLinkage);\n    BasicBlock *BB = BasicBlock::Create(*Ctx, \"entry\", F);\n    std::vector<Value *> Args;\n    Function::arg_iterator AI = F->arg_begin() + 1;\n    for (unsigned N = FT->getNumParams(); N != 0; ++AI, --N)\n      Args.push_back(&*AI);\n    CallInst *CI = CallInst::Create(FT, &*F->arg_begin(), Args, \"\", BB);\n    Type *RetType = FT->getReturnType();\n    ReturnInst *RI = RetType->isVoidTy() ? ReturnInst::Create(*Ctx, BB)\n                                         : ReturnInst::Create(*Ctx, CI, BB);\n\n    // F is called by a wrapped custom function with primitive shadows. 
So\n    // its arguments and return value need conversion.\n    TaintFunction TF(*this, F, /*IsNativeABI=*/true,\n                     /*IsForceZeroLabels=*/false);\n    Function::arg_iterator ValAI = F->arg_begin(), ShadowAI = AI;\n    ++ValAI;\n    for (unsigned N = FT->getNumParams(); N != 0; ++ValAI, ++ShadowAI, --N) {\n      // we don't collapse or expand the shadow\n      TF.ValShadowMap[&*ValAI] = &*ShadowAI;\n    }\n    Function::arg_iterator RetShadowAI = ShadowAI;\n    TaintVisitor(TF).visitCallInst(*CI);\n    if (!RetType->isVoidTy()) {\n      // we don't collapse or expand the shadow\n      new StoreInst(TF.getShadow(RI->getReturnValue()),\n                    &*std::prev(F->arg_end()), RI);\n    }\n  }\n\n  return cast<Constant>(C.getCallee());\n}\n\n// Initialize DataFlowSanitizer runtime functions and declare them in the module\nvoid Taint::initializeRuntimeFunctions(Module &M) {\n  {\n    AttributeList AL;\n    AL = AL.addFnAttribute(M.getContext(), Attribute::NoUnwind);\n    AL = AL.addRetAttribute(M.getContext(), Attribute::ZExt);\n    AL = AL.addParamAttribute(M.getContext(), 0, Attribute::ZExt);\n    AL = AL.addParamAttribute(M.getContext(), 1, Attribute::ZExt);\n    TaintUnionFn =\n        Mod->getOrInsertFunction(\"__taint_union\", TaintUnionFnTy, AL);\n  }\n  {\n    AttributeList AL;\n    AL = AL.addFnAttribute(M.getContext(), Attribute::NoUnwind);\n    AL = AL.addRetAttribute(M.getContext(), Attribute::ZExt);\n    AL = AL.addParamAttribute(M.getContext(), 0, Attribute::ZExt);\n    AL = AL.addParamAttribute(M.getContext(), 1, Attribute::ZExt);\n    TaintCheckedUnionFn =\n        Mod->getOrInsertFunction(\"taint_union\", TaintUnionFnTy, AL);\n  }\n  {\n    AttributeList AL;\n    AL = AL.addFnAttribute(M.getContext(), Attribute::NoUnwind);\n    AL = AL.addRetAttribute(M.getContext(), Attribute::ZExt);\n    TaintUnionLoadFn =\n        Mod->getOrInsertFunction(\"__taint_union_load\", TaintUnionLoadFnTy, AL);\n  }\n  {\n    AttributeList AL;\n    
AL = AL.addFnAttribute(M.getContext(), Attribute::NoUnwind);\n    AL = AL.addParamAttribute(M.getContext(), 0, Attribute::ZExt);\n    TaintUnionStoreFn =\n        Mod->getOrInsertFunction(\"__taint_union_store\", TaintUnionStoreFnTy, AL);\n  }\n  {\n    TaintUnimplementedFn =\n        Mod->getOrInsertFunction(\"__dfsan_unimplemented\", TaintUnimplementedFnTy);\n  }\n  {\n    AttributeList AL;\n    AL = AL.addParamAttribute(M.getContext(), 0, Attribute::ZExt);\n    TaintSetLabelFn =\n        Mod->getOrInsertFunction(\"__dfsan_set_label\", TaintSetLabelFnTy, AL);\n  }\n  {\n    TaintNonzeroLabelFn =\n        Mod->getOrInsertFunction(\"__dfsan_nonzero_label\", TaintNonzeroLabelFnTy);\n  }\n  {\n    TaintVarargWrapperFn = Mod->getOrInsertFunction(\"__dfsan_vararg_wrapper\",\n                                                    TaintVarargWrapperFnTy);\n  }\n  {\n    TaintDebugFn =\n        Mod->getOrInsertFunction(\"__taint_debug\", TaintDebugFnTy);\n  }\n\n  TaintRuntimeFunctions.insert(\n      TaintUnionFn.getCallee()->stripPointerCasts());\n  TaintRuntimeFunctions.insert(\n      TaintCheckedUnionFn.getCallee()->stripPointerCasts());\n  TaintRuntimeFunctions.insert(\n      TaintUnionLoadFn.getCallee()->stripPointerCasts());\n  TaintRuntimeFunctions.insert(\n      TaintUnionStoreFn.getCallee()->stripPointerCasts());\n  TaintRuntimeFunctions.insert(\n      TaintSetLabelFn.getCallee()->stripPointerCasts());\n  TaintRuntimeFunctions.insert(\n      TaintUnimplementedFn.getCallee()->stripPointerCasts());\n  TaintRuntimeFunctions.insert(\n      TaintNonzeroLabelFn.getCallee()->stripPointerCasts());\n  TaintRuntimeFunctions.insert(\n      TaintVarargWrapperFn.getCallee()->stripPointerCasts());\n  TaintRuntimeFunctions.insert(\n      TaintDebugFn.getCallee()->stripPointerCasts());\n}\n\n// Initializes event callback functions and declare them in the module\nvoid Taint::initializeCallbackFunctions(Module &M) {\n  {\n    AttributeList AL;\n    AL = 
AL.addFnAttribute(M.getContext(), Attribute::NoUnwind);\n    AL = AL.addFnAttribute(M.getContext(), Attribute::NoMerge);\n    AL = AL.addParamAttribute(M.getContext(), 0, Attribute::ZExt);\n    AL = AL.addParamAttribute(M.getContext(), 1, Attribute::ZExt);\n    TaintTraceCmpFn =\n        Mod->getOrInsertFunction(\"__taint_trace_cmp\", TaintTraceCmpFnTy, AL);\n  }\n  {\n    AttributeList AL;\n    AL = AL.addFnAttribute(M.getContext(), Attribute::NoUnwind);\n    AL = AL.addFnAttribute(M.getContext(), Attribute::NoMerge);\n    AL = AL.addParamAttribute(M.getContext(), 0, Attribute::ZExt);\n    AL = AL.addParamAttribute(M.getContext(), 1, Attribute::ZExt);\n    TaintTraceCondFn =\n        Mod->getOrInsertFunction(\"__taint_trace_cond\", TaintTraceCondFnTy, AL);\n  }\n  {\n    AttributeList AL;\n    AL = AL.addFnAttribute(M.getContext(), Attribute::NoUnwind);\n    AL = AL.addFnAttribute(M.getContext(), Attribute::NoMerge);\n    TaintTraceLoopFn =\n        Mod->getOrInsertFunction(\"__taint_trace_loop\", TaintTraceLoopFnTy, AL);\n  }\n  {\n    AttributeList AL;\n    AL = AL.addFnAttribute(M.getContext(), Attribute::NoUnwind);\n    AL = AL.addFnAttribute(M.getContext(), Attribute::NoMerge);\n    TaintTraceSwitchEndFn =\n        Mod->getOrInsertFunction(\"__taint_trace_switch_end\", TaintTraceSwitchEndFnTy, AL);\n  }\n  {\n    AttributeList AL;\n    AL = AL.addFnAttribute(M.getContext(), Attribute::NoUnwind);\n    AL = AL.addFnAttribute(M.getContext(), Attribute::NoMerge);\n    AL = AL.addParamAttribute(M.getContext(), 0, Attribute::ZExt);\n    AL = AL.addParamAttribute(M.getContext(), 1, Attribute::ZExt);\n    AL = AL.addParamAttribute(M.getContext(), 2, Attribute::ZExt);\n    TaintTraceSelectFn =\n        Mod->getOrInsertFunction(\"__taint_trace_select\", TaintTraceSelectFnTy, AL);\n  }\n  {\n    AttributeList AL;\n    AL = AL.addFnAttribute(M.getContext(), Attribute::NoUnwind);\n    AL = AL.addFnAttribute(M.getContext(), Attribute::NoMerge);\n    AL = 
AL.addParamAttribute(M.getContext(), 0, Attribute::ZExt);\n    TaintTraceIndirectCallFn =\n        Mod->getOrInsertFunction(\"__taint_trace_indcall\", TaintTraceIndirectCallFnTy, AL);\n  }\n  {\n    AttributeList AL;\n    AL = AL.addFnAttribute(M.getContext(), Attribute::NoUnwind);\n    AL = AL.addFnAttribute(M.getContext(), Attribute::NoMerge);\n    AL = AL.addParamAttribute(M.getContext(), 0, Attribute::ZExt);\n    AL = AL.addParamAttribute(M.getContext(), 2, Attribute::ZExt);\n    TaintTraceGEPFn =\n        Mod->getOrInsertFunction(\"__taint_trace_gep\", TaintTraceGEPFnTy, AL);\n  }\n  {\n    AttributeList AL;\n    AL = AL.addFnAttribute(M.getContext(), Attribute::NoUnwind);\n    AL = AL.addParamAttribute(M.getContext(), 0, Attribute::ZExt);\n    TaintTraceGEPPtrFn =\n        Mod->getOrInsertFunction(\"__taint_trace_gep_ptr\", TaintTraceGEPPtrFnTy, AL);\n  }\n  {\n    AttributeList AL;\n    AL = AL.addFnAttribute(M.getContext(), Attribute::NoUnwind);\n    TaintPushStackFrameFn =\n        Mod->getOrInsertFunction(\"__taint_push_stack_frame\", TaintPushStackFrameFnTy, AL);\n  }\n  {\n    AttributeList AL;\n    AL = AL.addFnAttribute(M.getContext(), Attribute::NoUnwind);\n    TaintPopStackFrameFn =\n        Mod->getOrInsertFunction(\"__taint_pop_stack_frame\", TaintPopStackFrameFnTy, AL);\n  }\n  {\n    AttributeList AL;\n    AL = AL.addFnAttribute(M.getContext(), Attribute::NoUnwind);\n    AL = AL.addRetAttribute(M.getContext(), Attribute::ZExt);\n    AL = AL.addParamAttribute(M.getContext(), 0, Attribute::ZExt);\n    TaintTraceAllocaFn =\n        Mod->getOrInsertFunction(\"__taint_trace_alloca\", TaintTraceAllocaFnTy, AL);\n  }\n  {\n    AttributeList AL;\n    AL = AL.addFnAttribute(M.getContext(), Attribute::NoUnwind);\n    AL = AL.addRetAttribute(M.getContext(), Attribute::ZExt);\n    AL = AL.addParamAttribute(M.getContext(), 1, Attribute::ZExt);\n    TaintTraceGlobalFn =\n        Mod->getOrInsertFunction(\"__taint_trace_global\", TaintTraceGlobalFnTy, AL);\n  
}\n  {\n    AttributeList AL;\n    AL = AL.addFnAttribute(M.getContext(), Attribute::NoUnwind);\n    AL = AL.addFnAttribute(M.getContext(), Attribute::NoMerge);\n    AL = AL.addParamAttribute(M.getContext(), 0, Attribute::ZExt);\n    TaintCheckBoundsFn =\n        Mod->getOrInsertFunction(\"__taint_check_bounds\", TaintCheckBoundsFnTy, AL);\n  }\n  {\n    AttributeList AL;\n    AL = AL.addFnAttribute(M.getContext(), Attribute::NoUnwind);\n    AL = AL.addFnAttribute(M.getContext(), Attribute::NoMerge);\n    AL = AL.addParamAttribute(M.getContext(), 0, Attribute::ZExt);\n    AL = AL.addParamAttribute(M.getContext(), 2, Attribute::ZExt);\n    TaintSolveBoundsFn =\n        Mod->getOrInsertFunction(\"__taint_solve_bounds\", TaintSolveBoundsFnTy, AL);\n  }\n  {\n    AttributeList AL;\n    AL = AL.addFnAttribute(M.getContext(), Attribute::NoUnwind);\n    AL = AL.addFnAttribute(M.getContext(), Attribute::NoMerge);\n    AL = AL.addParamAttribute(M.getContext(), 0, Attribute::ZExt);\n    AL = AL.addParamAttribute(M.getContext(), 2, Attribute::ZExt);\n    TaintSolveSizeFn =\n        Mod->getOrInsertFunction(\"__taint_solve_size\", TaintSolveSizeFnTy, AL);\n  }\n\n  TaintRuntimeFunctions.insert(\n      TaintTraceCmpFn.getCallee()->stripPointerCasts());\n  TaintRuntimeFunctions.insert(\n      TaintTraceCondFn.getCallee()->stripPointerCasts());\n  TaintRuntimeFunctions.insert(\n      TaintTraceLoopFn.getCallee()->stripPointerCasts());\n  TaintRuntimeFunctions.insert(\n      TaintTraceSwitchEndFn.getCallee()->stripPointerCasts());\n  TaintRuntimeFunctions.insert(\n      TaintTraceSelectFn.getCallee()->stripPointerCasts());\n  TaintRuntimeFunctions.insert(\n      TaintTraceIndirectCallFn.getCallee()->stripPointerCasts());\n  TaintRuntimeFunctions.insert(\n      TaintTraceGEPFn.getCallee()->stripPointerCasts());\n  TaintRuntimeFunctions.insert(\n      TaintTraceGEPPtrFn.getCallee()->stripPointerCasts());\n  TaintRuntimeFunctions.insert(\n      
TaintPushStackFrameFn.getCallee()->stripPointerCasts());\n  TaintRuntimeFunctions.insert(\n      TaintPopStackFrameFn.getCallee()->stripPointerCasts());\n  TaintRuntimeFunctions.insert(\n      TaintTraceAllocaFn.getCallee()->stripPointerCasts());\n  TaintRuntimeFunctions.insert(\n      TaintTraceGlobalFn.getCallee()->stripPointerCasts());\n  TaintRuntimeFunctions.insert(\n      TaintCheckBoundsFn.getCallee()->stripPointerCasts());\n  TaintRuntimeFunctions.insert(\n      TaintSolveBoundsFn.getCallee()->stripPointerCasts());\n  TaintRuntimeFunctions.insert(\n      TaintSolveSizeFn.getCallee()->stripPointerCasts());\n}\n\nbool Taint::runImpl(Module &M) {\n  initializeModule(M);\n\n  if (ABIList.isIn(M, \"skip\"))\n    return false;\n\n  const unsigned InitialGlobalSize = M.global_size();\n  const unsigned InitialModuleSize = M.size();\n\n  bool Changed = false;\n\n  auto GetOrInsertGlobal = [this, &Changed](StringRef Name,\n                                            Type *Ty) -> Constant * {\n    Constant *C = Mod->getOrInsertGlobal(Name, Ty);\n    if (GlobalVariable *G = dyn_cast<GlobalVariable>(C)) {\n      Changed |= G->getThreadLocalMode() != GlobalVariable::InitialExecTLSModel;\n      G->setThreadLocalMode(GlobalVariable::InitialExecTLSModel);\n    }\n    return C;\n  };\n\n  // These globals must be kept in sync with the ones in dfsan.cpp.\n  ArgTLS =\n      GetOrInsertGlobal(\"__dfsan_arg_tls\",\n                        ArrayType::get(Int64Ty, ArgTLSSize / 8));\n  RetvalTLS =\n      GetOrInsertGlobal(\"__dfsan_retval_tls\",\n                        ArrayType::get(Int64Ty, RetvalTLSSize / 8));\n  CallStack = GetOrInsertGlobal(\"__taint_trace_callstack\", Int32Ty);\n\n  initializeCallbackFunctions(M);\n  initializeRuntimeFunctions(M);\n\n  std::vector<Function *> FnsToInstrument;\n  SmallPtrSet<Function *, 8> IFuncs;\n  SmallPtrSet<Function *, 2> FnsWithNativeABI;\n  SmallPtrSet<Function *, 2> FnsWithForceZeroLabel;\n  SmallPtrSet<Constant *, 1> 
PersonalityFns;\n\n  // find ifunc resolvers and their dependencies, we can't instrument them\n  // as dfsan initialization is not done yet\n  for (auto &ifunc : M.ifuncs()) {\n    auto *resolver = ifunc.getResolverFunction();\n    IFuncs.insert(resolver);\n    for (auto &I : instructions(resolver)) {\n      if (CallBase *CB = dyn_cast<CallBase>(&I)) {\n        if (Function *Callee = CB->getCalledFunction()) {\n          IFuncs.insert(Callee);\n        }\n      }\n    }\n  }\n\n  for (Function &F : M) {\n    if (!F.isIntrinsic() && !TaintRuntimeFunctions.count(&F) &&\n        !IFuncs.count(&F)) {\n      FnsToInstrument.push_back(&F);\n      if (F.hasPersonalityFn())\n        PersonalityFns.insert(F.getPersonalityFn());\n    }\n  }\n\n  if (ClIgnorePersonalityRoutine) {\n    for (auto *C : PersonalityFns) {\n      assert(isa<Function>(C) && \"Personality routine is not a function!\");\n      Function *F = cast<Function>(C);\n      if (!isInstrumented(F))\n        FnsToInstrument.erase(\n            std::remove(FnsToInstrument.begin(), FnsToInstrument.end(), F),\n            FnsToInstrument.end());\n    }\n  }\n\n  // Give function aliases suffixes when necessary, and build wrappers where the\n  // instrumentedness is inconsistent.\n  for (GlobalAlias &GA : llvm::make_early_inc_range(M.aliases())) {\n    // Don't stop on weak.  We assume people aren't playing games with the\n    // instrumentedness of overridden weak aliases.\n    auto F = dyn_cast<Function>(GA.getAliaseeObject());\n    if (!F)\n      continue;\n\n    bool GAInst = isInstrumented(&GA), FInst = isInstrumented(F);\n    if (GAInst && FInst) {\n      addGlobalNameSuffix(&GA);\n    } else if (GAInst != FInst) {\n      // Non-instrumented alias of an instrumented function, or vice versa.\n      // Replace the alias with a native-ABI wrapper of the aliasee.  
The pass\n      // below will take care of instrumenting it.\n      Function *NewF =\n          buildWrapperFunction(F, \"\", GA.getLinkage(), F->getFunctionType());\n      GA.replaceAllUsesWith(ConstantExpr::getBitCast(NewF, GA.getType()));\n      NewF->takeName(&GA);\n      GA.eraseFromParent();\n      FnsToInstrument.push_back(NewF);\n    }\n  }\n\n  ReadOnlyNoneAttrs.addAttribute(Attribute::ReadOnly)\n      .addAttribute(Attribute::ReadNone);\n\n  // First, change the ABI of every function in the module.  ABI-listed\n  // functions keep their original ABI and get a wrapper function.\n  for (std::vector<Function *>::iterator FI = FnsToInstrument.begin(),\n                                         FE = FnsToInstrument.end();\n       FI != FE; ++FI) {\n    Function &F = **FI;\n    FunctionType *FT = F.getFunctionType();\n\n    bool IsZeroArgsVoidRet = (FT->getNumParams() == 0 && !FT->isVarArg() &&\n                              FT->getReturnType()->isVoidTy());\n\n    if (isInstrumented(&F)) {\n      if (isForceZeroLabels(&F))\n        FnsWithForceZeroLabel.insert(&F);\n\n      // Instrumented functions get a '.taint' sufffix.  This allows us to more\n      // easily identify cases of mismatching ABIs. This naming scheme is\n      // mangling-compatible (see Itanium ABI), using a vendor-specific suffix.\n      addGlobalNameSuffix(&F);\n    } else if (!IsZeroArgsVoidRet || getWrapperKind(&F) == WK_Custom) {\n      if (FT->isVarArg() && F.isDeclaration() && F.hasAddressTaken() &&\n          !isInstrumented(&F)) {\n        // FIXME: vararg functions do used as indirect call targets\n        *FI = nullptr;\n        continue;\n      }\n\n      // Build a wrapper function for F.  
The wrapper simply calls F, and is\n      // added to FnsToInstrument so that any instrumentation according to its\n      // WrapperKind is done in the second pass below.\n\n      // If the function being wrapped has local linkage, then preserve the\n      // function's linkage in the wrapper function.\n      GlobalValue::LinkageTypes wrapperLinkage =\n          F.hasLocalLinkage() ? F.getLinkage()\n                              : GlobalValue::LinkOnceODRLinkage;\n\n      Function *NewF = buildWrapperFunction(\n          &F,\n          std::string(\"dfsw$\") + std::string(F.getName()),\n          wrapperLinkage, FT);\n      NewF->removeFnAttrs(ReadOnlyNoneAttrs);\n\n      Value *WrappedFnCst =\n          ConstantExpr::getBitCast(NewF, PointerType::getUnqual(FT));\n      F.replaceAllUsesWith(WrappedFnCst);\n\n      UnwrappedFnMap[WrappedFnCst] = &F;\n      *FI = NewF;\n\n      if (!F.isDeclaration()) {\n        // This function is probably defining an interposition of an\n        // uninstrumented function and hence needs to keep the original ABI.\n        // But any functions it may call need to use the instrumented ABI, so\n        // we instrument it in a mode which preserves the original ABI.\n        FnsWithNativeABI.insert(&F);\n\n        // This code needs to rebuild the iterators, as they may be invalidated\n        // by the push_back, taking care that the new range does not include\n        // any functions added by this code.\n        size_t N = FI - FnsToInstrument.begin(),\n               Count = FE - FnsToInstrument.begin();\n        FnsToInstrument.push_back(&F);\n        FI = FnsToInstrument.begin() + N;\n        FE = FnsToInstrument.begin() + Count;\n      }\n      // Hopefully, nobody will try to indirectly call a vararg\n      // function... 
yet.\n    } else if (FT->isVarArg()) {\n      UnwrappedFnMap[&F] = &F;\n      *FI = nullptr;\n    }\n  }\n\n  for (Function *F : FnsToInstrument) {\n    if (!F || F->isDeclaration())\n      continue;\n\n    addContextRecording(*F);\n    if (!F->getName().startswith(\"dfsw$\"))\n      addFrameTracing(*F);\n    removeUnreachableBlocks(*F);\n\n    TaintFunction TF(*this, F, FnsWithNativeABI.count(F),\n                     FnsWithForceZeroLabel.count(F));\n\n    // TaintVisitor may create new basic blocks, which confuses df_iterator.\n    // Build a copy of the list before iterating over it.\n    SmallVector<BasicBlock *, 4> BBList(depth_first(&F->getEntryBlock()));\n\n    for (BasicBlock *BB : BBList) {\n      // check for loop header\n      if (ClTraceLoop) {\n        if (TF.LI->isLoopHeader(BB)) {\n          // This is a loop header\n          Instruction *FI = &*(BB->getFirstInsertionPt());\n          ConstantInt *CID = ConstantInt::get(Int32Ty, getInstructionId(FI));\n          ConstantInt *LoopDepth = ConstantInt::get(Int32Ty, TF.LI->getLoopDepth(BB));\n          IRBuilder<> IRB(FI);\n          IRB.CreateCall(TaintTraceLoopFn, {CID, LoopDepth});\n        }\n        Loop *L = TF.LI->getLoopFor(BB);\n        if (L) {\n          for (BasicBlock *Succ : successors(BB)) {\n            if (!L->contains(Succ)) {\n              Instruction *FI = &*(Succ->getFirstInsertionPt());\n              IRBuilder<> IRB(FI);\n              ConstantInt *CID = ConstantInt::get(Int32Ty, getInstructionId(FI));\n              Loop *SuccL = TF.LI->getLoopFor(Succ);\n              int succ_depth = SuccL ? 
SuccL->getLoopDepth() : 0;\n              int depth = L->getLoopDepth();\n              ConstantInt *LoopDepth = ConstantInt::get(Int32Ty, succ_depth - depth);\n              IRB.CreateCall(TaintTraceLoopFn, {CID, LoopDepth});\n            }\n          }\n        }\n      }\n      Instruction *Inst = &BB->front();\n      while (true) {\n        // TaintVisitor may split the current basic block, changing the current\n        // instruction's next pointer and moving the next instruction to the\n        // tail block from which we should continue.\n        Instruction *Next = Inst->getNextNode();\n        // TaintVisitor may delete Inst, so keep track of whether it was a\n        // terminator.\n        bool IsTerminator = Inst->isTerminator();\n        if (!TF.SkipInsts.count(Inst))\n          TaintVisitor(TF).visit(Inst);\n        if (IsTerminator)\n          break;\n        Inst = Next;\n      }\n    }\n\n    // We will not necessarily be able to compute the shadow for every phi node\n    // until we have visited every block.  
Therefore, the code that handles phi\n    // nodes adds them to the PHIFixups list so that they can be properly\n    // handled here.\n    for (auto &P : TF.PHIFixups) {\n      for (unsigned Val = 0, N = P.Phi->getNumIncomingValues(); Val != N;\n           ++Val) {\n        P.ShadowPhi->setIncomingValue(\n            Val, TF.getShadow(P.Phi->getIncomingValue(Val)));\n      }\n    }\n  }\n\n  return Changed || !FnsToInstrument.empty() ||\n         M.global_size() != InitialGlobalSize || M.size() != InitialModuleSize;\n}\n\nValue *TaintFunction::getArgTLS(Type *T, unsigned ArgOffset, IRBuilder<> &IRB) {\n  Value *Base = IRB.CreatePointerCast(TT.ArgTLS, TT.IntptrTy);\n  if (ArgOffset)\n    Base = IRB.CreateAdd(Base, ConstantInt::get(TT.IntptrTy, ArgOffset));\n  return IRB.CreateIntToPtr(Base, PointerType::get(TT.getShadowTy(T), 0),\n                            \"_dfsarg\"); \n}\n\nValue *TaintFunction::getRetvalTLS(Type *T, IRBuilder<> &IRB) {\n  return IRB.CreatePointerCast(\n      TT.RetvalTLS, PointerType::get(TT.getShadowTy(T), 0), \"_dfsret\");\n}\n\nValue *TaintFunction::getShadowForTLSArgument(Argument *A) {\n  unsigned ArgOffset = 0;\n  const DataLayout &DL = F->getParent()->getDataLayout();\n  for (auto &FArg : F->args()) {\n    if (!FArg.getType()->isSized()) {\n      if (A == &FArg)\n        break;\n      continue;\n    }\n\n    unsigned Size = DL.getTypeAllocSize(TT.getShadowTy(&FArg));\n    if (A != &FArg) {\n      ArgOffset += alignTo(Size, ShadowTLSAlignment);\n      if (ArgOffset > ArgTLSSize)\n        break; // ArgTLS overflows, uses a zero shadow.\n      continue;\n    }\n\n    if (ArgOffset + Size > ArgTLSSize)\n      break; // ArgTLS overflows, uses a zero shadow.\n\n    Instruction *ArgTLSPos = &*F->getEntryBlock().begin();\n    IRBuilder<> IRB(ArgTLSPos);\n    Value *ArgShadowPtr = getArgTLS(FArg.getType(), ArgOffset, IRB);\n    return IRB.CreateAlignedLoad(TT.getShadowTy(&FArg), ArgShadowPtr,\n                                 
ShadowTLSAlignment);\n  }\n\n  return TT.getZeroShadow(A);\n}\n\nValue *TaintFunction::getShadowForGlobal(GlobalVariable *GV, IRBuilder<> &IRB) {\n  Type *T = GV->getValueType();\n  if (!T && GV->hasInitializer()) {\n    T = GV->getInitializer()->getType();\n  }\n  if (T && (T->isArrayTy() || T->isStructTy())) {\n    Module *M = F->getParent();\n    auto &DL = M->getDataLayout();\n    uint64_t size = T->isSized() ? DL.getTypeAllocSize(T) : 1; // FIXME: default size?\n    Value *Size = ConstantInt::get(TT.Int64Ty, size);\n    Value *Addr = IRB.CreatePtrToInt(GV, TT.Int64Ty);\n    return IRB.CreateCall(TT.TaintTraceGlobalFn, {Addr, Size});\n  }\n  return TT.ZeroPrimitiveShadow; // GV is always a ptr\n}\n\nValue *TaintFunction::getShadow(Value *V) {\n  if (!isa<Argument>(V) && !isa<Instruction>(V))\n    return TT.getZeroShadow(V);\n  if (IsForceZeroLabels)\n    return TT.getZeroShadow(V);\n  Value *&Shadow = ValShadowMap[V];\n  if (!Shadow) {\n    if (Argument *A = dyn_cast<Argument>(V)) {\n      if (IsNativeABI)\n        return TT.getZeroShadow(V);\n      Shadow = getShadowForTLSArgument(A);\n      NonZeroChecks.push_back(Shadow);\n    } else {\n      Shadow = TT.getZeroShadow(V);\n    }\n  }\n  return Shadow;\n}\n\nvoid TaintFunction::setShadow(Instruction *I, Value *Shadow) {\n  assert(!ValShadowMap.count(I));\n  ValShadowMap[I] = Shadow;\n}\n\n/// Compute the integer shadow offset that corresponds to a given\n/// application address.\n///\n/// Offset = (Addr & ~AndMask) ^ XorMask\nValue *Taint::getShadowOffset(Value *Addr, IRBuilder<> &IRB) {\n  assert(Addr != RetvalTLS && \"Reinstrumenting?\");\n  Value *OffsetLong = IRB.CreatePointerCast(Addr, IntptrTy);\n  if (ShadowPtrAndMask)\n    OffsetLong = IRB.CreateAnd(OffsetLong, ShadowPtrAndMask);\n  if (ShadowPtrXorMask)\n    OffsetLong = IRB.CreateXor(OffsetLong, ShadowPtrXorMask);\n  return OffsetLong;\n}\n\nValue *Taint::getShadowAddress(Value *Addr, IRBuilder<> &IRB) {\n  Value *ShadowLong = getShadowOffset(Addr, 
IRB);\n  if (ShadowPtrBase)\n    ShadowLong = IRB.CreateAdd(ShadowLong, ShadowPtrBase);\n  if (ShadowPtrMul)\n    ShadowLong = IRB.CreateMul(ShadowLong, ShadowPtrMul);\n  return IRB.CreateIntToPtr(ShadowLong, PrimitiveShadowPtrTy);\n}\n\nstatic inline bool isConstantOne(const Value *V) {\n  if (const ConstantInt *CI = dyn_cast<ConstantInt>(V))\n    return CI->isOne();\n  return false;\n}\n\nValue *TaintFunction::combineBinaryOperatorShadows(BinaryOperator *BO,\n                                                   uint8_t op) {\n  if (BO->getType()->isIntegerTy(1) &&\n      BO->getOpcode() == Instruction::Xor &&\n      (isConstantOne(BO->getOperand(1)) ||\n       isConstantOne(BO->getOperand(0)))) {\n    op = 1; // __dfsan::Not\n  }\n  // else if (BinaryOperator::isNeg(BO))\n  //   op = 2;\n  Value *Shadow1 = getShadow(BO->getOperand(0));\n  Value *Shadow2 = getShadow(BO->getOperand(1));\n  Value *Shadow = combineShadows(Shadow1, Shadow2, op, BO);\n  return Shadow;\n}\n\nValue *TaintFunction::combineShadows(Value *V1, Value *V2,\n                                     uint16_t op,\n                                     Instruction *Pos) {\n  if (TT.isZeroShadow(V1) && TT.isZeroShadow(V2)) return V1;\n\n  // filter types\n  Type *Ty = Pos->getOperand(0)->getType();\n  if (Ty->isFloatingPointTy()) {\n    // check for FP\n    if (!ClTraceFP)\n      return TT.getZeroShadow(Pos);\n  } else if (Ty->isVectorTy()) {\n    // FIXME: vector type\n    return TT.getZeroShadow(Pos);\n  } else if (!Ty->isIntegerTy() && !Ty->isPointerTy()) {\n    // not FP and not vector and not int and not ptr?\n    errs() << \"Unknown type: \" << *Pos << \"\\n\";\n    return TT.getZeroShadow(Pos);\n  }\n\n  // filter size\n  auto &DL = Pos->getModule()->getDataLayout();\n  uint64_t size = DL.getTypeSizeInBits(Pos->getType());\n  // FIXME: do not handle type larger than 64-bit\n  if (size > 64) return TT.getZeroShadow(Pos);\n\n  IRBuilder<> IRB(Pos);\n  if (CmpInst *CI = dyn_cast<CmpInst>(Pos)) { // 
for both icmp and fcmp\n    size = DL.getTypeSizeInBits(CI->getOperand(0)->getType());\n    // op should be predicate\n    op |= (CI->getPredicate() << 8);\n  }\n  Value *Op = ConstantInt::get(TT.Int16Ty, op);\n  Value *Size = ConstantInt::get(TT.Int16Ty, size);\n  Value *Op1 = Pos->getOperand(0);\n  Ty = Op1->getType();\n  // bitcast to integer before extending\n  if (Ty->isHalfTy())\n    Op1 = IRB.CreateBitCast(Op1, TT.Int16Ty);\n  else if (Ty->isFloatTy())\n    Op1 = IRB.CreateBitCast(Op1, TT.Int32Ty);\n  else if (Ty->isDoubleTy())\n    Op1 = IRB.CreateBitCast(Op1, TT.Int64Ty);\n  else if (Ty->isPointerTy())\n    Op1 = IRB.CreatePtrToInt(Op1, TT.Int64Ty);\n  Op1 = IRB.CreateZExtOrTrunc(Op1, TT.Int64Ty);\n  Value *Op2 = ConstantInt::get(TT.Int64Ty, 0);\n  if (Pos->getNumOperands() > 1) {\n    Op2 = Pos->getOperand(1);\n    Ty = Op2->getType();\n    // bitcast to integer before extending\n    if (Ty->isHalfTy())\n      Op2 = IRB.CreateBitCast(Op2, TT.Int16Ty);\n    else if (Ty->isFloatTy())\n      Op2 = IRB.CreateBitCast(Op2, TT.Int32Ty);\n    else if (Ty->isDoubleTy())\n      Op2 = IRB.CreateBitCast(Op2, TT.Int64Ty);\n    else if (Ty->isPointerTy())\n      Op2 = IRB.CreatePtrToInt(Op2, TT.Int64Ty);\n    Op2 = IRB.CreateZExtOrTrunc(Op2, TT.Int64Ty);\n  }\n  CallInst *Call = IRB.CreateCall(TT.TaintUnionFn, {V1, V2, Op, Size, Op1, Op2});\n  Call->addRetAttr(Attribute::ZExt);\n  Call->addParamAttr(0, Attribute::ZExt);\n  Call->addParamAttr(1, Attribute::ZExt);\n  return Call;\n}\n\nValue *TaintFunction::combineCastInstShadows(CastInst *CI,\n                                             uint8_t op) {\n  Value *Shadow1 = getShadow(CI->getOperand(0));\n  Value *Shadow2 = TT.getZeroShadow(CI);\n  if (op == Instruction::BitCast) {\n    // BitCast is a no-op, so we can just return the shadow of the operand.\n    return Shadow1;\n  } else if (op == Instruction::AddrSpaceCast) {\n    // AddrSpaceCast is also a no-op for taint, so we can just return the shadow\n    // of the 
operand.\n    return TT.ZeroPrimitiveShadow;\n  } else {\n    return combineShadows(Shadow1, Shadow2, op, CI);\n  }\n}\n\nValue *TaintFunction::combineCmpInstShadows(CmpInst *CI,\n                                            uint8_t op) {\n  Value *Shadow1 = getShadow(CI->getOperand(0));\n  Value *Shadow2 = getShadow(CI->getOperand(1));\n  Value *Shadow = combineShadows(Shadow1, Shadow2, op, CI);\n  return Shadow;\n}\n\nAlign TaintFunction::getShadowAlign(Align InstAlignment) {\n  const Align Alignment = ClPreserveAlignment ? InstAlignment : Align(1);\n  return Align(Alignment.value() * TT.ShadowWidthBytes);\n}\n\nvoid TaintFunction::checkBounds(Value *Ptr, Value* Size, Instruction *Pos) {\n  IRBuilder<> IRB(Pos);\n  // another place to check for global variable as the ptr\n  Value *PtrShadow = nullptr;\n  if (GlobalVariable *GV = dyn_cast<GlobalVariable>(Ptr->stripPointerCasts())) {\n    PtrShadow = getShadowForGlobal(GV, IRB);\n  } else {\n    PtrShadow = getShadow(Ptr);\n  }\n  Value *SizeShadow = getShadow(Size);\n  // ptr shadow only exists for array and heap object\n  if (!TT.isZeroShadow(PtrShadow)) {\n    Value *Addr = IRB.CreatePtrToInt(Ptr, TT.Int64Ty);\n    Value *Size64 = IRB.CreateZExtOrTrunc(Size, TT.Int64Ty);\n    IRB.CreateCall(TT.TaintCheckBoundsFn, {PtrShadow, Addr, SizeShadow, Size});\n  }\n}\n\nvoid TaintFunction::solveBounds(Value *Ptr, Value* Size, Instruction *Pos) {\n  Value *SizeShadow = getShadow(Size);\n  if (TT.isZeroShadow(SizeShadow)) {\n    // If the size is not symbolic, we cannot check if it can go out of bounds.\n    return;\n  }\n  IRBuilder<> IRB(Pos);\n  // another place to check for global variable as the ptr\n  Value *PtrShadow = nullptr;\n  if (GlobalVariable *GV = dyn_cast<GlobalVariable>(Ptr->stripPointerCasts())) {\n    PtrShadow = getShadowForGlobal(GV, IRB);\n  } else {\n    PtrShadow = getShadow(Ptr);\n  }\n  Value *Addr = IRB.CreatePtrToInt(Ptr, TT.Int64Ty);\n  Value *Size64 = IRB.CreateZExtOrTrunc(Size, TT.Int64Ty);\n  
ConstantInt *CID = ConstantInt::get(TT.Int32Ty, TT.getInstructionId(Pos));\n  IRB.CreateCall(TT.TaintSolveSizeFn,\n      {PtrShadow, Addr, SizeShadow, Size64, CID});\n}\n\n// Generates IR to load shadow corresponding to bytes [Addr, Addr+Size), where\n// Addr has alignment Align, and take the union of each of those shadows.\nValue *TaintFunction::loadPrimitiveShadow(Value *Addr, uint64_t Size,\n                                          uint64_t Align, IRBuilder<> &IRB) {\n  if (Size == 0)\n    return TT.ZeroPrimitiveShadow;\n\n  Value *ShadowAddr = TT.getShadowAddress(Addr, IRB);\n  CallInst *FallbackCall = IRB.CreateCall(\n      TT.TaintUnionLoadFn, {ShadowAddr, ConstantInt::get(TT.IntptrTy, Size),\n                            ConstantInt::get(TT.IntptrTy, Align)});\n  FallbackCall->addRetAttr(Attribute::ZExt);\n  return FallbackCall;\n}\n\nvoid TaintFunction::loadShadowRecursive(\n    Value *Shadow, SmallVector<unsigned, 4> &Indices, Type *SubTy,\n    Value *Addr, uint64_t Size, uint64_t Align, IRBuilder<> &IRB) {\n  auto &DL = F->getParent()->getDataLayout();\n\n  if (!isa<ArrayType>(SubTy) && !isa<StructType>(SubTy)) {\n    uint64_t SubSize = DL.getTypeStoreSize(SubTy);\n    assert(Size >= SubSize);\n    Align = std::min(Align, (uint64_t)DL.getABITypeAlignment(SubTy));\n    // load a primitive shadow from address\n    Value *PrimitiveShadow = loadPrimitiveShadow(Addr, SubSize, Align, IRB);\n    // then insert the primitive shadow into the sub-field\n    IRB.CreateInsertValue(Shadow, PrimitiveShadow, Indices);\n    return;\n  }\n\n  if (ArrayType *AT = dyn_cast<ArrayType>(SubTy)) {\n    for (unsigned Idx = 0; Idx < AT->getNumElements(); Idx++) {\n      Indices.push_back(Idx);\n      // double check the remaining size\n      Type *ElemTy = AT->getElementType();\n      uint64_t ElemSize = DL.getTypeStoreSize(ElemTy);\n      uint64_t Offset = ElemSize * Idx;\n      assert(Offset <= Size);\n      // get the address of the array element\n      Value *SubAddr = 
IRB.CreateConstGEP2_32(AT, Addr, 0, Idx);\n      loadShadowRecursive(Shadow, Indices, ElemTy,\n                          SubAddr, Size - Offset, Align, IRB);\n      Indices.pop_back();\n    }\n    return;\n  }\n\n  if (StructType *ST = dyn_cast<StructType>(SubTy)) {\n    const StructLayout *SL = DL.getStructLayout(ST);\n    for (unsigned Idx = 0; Idx < ST->getNumElements(); Idx++) {\n      Indices.push_back(Idx);\n      // double check the remaining size\n      uint64_t Offset = SL->getElementOffset(Idx);\n      assert(Offset <= Size);\n      Type *ElemTy = ST->getElementType(Idx);\n      // get the address of the struct field\n      Value *SubAddr = IRB.CreateConstGEP2_32(ST, Addr, 0, Idx);\n      loadShadowRecursive(Shadow, Indices, ElemTy,\n                          SubAddr, Size - Offset, Align, IRB);\n      Indices.pop_back();\n    }\n    return;\n  }\n  llvm_unreachable(\"Unexpected shadow type\");\n}\n\nValue *TaintFunction::loadShadow(Type *T, Value *Addr, uint64_t Size,\n                                 Align Alignment, Instruction *Pos) {\n  IRBuilder<> IRB(Pos);\n  // if loading from a local variable, load label from its shadow\n  if (AllocaInst *AI = dyn_cast<AllocaInst>(Addr)) {\n    const auto i = AllocaShadowMap.find(AI);\n    if (i != AllocaShadowMap.end()) {\n      return IRB.CreateLoad(TT.getShadowTy(T), i->second);\n    }\n  }\n\n  // check if the target object is a constant\n  SmallVector<const Value *, 2> Objs;\n  getUnderlyingObjects(Addr, Objs);\n  bool AllConstants = true;\n  for (const Value *Obj : Objs) {\n    if (isa<Function>(Obj) || isa<BlockAddress>(Obj))\n      continue;\n    if (isa<GlobalVariable>(Obj) && cast<GlobalVariable>(Obj)->isConstant())\n      continue;\n\n    AllConstants = false;\n    break;\n  }\n  if (AllConstants)\n    return TT.getZeroShadow(T);\n\n  if (Size == 0)\n    return TT.ZeroPrimitiveShadow;\n\n  const uint64_t ShadowAlign = getShadowAlign(Alignment).value();\n\n  // now check if we're loading an aggragate 
object\n  if (!isa<ArrayType>(T) && !isa<StructType>(T))\n    return loadPrimitiveShadow(Addr, Size, ShadowAlign, IRB);\n\n  // if loading an aggregate object, load its shadow recursively\n  SmallVector<unsigned, 4> Indices;\n  Type *ShadowTy = TT.getShadowTy(T);\n  Value *Shadow = UndefValue::get(ShadowTy);\n  loadShadowRecursive(Shadow, Indices, T, Addr, Size, ShadowAlign, IRB);\n  return Shadow;\n}\n\nstatic AtomicOrdering addAcquireOrdering(AtomicOrdering AO) {\n  switch (AO) {\n  case AtomicOrdering::NotAtomic:\n    return AtomicOrdering::NotAtomic;\n  case AtomicOrdering::Unordered:\n  case AtomicOrdering::Monotonic:\n  case AtomicOrdering::Acquire:\n    return AtomicOrdering::Acquire;\n  case AtomicOrdering::Release:\n  case AtomicOrdering::AcquireRelease:\n    return AtomicOrdering::AcquireRelease;\n  case AtomicOrdering::SequentiallyConsistent:\n    return AtomicOrdering::SequentiallyConsistent;\n  }\n  llvm_unreachable(\"Unknown ordering\");\n}\n\nstatic AtomicOrdering addReleaseOrdering(AtomicOrdering AO) {\n  switch (AO) {\n  case AtomicOrdering::NotAtomic:\n    return AtomicOrdering::NotAtomic;\n  case AtomicOrdering::Unordered:\n  case AtomicOrdering::Monotonic:\n  case AtomicOrdering::Release:\n    return AtomicOrdering::Release;\n  case AtomicOrdering::Acquire:\n  case AtomicOrdering::AcquireRelease:\n    return AtomicOrdering::AcquireRelease;\n  case AtomicOrdering::SequentiallyConsistent:\n    return AtomicOrdering::SequentiallyConsistent;\n  }\n  llvm_unreachable(\"Unknown ordering\");\n}\n\nvoid TaintVisitor::visitAtomicRMWInst(AtomicRMWInst &I) {\n  auto &DL = I.getModule()->getDataLayout();\n  Value *Ptr = I.getPointerOperand();\n  Value *Val = I.getValOperand();\n  Type *Ty = I.getType();\n  uint64_t Size = DL.getTypeStoreSize(Ty);\n\n  Value *Shadow1 = TF.loadShadow(Ty, Ptr, Size, I.getAlign(), &I);\n  Value *Shadow2 = TF.getShadow(Val);\n  Value *Shadow  = nullptr;\n  Value *Op1 = nullptr, *Cond = nullptr;\n  IRBuilder<> IRB(&I);\n\n  
switch (I.getOperation()) {\n    case AtomicRMWInst::Xchg:\n      Shadow = Shadow2;\n      break;\n    case AtomicRMWInst::Add:\n      Shadow = TF.combineShadows(Shadow1, Shadow2, BinaryOperator::Add, &I);\n      break;\n    case AtomicRMWInst::Sub:\n      Shadow = TF.combineShadows(Shadow1, Shadow2, BinaryOperator::Sub, &I);\n      break;\n    case AtomicRMWInst::And:\n      Shadow = TF.combineShadows(Shadow1, Shadow2, BinaryOperator::And, &I);\n      break;\n    case AtomicRMWInst::Nand:\n      Shadow = TF.combineShadows(Shadow1, Shadow2, BinaryOperator::And, &I);\n      Shadow = TF.combineShadows(TF.TT.getZeroShadow(Ty), Shadow, 2, &I); // __dfsan::Neg\n      break;\n    case AtomicRMWInst::Or:\n      Shadow = TF.combineShadows(Shadow1, Shadow2, BinaryOperator::Or, &I);\n      break;\n    case AtomicRMWInst::Xor:\n      Shadow = TF.combineShadows(Shadow1, Shadow2, BinaryOperator::Xor, &I);\n      break;\n    case AtomicRMWInst::Max:\n      Op1 = IRB.CreateLoad(Ty, Ptr, true);\n      Cond = IRB.CreateICmpSGT(Op1, Val);\n      Shadow = IRB.CreateSelect(Cond, Shadow1, Shadow2);\n      break;\n    case AtomicRMWInst::Min:\n      Op1 = IRB.CreateLoad(Ty, Ptr, true);\n      Cond = IRB.CreateICmpSLT(Op1, Val);\n      Shadow = IRB.CreateSelect(Cond, Shadow1, Shadow2);\n      break;\n    case AtomicRMWInst::UMax:\n      Op1 = IRB.CreateLoad(Ty, Ptr, true);\n      Cond = IRB.CreateICmpUGT(Op1, Val);\n      Shadow = IRB.CreateSelect(Cond, Shadow1, Shadow2);\n      break;\n    case AtomicRMWInst::UMin:\n      Op1 = IRB.CreateLoad(Ty, Ptr, true);\n      Cond = IRB.CreateICmpULT(Op1, Val);\n      Shadow = IRB.CreateSelect(Cond, Shadow1, Shadow2);\n      break;\n    // TODO: support extra operations\n    default:\n      assert(false && \"unimplemented atomicrmw operation\");\n      break;\n  }\n\n  TF.storeShadow(Ptr, Ty, Size, I.getAlign(), Shadow, &I);\n  TF.setShadow(&I, Shadow1);\n\n  // TODO: The ordering change follows MSan. 
It is possible not to change\n  // ordering because we always set and use 0 shadows.\n  I.setOrdering(addReleaseOrdering(I.getOrdering()));\n}\n\nvoid TaintVisitor::visitLoadInst(LoadInst &LI) {\n  if (LI.getMetadata(\"nosanitize\")) return;\n  auto &DL = LI.getModule()->getDataLayout();\n  uint64_t Size = DL.getTypeStoreSize(LI.getType());\n  if (Size == 0) {\n    TF.setShadow(&LI, TF.TT.getZeroShadow(&LI));\n    return;\n  }\n\n  // When an application load is atomic, increase atomic ordering between\n  // atomic application loads and stores to ensure happen-before order; load\n  // shadow data after application data; store zero shadow data before\n  // application data. This ensure shadow loads return either labels of the\n  // initial application data or zeros.\n  if (LI.isAtomic())\n    LI.setOrdering(addAcquireOrdering(LI.getOrdering()));\n\n  Instruction *Pos = LI.isAtomic() ? LI.getNextNode() : &LI;\n\n  // check bounds first\n  if (ClTraceBound)\n    TF.checkBounds(LI.getPointerOperand(),\n                   ConstantInt::get(TF.TT.Int64Ty, Size), Pos);\n\n  Value *Shadow =\n      TF.loadShadow(LI.getType(), LI.getPointerOperand(), Size,\n                    LI.getAlign(), Pos);\n#if 0\n  //FIXME: tainted pointer\n  if (ClCombinePointerLabelsOnLoad) {\n    Value *PtrShadow = TF.getShadow(LI.getPointerOperand());\n    Shadow = TF.combineShadows(Shadow, PtrShadow, Pos);\n  }\n#endif\n  if (!TF.TT.isZeroShadow(Shadow))\n    TF.NonZeroChecks.push_back(Shadow);\n\n  TF.setShadow(&LI, Shadow);\n}\n\nvoid TaintFunction::storeShadowRecursive(\n    Value *Shadow, SmallVector<unsigned, 4> &Indices, Type *SubShadowTy,\n    Value *Addr, uint64_t Size, uint64_t Align, IRBuilder<> &IRB) {\n  auto &DL = F->getParent()->getDataLayout();\n\n  if (!isa<ArrayType>(SubShadowTy) && !isa<StructType>(SubShadowTy)) {\n    uint64_t SubSize = DL.getTypeStoreSize(SubShadowTy);\n    assert(Size >= SubSize);\n    Align = std::min(Align, (uint64_t)DL.getABITypeAlignment(SubShadowTy));\n 
   // load a primitive shadow from the sub-field\n    Value *PrimitiveShadow = IRB.CreateExtractValue(Shadow, Indices);\n    // then store the primitive shadow into the shadow address\n    Value *ShadowAddr = TT.getShadowAddress(Addr, IRB);\n    IRB.CreateCall(TT.TaintUnionStoreFn,\n                   {PrimitiveShadow, ShadowAddr,\n                    ConstantInt::get(TT.IntptrTy, SubSize),\n                    ConstantInt::get(TT.IntptrTy, Align)});\n    return;\n  }\n\n  if (ArrayType *AT = dyn_cast<ArrayType>(SubShadowTy)) {\n    for (unsigned Idx = 0; Idx < AT->getNumElements(); Idx++) {\n      Indices.push_back(Idx);\n      // double check the remaining size\n      Type *ElemTy = AT->getElementType();\n      uint64_t ElemSize = DL.getTypeStoreSize(ElemTy);\n      uint64_t Offset = ElemSize * Idx;\n      assert(Offset <= Size);\n      // get the address of the array element\n      Value *SubAddr = IRB.CreateConstGEP2_32(AT, Addr, 0, Idx);\n      storeShadowRecursive(Shadow, Indices, ElemTy,\n                           SubAddr, Size - Offset, Align, IRB);\n      Indices.pop_back();\n    }\n    return;\n  }\n\n  if (StructType *ST = dyn_cast<StructType>(SubShadowTy)) {\n    const StructLayout *SL = DL.getStructLayout(ST);\n    for (unsigned Idx = 0; Idx < ST->getNumElements(); Idx++) {\n      Indices.push_back(Idx);\n      // double check the remaining size\n      uint64_t Offset = SL->getElementOffset(Idx);\n      assert(Offset <= Size);\n      Type *ElemTy = ST->getElementType(Idx);\n      // get the address of the struct field\n      Value *SubAddr = IRB.CreateConstGEP2_32(ST, Addr, 0, Idx);\n      storeShadowRecursive(Shadow, Indices, ElemTy,\n                           SubAddr, Size - Offset, Align, IRB);\n      Indices.pop_back();\n    }\n    return;\n  }\n  llvm_unreachable(\"Unexpected shadow type\");\n}\n\nvoid TaintFunction::storeShadow(Value *Addr, Type *T, uint64_t Size,\n                                Align Alignment, Value *Shadow,\n                
                Instruction *Pos) {\n  IRBuilder<> IRB(Pos);\n  if (AllocaInst *AI = dyn_cast<AllocaInst>(Addr)) {\n    const auto i = AllocaShadowMap.find(AI);\n    if (i != AllocaShadowMap.end()) {\n      auto *SI = IRB.CreateStore(Shadow, i->second);\n      SkipInsts.insert(SI);\n      return;\n    }\n  }\n\n  Value *ShadowAddr = TT.getShadowAddress(Addr, IRB);\n  const Align ShadowAlign = getShadowAlign(Alignment);\n  // check if the shadow is zero, if so, clear the shadow memory regardless\n  // of the shadow type\n  if (TT.isZeroShadow(Shadow)) {\n    IntegerType *ShadowTy =\n        IntegerType::get(*TT.Ctx, Size * TT.ShadowWidthBits);\n    Value *ExtZeroShadow = ConstantInt::get(ShadowTy, 0);\n    Value *ExtShadowAddr =\n        IRB.CreateBitCast(ShadowAddr, PointerType::getUnqual(ShadowTy));\n    IRB.CreateAlignedStore(ExtZeroShadow, ExtShadowAddr, ShadowAlign);\n    return;\n  }\n\n  // now check if we're storing an aggragate shadow object\n  if (!isa<ArrayType>(T) && !isa<StructType>(T)) {\n    IRB.CreateCall(TT.TaintUnionStoreFn,\n                   {Shadow, ShadowAddr, ConstantInt::get(TT.IntptrTy, Size),\n                    ConstantInt::get(TT.IntptrTy, ShadowAlign.value())});\n    return;\n  }\n\n  // if storing an aggregate shadow object, store its shadow recursively\n  // we want to do this so union_store may have a chance to simplify some\n  // constraints\n  SmallVector<unsigned, 4> Indices;\n  storeShadowRecursive(Shadow, Indices, T, Addr, Size,\n                       ShadowAlign.value(), IRB);\n}\n\nvoid TaintVisitor::visitStoreInst(StoreInst &SI) {\n  if (SI.getMetadata(\"nosanitize\")) return;\n\n  auto &DL = SI.getModule()->getDataLayout();\n  Value *Val = SI.getValueOperand();\n  Type* VT = SI.getValueOperand()->getType();\n  uint64_t Size = DL.getTypeStoreSize(VT);\n  if (Size == 0)\n    return;\n\n  // When an application store is atomic, increase atomic ordering between\n  // atomic application loads and stores to ensure happen-before 
order; load\n  // shadow data after application data; store zero shadow data before\n  // application data. This ensure shadow loads return either labels of the\n  // initial application data or zeros.\n  if (SI.isAtomic())\n    SI.setOrdering(addReleaseOrdering(SI.getOrdering()));\n\n  Value* Shadow = SI.isAtomic() ? TF.TT.getZeroShadow(VT) : TF.getShadow(Val);\n\n  // check bounds first\n  if (ClTraceBound)\n    TF.checkBounds(SI.getPointerOperand(),\n                   ConstantInt::get(TF.TT.Int64Ty, Size), &SI);\n\n#if 0\n  //FIXME: tainted pointer\n  if (ClCombinePointerLabelsOnStore) {\n    Value *PtrShadow = TF.getShadow(SI.getPointerOperand());\n    Shadow = TF.combineShadows(Shadow, PtrShadow, &SI);\n  }\n#endif\n  TF.storeShadow(SI.getPointerOperand(), VT, Size, SI.getAlign(), Shadow, &SI);\n}\n\n//void TaintVisitor::visitUnaryOperator(UnaryOperator &UO) {\n//}\n\nvoid TaintVisitor::visitBinaryOperator(BinaryOperator &BO) {\n  if (BO.getMetadata(\"nosanitize\")) return;\n  if (BO.getType()->isFloatingPointTy()) return;\n  Value *CombinedShadow =\n    TF.combineBinaryOperatorShadows(&BO, BO.getOpcode());\n  TF.setShadow(&BO, CombinedShadow);\n}\n\nvoid TaintVisitor::visitCastInst(CastInst &CI) {\n  if (CI.getMetadata(\"nosanitize\")) return;\n  // Special case: if this is the bitcast (there is exactly 1 allowed) between\n  // a musttail call and a ret, don't instrument. 
New instructions are not\n  // allowed after a musttail call.\n  if (auto *C = dyn_cast<CallInst>(CI.getOperand(0)))\n    if (C->isMustTailCall())\n      return;\n  Value *CombinedShadow =\n    TF.combineCastInstShadows(&CI, CI.getOpcode());\n  TF.setShadow(&CI, CombinedShadow);\n}\n\nvoid TaintFunction::visitCmpInst(CmpInst *I) {\n  // get operand\n  Value *Op1 = I->getOperand(0);\n  Value *Op2 = I->getOperand(1);\n  Value *Op1Shadow = getShadow(Op1);\n  Value *Op2Shadow = getShadow(Op2);\n  if (TT.isZeroShadow(Op1Shadow) && TT.isZeroShadow(Op2Shadow))\n    return;\n\n  Module *M = F->getParent();\n  auto &DL = M->getDataLayout();\n  unsigned size = DL.getTypeSizeInBits(Op1->getType());\n\n  IRBuilder<> IRB(I);\n  Op1 = IRB.CreateZExtOrTrunc(Op1, TT.Int64Ty);\n  Op2 = IRB.CreateZExtOrTrunc(Op2, TT.Int64Ty);\n  ConstantInt *Size = ConstantInt::get(TT.Int32Ty, size);\n  ConstantInt *Predicate = ConstantInt::get(TT.Int32Ty, I->getPredicate());\n  ConstantInt *CID = ConstantInt::get(TT.Int32Ty, TT.getInstructionId(I));\n\n  IRB.CreateCall(TT.TaintTraceCmpFn, {Op1Shadow, Op2Shadow, Size, Predicate,\n                 Op1, Op2, CID});\n}\n\nvoid TaintVisitor::visitCmpInst(CmpInst &CI) {\n  if (CI.getMetadata(\"nosanitize\")) return;\n  // FIXME: integer only now\n  if (!ClTraceFP && !isa<ICmpInst>(CI)) return;\n#if 0 //TODO make an option\n  TF.visitCmpInst(&CI);\n#endif\n  Value *CombinedShadow =\n    TF.combineCmpInstShadows(&CI, CI.getOpcode());\n  TF.setShadow(&CI, CombinedShadow);\n}\n\nvoid TaintFunction::visitSwitchInst(SwitchInst *I) {\n  Module *M = F->getParent();\n  auto &DL = M->getDataLayout();\n  // get operand\n  Value *Cond = I->getCondition();\n  Value *CondShadow = getShadow(Cond);\n  if (TT.isZeroShadow(CondShadow))\n    return;\n  uint32_t cid = TT.getInstructionId(I);\n  if (cid == TT.InvalidInstructionId)\n    return;\n  unsigned size = DL.getTypeSizeInBits(Cond->getType());\n  ConstantInt *Size = ConstantInt::get(TT.Int32Ty, size);\n  ConstantInt 
*Predicate = ConstantInt::get(TT.Int32Ty, 32); // EQ, ==\n  ConstantInt *CID = ConstantInt::get(TT.Int32Ty, cid);\n\n  IRBuilder<> IRB(I);\n  for (auto C : I->cases()) {\n    Value *CV = C.getCaseValue();\n\n    Cond = IRB.CreateZExtOrTrunc(Cond, TT.Int64Ty);\n    CV = IRB.CreateZExtOrTrunc(CV, TT.Int64Ty);\n    IRB.CreateCall(TT.TaintTraceCmpFn, {CondShadow, TT.ZeroPrimitiveShadow,\n                   Size, Predicate, Cond, CV, CID});\n  }\n  IRB.CreateCall(TT.TaintTraceSwitchEndFn, {CID});\n}\n\nvoid TaintVisitor::visitSwitchInst(SwitchInst &SWI) {\n  if (SWI.getMetadata(\"nosanitize\")) return;\n  TF.visitSwitchInst(&SWI);\n}\n\nvoid TaintVisitor::visitLandingPadInst(LandingPadInst &LPI) {\n  // We do not need to track data through LandingPadInst.\n  //\n  // For the C++ exceptions, if a value is thrown, this value will be stored\n  // in a memory location provided by __cxa_allocate_exception(...) (on the\n  // throw side) or  __cxa_begin_catch(...) (on the catch side).\n  // This memory will have a shadow, so with the loads and stores we will be\n  // able to propagate labels on data thrown through exceptions, without any\n  // special handling of the LandingPadInst.\n  //\n  // The second element in the pair result of the LandingPadInst is a\n  // register value, but it is for a type ID and should never be tainted.\n  TF.setShadow(&LPI, TF.TT.getZeroShadow(&LPI));\n}\n\nvoid TaintFunction::visitGEPInst(GetElementPtrInst *I) {\n  Module *M = F->getParent();\n  auto &DL = M->getDataLayout();\n  int64_t CurrentOffset = 0;\n\n  IRBuilder<> IRB(I);\n  Value *Base = I->getPointerOperand();\n  Value *Bounds = TT.getZeroShadow(Base);\n  if (ClTraceBound) {\n    // get bounds info for base pointer\n    if (auto *GV = dyn_cast<GlobalVariable>(Base->stripPointerCasts())) {\n      // if the base pointer is a global variable\n      // we can't get its shadow from the shadow map\n      Bounds = getShadowForGlobal(GV, IRB);\n    } else {\n      Bounds = getShadow(Base);\n    
  if (TT.isZeroShadow(Bounds)) {\n        // try striping the pointer cast\n        Bounds = getShadow(Base->stripPointerCasts());\n      }\n    }\n  }\n\n  Type *POTy = I->getPointerOperandType();\n  Type *ETy = POTy;\n  for (auto &Idx: I->indices()) {\n    // reference: DataLayout::getIndexedOffsetInType\n    Value *Index = &*Idx;\n    if (StructType *STy = dyn_cast<StructType>(ETy)) {\n      // index into struct has to be constant\n      assert(isa<ConstantInt>(Index) && \"inllegal struct index\");\n      unsigned FieldNo = cast<ConstantInt>(Index)->getZExtValue();\n      const StructLayout *SL = DL.getStructLayout(STy);\n      CurrentOffset += SL->getElementOffset(FieldNo);\n      ETy = STy->getTypeAtIndex(FieldNo);\n    } else {\n      uint64_t NumElements = 0;\n      int64_t ElemSize = 0;\n      if (PointerType *PTy = dyn_cast<PointerType>(ETy)) {\n        assert(PTy == POTy && \"inllegal pointer index\");\n        ETy = I->getSourceElementType();\n        NumElements = 0; // we don't know the number of elements\n        ElemSize = DL.getTypeAllocSize(ETy);\n      } else if (ArrayType *ATy = dyn_cast<ArrayType>(ETy)) {\n        ETy = ATy->getElementType();\n        NumElements = ATy->getNumElements();\n        ElemSize = DL.getTypeAllocSize(ETy);\n      } else {\n        VectorType *VTy = dyn_cast<VectorType>(ETy);\n        assert(VTy && \"inllegal index type\");\n        ETy = VTy->getElementType();\n        NumElements = VTy->getElementCount().getFixedValue();\n        ElemSize = DL.getTypeStoreSize(ETy);\n        break;\n      }\n\n      if (isa<ConstantInt>(Index)) {\n        int64_t arrayIdx = cast<ConstantInt>(Index)->getSExtValue();\n        CurrentOffset += arrayIdx * ElemSize;\n      } else if (Index->getType()->isIntegerTy()) { // FIXEME: handle vector type\n        // non-constant index, check if it's tainted\n        Value *Shadow = getShadow(Index);\n        if (!TT.isZeroShadow(Shadow)) {\n          Index = IRB.CreateZExtOrTrunc(Index, 
TT.Int64Ty);\n          ConstantInt *Offset = ConstantInt::get(TT.Int64Ty, CurrentOffset);\n          ConstantInt *NE = ConstantInt::get(TT.Int64Ty, NumElements);\n          ConstantInt *ES = ConstantInt::get(TT.Int64Ty, ElemSize);\n          Value *Ptr = IRB.CreatePtrToInt(I->getPointerOperand(), TT.Int64Ty);\n          ConstantInt *CID = ConstantInt::get(TT.Int32Ty, TT.getInstructionId(I));\n          if (ClSolveUB) {\n            // check if index can go out of bounds\n            // -fsanitize=local-bounds\n            // must be added before tracing GEP, otherwise index_label == index\n            // will be added as nested constraint\n            IRB.CreateCall(TT.TaintSolveBoundsFn,\n                           {Bounds, Ptr, Shadow, Index, NE, ES, Offset, CID});\n          }\n          if (ClTraceGEPOffset) {\n            IRB.CreateCall(TT.TaintTraceGEPFn,\n                           {Bounds, Ptr, Shadow, Index, NE, ES, Offset, CID});\n          }\n        } else {\n          break;\n        }\n      }\n    }\n  }\n\n  if (ClTraceBound) {\n    // propagate bounds info\n    setShadow(I, Bounds);\n  }\n\n  // For constant offset GEPs on string op pointers, create fstr_off label\n  // to track the offset (e.g., sep + 1 where sep is from strchr)\n  if (CurrentOffset != 0 && !TT.isZeroShadow(Bounds)) {\n    IRBuilder<> IRB(I->getNextNode());\n    Bounds = IRB.CreateCall(TT.TaintTraceGEPPtrFn, {Bounds, I, Base});\n  }\n}\n\nvoid TaintVisitor::visitGetElementPtrInst(GetElementPtrInst &GEPI) {\n  if (!ClTraceGEPOffset && !ClTraceBound) return;\n  if (GEPI.getMetadata(\"nosanitize\")) return;\n  TF.visitGEPInst(&GEPI);\n}\n\nvoid TaintVisitor::visitExtractElementInst(ExtractElementInst &I) {\n  //FIXME:\n}\n\nvoid TaintVisitor::visitInsertElementInst(InsertElementInst &I) {\n  //FIXME:\n}\n\nvoid TaintVisitor::visitShuffleVectorInst(ShuffleVectorInst &I) {\n  //FIXME:\n}\n\nvoid TaintVisitor::visitExtractValueInst(ExtractValueInst &I) {\n  if 
(I.getMetadata(\"nosanitize\")) return;\n\n  IRBuilder<> IRB(&I);\n  Value *Agg = I.getAggregateOperand();\n  Value *AggShadow = TF.getShadow(Agg);\n  Value *ResShadow = IRB.CreateExtractValue(AggShadow, I.getIndices());\n  TF.setShadow(&I, ResShadow);\n}\n\nvoid TaintVisitor::visitInsertValueInst(InsertValueInst &I) {\n  if (I.getMetadata(\"nosanitize\")) return;\n\n  IRBuilder<> IRB(&I);\n  Value *AggShadow = TF.getShadow(I.getAggregateOperand());\n  Value *InsShadow = TF.getShadow(I.getInsertedValueOperand());\n  Value *Res = IRB.CreateInsertValue(AggShadow, InsShadow, I.getIndices());\n  TF.setShadow(&I, Res);\n}\n\nValue *TaintFunction::visitAllocaInst(AllocaInst *I, Value *ArraySize,\n                                      Type *ElTy) {\n  // insert after the instruction to get the address\n  BasicBlock::iterator ip(I);\n  IRBuilder<> IRB(I->getParent(), ++ip);\n  // prepare array size\n  Value *Size = IRB.CreateZExtOrTrunc(ArraySize, TT.Int64Ty);\n  Value *SizeShadow = getShadow(ArraySize);\n  // get element size\n  Module *M = F->getParent();\n  auto &DL = M->getDataLayout();\n  uint64_t es = DL.getTypeAllocSize(ElTy);\n  ConstantInt *ElemSize = ConstantInt::get(TT.Int64Ty, es);\n  // get address\n  Value *Address = IRB.CreatePtrToInt(I, TT.Int64Ty);\n\n  return IRB.CreateCall(TT.TaintTraceAllocaFn,\n                        {SizeShadow, Size, ElemSize, Address});\n}\n\nvoid TaintVisitor::visitAllocaInst(AllocaInst &I) {\n  bool AllLoadsStores = true;\n  for (User *U : I.users()) {\n    if (isa<LoadInst>(U)) {\n      continue;\n    }\n    if (StoreInst *SI = dyn_cast<StoreInst>(U)) {\n      if (SI->getPointerOperand() == &I) {\n        continue;\n      }\n    }\n\n    AllLoadsStores = false;\n    break;\n  }\n  if (AllLoadsStores) {\n    IRBuilder<> IRB(&I);\n    AllocaInst *AI = IRB.CreateAlloca(TF.TT.getShadowTy(I.getAllocatedType()),\n                                      I.getArraySize(), I.getName() + \".taint\");\n    TF.AllocaShadowMap[&I] = AI;\n    
if (ClTraceBound) {\n      // set shadow to uninit\n      IRB.CreateStore(TF.TT.getUninitializedShadow(I.getAllocatedType()), AI);\n    }\n  }\n  if (!ClTraceBound) {\n    TF.setShadow(&I, TF.TT.ZeroPrimitiveShadow);\n  } else {\n    Type *T = I.getAllocatedType();\n    Value *ArraySize = I.getArraySize();\n    bool TrackBounds = I.isArrayAllocation() | T->isArrayTy() | T->isStructTy();\n    if (TrackBounds) {\n      // array could be VLA, rely on runtime\n      Value *Bounds = TF.visitAllocaInst(&I, ArraySize, T);\n      TF.setShadow(&I, Bounds);\n    } else {\n      TF.setShadow(&I, TF.TT.ZeroPrimitiveShadow); // no bounds\n    }\n    // set uninit shadow for allocation with constant size\n    if (!AllLoadsStores && isa<ConstantInt>(ArraySize)) {\n      Value *Init = TF.TT.UninitializedPrimitiveShadow;\n      // XXX: skip __va_list_tag, as we don't trace llvm.va_start\n      if (ArrayType *AT = dyn_cast<ArrayType>(T)) {\n        T = AT->getElementType();\n      }\n      if (T->isStructTy() &&\n          T->getStructName().find(\"__va_list_tag\") != StringRef::npos) {\n        // FIXME: don't set uninit, assuming llvm.va_start will be called\n        Init = TF.TT.ZeroPrimitiveShadow;\n      }\n      // handle not all loads and stores cases here\n      IRBuilder<> IRB(I.getNextNode());\n      auto DL = I.getModule()->getDataLayout();\n      auto size = I.getAllocationSizeInBits(DL);\n      assert(size != None);\n      Value *Size =\n          ConstantInt::get(TF.TT.IntptrTy, (size->getFixedValue() + 7) >> 3);\n      IRB.CreateCall(TF.TT.TaintSetLabelFn,\n                     {Init,\n                      IRB.CreateBitCast(&I, Type::getInt8PtrTy(*TF.TT.Ctx)),\n                      Size});\n    }\n  }\n}\n\nValue* TaintFunction::visitSelectInst(Value *Cond, Value *TrueShadow,\n                                      Value *FalseShadow, SelectInst *I) {\n  Value *CondShadow = getShadow(Cond);\n  Type *T = I->getType();\n  if (!T->isIntegerTy(1)) {\n    // most cases\n  
  visitCondition(Cond, I);\n    return TrueShadow == FalseShadow ? TrueShadow :\n        SelectInst::Create(Cond, TrueShadow, FalseShadow, \"\", I);\n  }\n\n  // special case, when select is used to implement logical AND and OR\n  IRBuilder<> IRB(I);\n  Cond = IRB.CreateZExt(Cond, TT.Int8Ty);\n  Value *TrueVal = IRB.CreateZExt(I->getTrueValue(), TT.Int8Ty);\n  Value *FalseVal = IRB.CreateZExt(I->getFalseValue(), TT.Int8Ty);\n  ConstantInt *CID = ConstantInt::get(TT.Int32Ty, TT.getInstructionId(I));\n  return IRB.CreateCall(TT.TaintTraceSelectFn,\n                        {CondShadow, TrueShadow, FalseShadow, Cond,\n                         TrueVal, FalseVal, CID});\n}\n\nvoid TaintVisitor::visitSelectInst(SelectInst &I) {\n  Value *Condition = I.getCondition();\n  Value *TrueShadow = TF.getShadow(I.getTrueValue());\n  Value *FalseShadow = TF.getShadow(I.getFalseValue());\n\n  if (isa<VectorType>(Condition->getType())) {\n    //FIXME:\n    errs() << \"WARNING: vector condition in Select\" << I << \"\\n\";\n    TF.setShadow(&I, TF.TT.ZeroPrimitiveShadow);\n  } else {\n    Value *ShadowSel =\n        TF.visitSelectInst(Condition, TrueShadow, FalseShadow, &I);\n    TF.setShadow(&I, ShadowSel);\n  }\n}\n\nvoid TaintVisitor::visitMemSetInst(MemSetInst &I) {\n  // check bounds before memset\n  if (ClTraceBound) {\n    TF.checkBounds(I.getDest(), I.getLength(), &I);\n  }\n  if (ClSolveUB) {\n    TF.solveBounds(I.getDest(), I.getLength(), &I);\n  }\n  IRBuilder<> IRB(&I);\n  Value *ValShadow = TF.getShadow(I.getValue());\n  IRB.CreateCall(\n      TF.TT.TaintSetLabelFn,\n      {ValShadow,\n       IRB.CreateBitCast(I.getDest(), Type::getInt8PtrTy(*TF.TT.Ctx)),\n       IRB.CreateZExtOrTrunc(I.getLength(), TF.TT.IntptrTy)});\n}\n\nvoid TaintVisitor::visitMemTransferInst(MemTransferInst &I) {\n  // check bounds before memcpy\n  if (ClTraceBound) {\n    TF.checkBounds(I.getDest(), I.getLength(), &I);\n    TF.checkBounds(I.getSource(), I.getLength(), &I);\n  }\n  if (ClSolveUB) {\n 
   TF.solveBounds(I.getDest(), I.getLength(), &I);\n    TF.solveBounds(I.getSource(), I.getLength(), &I);\n  }\n  IRBuilder<> IRB(&I);\n  Value *DestShadow = TF.TT.getShadowAddress(I.getDest(), IRB);\n  Value *SrcShadow = TF.TT.getShadowAddress(I.getSource(), IRB);\n  Value *LenShadow = IRB.CreateMul(\n      I.getLength(),\n      ConstantInt::get(I.getLength()->getType(), TF.TT.ShadowWidthBytes));\n  Type *Int8Ptr = Type::getInt8PtrTy(*TF.TT.Ctx);\n  DestShadow = IRB.CreateBitCast(DestShadow, Int8Ptr);\n  SrcShadow = IRB.CreateBitCast(SrcShadow, Int8Ptr);\n  auto *MTI = cast<MemTransferInst>(\n      IRB.CreateCall(I.getFunctionType(), I.getCalledOperand(),\n                     {DestShadow, SrcShadow, LenShadow, I.getVolatileCst()}));\n  if (ClPreserveAlignment) {\n    MTI->setDestAlignment(I.getDestAlign() * TF.TT.ShadowWidthBytes);\n    MTI->setSourceAlignment(I.getSourceAlign() * TF.TT.ShadowWidthBytes);\n  } else {\n    MTI->setDestAlignment(Align(TF.TT.ShadowWidthBytes));\n    MTI->setSourceAlignment(Align(TF.TT.ShadowWidthBytes));\n  }\n}\n\nstatic bool isAMustTailRetVal(Value *RetVal) {\n  // Tail call may have a bitcast between return.\n  if (auto *I = dyn_cast<BitCastInst>(RetVal)) {\n    RetVal = I->getOperand(0);\n  }\n  if (auto *I = dyn_cast<CallInst>(RetVal)) {\n    return I->isMustTailCall();\n  }\n  return false;\n}\n\nvoid TaintVisitor::visitReturnInst(ReturnInst &RI) {\n  Value *RV = RI.getReturnValue();\n  if (!TF.IsNativeABI && RV) {\n    // Don't emit the instrumentation for musttail call returns.\n    if (isAMustTailRetVal(RV))\n      return;\n\n    Value *S = TF.getShadow(RV);\n    IRBuilder<> IRB(&RI);\n    Type *RT = TF.F->getFunctionType()->getReturnType();\n    unsigned Size = getDataLayout().getTypeAllocSize(TF.TT.getShadowTy(RT));\n    if (Size <= RetvalTLSSize) {\n      // If the size overflows, stores nothing. 
At callsite, oversized return\n      // shadows are set to zero.\n      IRB.CreateAlignedStore(S, TF.getRetvalTLS(RT, IRB), ShadowTLSAlignment);\n    }\n  }\n}\n\nvoid TaintVisitor::addShadowArguments(Function *F, CallBase &CB,\n                                      std::vector<Value *> &Args,\n                                      IRBuilder<> &IRB) {\n  FunctionType *FT = F->getFunctionType();\n\n  auto *I = CB.arg_begin();\n\n  // Adds non-variable argument shadows.\n  for (unsigned N = FT->getNumParams(); N != 0; ++I, --N) {\n    // Finds potential shadow for GV\n    auto *GV = dyn_cast<GlobalVariable>((*I)->stripPointerCasts());\n    Value *Shadow = GV ? TF.getShadowForGlobal(GV, IRB)\n                       : TF.getShadow(*I);\n    Args.push_back(Shadow); // we don't collapse shadow\n  }\n\n  // Adds variable argument shadows.\n  if (FT->isVarArg()) {\n    auto *LabelVATy = ArrayType::get(TF.TT.PrimitiveShadowTy,\n                                     CB.arg_size() - FT->getNumParams());\n    auto *LabelVAAlloca =\n        new AllocaInst(LabelVATy, getDataLayout().getAllocaAddrSpace(),\n                       \"labelva\", &TF.F->getEntryBlock().front());\n\n    for (unsigned N = 0; I != CB.arg_end(); ++I, ++N) {\n      auto LabelVAPtr = IRB.CreateStructGEP(LabelVATy, LabelVAAlloca, N);\n      auto *GV = dyn_cast<GlobalVariable>((*I)->stripPointerCasts());\n      Value *Shadow = GV ? 
TF.getShadowForGlobal(GV, IRB)\n                         : TF.getShadow(*I);\n      IRB.CreateStore(Shadow, LabelVAPtr);\n    }\n\n    Args.push_back(IRB.CreateStructGEP(LabelVATy, LabelVAAlloca, 0));\n  }\n\n  // Adds the return value shadow.\n  Type *RetTy = FT->getReturnType();\n  if (!RetTy->isVoidTy()) {\n    if (!TF.LabelReturnAlloca) {\n      TF.LabelReturnAlloca =\n          new AllocaInst(TF.TT.getShadowTy(RetTy), // we dont collapse shadow\n                         getDataLayout().getAllocaAddrSpace(),\n                         \"labelreturn\", &TF.F->getEntryBlock().front());\n    }\n    Args.push_back(TF.LabelReturnAlloca);\n  }\n}\n\nbool TaintVisitor::visitWrappedCallBase(Function *F, CallBase &CB) {\n  IRBuilder<> IRB(&CB);\n  Value *Shadow = nullptr;\n  FunctionType *FT = F->getFunctionType();\n  switch (TF.TT.getWrapperKind(F)) {\n  case Taint::WK_Warning:\n    CB.setCalledFunction(F);\n    IRB.CreateCall(TF.TT.TaintUnimplementedFn,\n                   IRB.CreateGlobalStringPtr(F->getName()));\n    TF.setShadow(&CB, TF.TT.getZeroShadow(&CB));\n    return true;\n  case Taint::WK_Discard:\n    CB.setCalledFunction(F);\n    TF.setShadow(&CB, TF.TT.getZeroShadow(&CB));\n    return true;\n  case Taint::WK_Functional:\n    CB.setCalledFunction(F);\n    //FIXME:\n    // visitOperandShadowInst(CS);\n    return true;\n  case Taint::WK_Memcmp: {\n    // int memcmp(const void *s1, const void *s2, size_t n)\n    assert(CB.arg_size() == 3 && !FT->getReturnType()->isVoidTy());\n    TransformedFunction CustomFn = TF.TT.getCustomFunctionType(FT);\n    FunctionCallee DfswFn = TF.TT.Mod->getOrInsertFunction(\"__dfsw_memcmp\", CustomFn.TransformedType);\n\n    std::vector<Value *> Args;\n    // Add original arguments\n    for (unsigned i = 0; i < FT->getNumParams(); i++)\n      Args.push_back(CB.getArgOperand(i));\n    // Add shadow arguments (including return label pointer)\n    addShadowArguments(F, CB, Args, IRB);\n\n    CallInst *CustomCI = IRB.CreateCall(DfswFn, 
Args);\n\n    // Load return shadow\n    LoadInst *LabelLoad = IRB.CreateLoad(TF.TT.getShadowTy(FT->getReturnType()), TF.LabelReturnAlloca);\n    TF.setShadow(CustomCI, LabelLoad);\n\n    CB.replaceAllUsesWith(CustomCI);\n    CB.eraseFromParent();\n    return true;\n  }\n  case Taint::WK_Strcmp: {\n    // int strcmp(const char *s1, const char *s2)\n    assert(CB.arg_size() == 2 && !FT->getReturnType()->isVoidTy());\n    TransformedFunction CustomFn = TF.TT.getCustomFunctionType(FT);\n    FunctionCallee DfswFn = TF.TT.Mod->getOrInsertFunction(\"__dfsw_strcmp\", CustomFn.TransformedType);\n\n    std::vector<Value *> Args;\n    for (unsigned i = 0; i < FT->getNumParams(); i++)\n      Args.push_back(CB.getArgOperand(i));\n    addShadowArguments(F, CB, Args, IRB);\n\n    CallInst *CustomCI = IRB.CreateCall(DfswFn, Args);\n\n    LoadInst *LabelLoad = IRB.CreateLoad(TF.TT.getShadowTy(FT->getReturnType()), TF.LabelReturnAlloca);\n    TF.setShadow(CustomCI, LabelLoad);\n\n    CB.replaceAllUsesWith(CustomCI);\n    CB.eraseFromParent();\n    return true;\n  }\n  case Taint::WK_Strncmp: {\n    // int strncmp(const char *s1, const char *s2, size_t n)\n    assert(CB.arg_size() == 3 && !FT->getReturnType()->isVoidTy());\n    TransformedFunction CustomFn = TF.TT.getCustomFunctionType(FT);\n    FunctionCallee DfswFn = TF.TT.Mod->getOrInsertFunction(\"__dfsw_strncmp\", CustomFn.TransformedType);\n\n    std::vector<Value *> Args;\n    for (unsigned i = 0; i < FT->getNumParams(); i++)\n      Args.push_back(CB.getArgOperand(i));\n    addShadowArguments(F, CB, Args, IRB);\n\n    CallInst *CustomCI = IRB.CreateCall(DfswFn, Args);\n\n    LoadInst *LabelLoad = IRB.CreateLoad(TF.TT.getShadowTy(FT->getReturnType()), TF.LabelReturnAlloca);\n    TF.setShadow(CustomCI, LabelLoad);\n\n    CB.replaceAllUsesWith(CustomCI);\n    CB.eraseFromParent();\n    return true;\n  }\n  case Taint::WK_Strchr: {\n    // char *strchr(char *s, int c)\n    assert(CB.arg_size() == 2 && 
!FT->getReturnType()->isVoidTy());\n    TransformedFunction CustomFn = TF.TT.getCustomFunctionType(FT);\n    FunctionCallee DfswFn = TF.TT.Mod->getOrInsertFunction(\"__dfsw_strchr\", CustomFn.TransformedType);\n\n    std::vector<Value *> Args;\n    for (unsigned i = 0; i < FT->getNumParams(); i++)\n      Args.push_back(CB.getArgOperand(i));\n    addShadowArguments(F, CB, Args, IRB);\n\n    CallInst *CustomCI = IRB.CreateCall(DfswFn, Args);\n\n    LoadInst *LabelLoad = IRB.CreateLoad(TF.TT.getShadowTy(FT->getReturnType()), TF.LabelReturnAlloca);\n    TF.setShadow(CustomCI, LabelLoad);\n\n    CB.replaceAllUsesWith(CustomCI);\n    CB.eraseFromParent();\n    return true;\n  }\n  case Taint::WK_Strrchr: {\n    // char *strrchr(char *s, int c)\n    assert(CB.arg_size() == 2 && !FT->getReturnType()->isVoidTy());\n    TransformedFunction CustomFn = TF.TT.getCustomFunctionType(FT);\n    FunctionCallee DfswFn = TF.TT.Mod->getOrInsertFunction(\"__dfsw_strrchr\", CustomFn.TransformedType);\n\n    std::vector<Value *> Args;\n    for (unsigned i = 0; i < FT->getNumParams(); i++)\n      Args.push_back(CB.getArgOperand(i));\n    addShadowArguments(F, CB, Args, IRB);\n\n    CallInst *CustomCI = IRB.CreateCall(DfswFn, Args);\n\n    LoadInst *LabelLoad = IRB.CreateLoad(TF.TT.getShadowTy(FT->getReturnType()), TF.LabelReturnAlloca);\n    TF.setShadow(CustomCI, LabelLoad);\n\n    CB.replaceAllUsesWith(CustomCI);\n    CB.eraseFromParent();\n    return true;\n  }\n  case Taint::WK_Strstr: {\n    // char *strstr(char *haystack, char *needle)\n    assert(CB.arg_size() == 2 && !FT->getReturnType()->isVoidTy());\n    TransformedFunction CustomFn = TF.TT.getCustomFunctionType(FT);\n    FunctionCallee DfswFn = TF.TT.Mod->getOrInsertFunction(\"__dfsw_strstr\", CustomFn.TransformedType);\n\n    std::vector<Value *> Args;\n    for (unsigned i = 0; i < FT->getNumParams(); i++)\n      Args.push_back(CB.getArgOperand(i));\n    addShadowArguments(F, CB, Args, IRB);\n\n    CallInst *CustomCI = 
IRB.CreateCall(DfswFn, Args);\n\n    LoadInst *LabelLoad = IRB.CreateLoad(TF.TT.getShadowTy(FT->getReturnType()), TF.LabelReturnAlloca);\n    TF.setShadow(CustomCI, LabelLoad);\n\n    CB.replaceAllUsesWith(CustomCI);\n    CB.eraseFromParent();\n    return true;\n  }\n  case Taint::WK_Prefixof: {\n    // int prefixof(const char *str, const char *prefix)\n    assert(CB.arg_size() == 2 && !FT->getReturnType()->isVoidTy());\n    TransformedFunction CustomFn = TF.TT.getCustomFunctionType(FT);\n    FunctionCallee DfswFn = TF.TT.Mod->getOrInsertFunction(\"__dfsw_prefixof\", CustomFn.TransformedType);\n\n    std::vector<Value *> Args;\n    for (unsigned i = 0; i < FT->getNumParams(); i++)\n      Args.push_back(CB.getArgOperand(i));\n    addShadowArguments(F, CB, Args, IRB);\n\n    CallInst *CustomCI = IRB.CreateCall(DfswFn, Args);\n\n    LoadInst *LabelLoad = IRB.CreateLoad(TF.TT.getShadowTy(FT->getReturnType()), TF.LabelReturnAlloca);\n    TF.setShadow(CustomCI, LabelLoad);\n\n    CB.replaceAllUsesWith(CustomCI);\n    CB.eraseFromParent();\n    return true;\n  }\n  case Taint::WK_Suffixof: {\n    // int suffixof(const char *str, const char *suffix)\n    assert(CB.arg_size() == 2 && !FT->getReturnType()->isVoidTy());\n    TransformedFunction CustomFn = TF.TT.getCustomFunctionType(FT);\n    FunctionCallee DfswFn = TF.TT.Mod->getOrInsertFunction(\"__dfsw_suffixof\", CustomFn.TransformedType);\n\n    std::vector<Value *> Args;\n    for (unsigned i = 0; i < FT->getNumParams(); i++)\n      Args.push_back(CB.getArgOperand(i));\n    addShadowArguments(F, CB, Args, IRB);\n\n    CallInst *CustomCI = IRB.CreateCall(DfswFn, Args);\n\n    LoadInst *LabelLoad = IRB.CreateLoad(TF.TT.getShadowTy(FT->getReturnType()), TF.LabelReturnAlloca);\n    TF.setShadow(CustomCI, LabelLoad);\n\n    CB.replaceAllUsesWith(CustomCI);\n    CB.eraseFromParent();\n    return true;\n  }\n  case Taint::WK_Strcat: {\n    // char *strcat(char *dest, const char *src)\n    assert(CB.arg_size() == 2 && 
!FT->getReturnType()->isVoidTy());\n    TransformedFunction CustomFn = TF.TT.getCustomFunctionType(FT);\n    FunctionCallee DfswFn = TF.TT.Mod->getOrInsertFunction(\"__dfsw_strcat\", CustomFn.TransformedType);\n\n    std::vector<Value *> Args;\n    for (unsigned i = 0; i < FT->getNumParams(); i++)\n      Args.push_back(CB.getArgOperand(i));\n    addShadowArguments(F, CB, Args, IRB);\n\n    CallInst *CustomCI = IRB.CreateCall(DfswFn, Args);\n\n    LoadInst *LabelLoad = IRB.CreateLoad(TF.TT.getShadowTy(FT->getReturnType()), TF.LabelReturnAlloca);\n    TF.setShadow(CustomCI, LabelLoad);\n\n    CB.replaceAllUsesWith(CustomCI);\n    CB.eraseFromParent();\n    return true;\n  }\n  case Taint::WK_Strsub: {\n    // char *strsub(char *s, size_t len)\n    assert(CB.arg_size() == 3 && !FT->getReturnType()->isVoidTy());\n    TransformedFunction CustomFn = TF.TT.getCustomFunctionType(FT);\n    FunctionCallee DfswFn = TF.TT.Mod->getOrInsertFunction(\"__dfsw_strsub\", CustomFn.TransformedType);\n\n    std::vector<Value *> Args;\n    for (unsigned i = 0; i < FT->getNumParams(); i++)\n      Args.push_back(CB.getArgOperand(i));\n    addShadowArguments(F, CB, Args, IRB);\n\n    CallInst *CustomCI = IRB.CreateCall(DfswFn, Args);\n\n    LoadInst *LabelLoad = IRB.CreateLoad(TF.TT.getShadowTy(FT->getReturnType()), TF.LabelReturnAlloca);\n    TF.setShadow(CustomCI, LabelLoad);\n\n    CB.replaceAllUsesWith(CustomCI);\n    CB.eraseFromParent();\n    return true;\n  }\n  case Taint::WK_Custom:\n    // Don't try to handle invokes of custom functions, it's too complicated.\n    // Instead, invoke the dfsw$ wrapper, which will in turn call the __dfsw_\n    // wrapper.\n    CallInst *CI = dyn_cast<CallInst>(&CB);\n    if (!CI)\n      return false;\n\n    FunctionType *FT = F->getFunctionType();\n    TransformedFunction CustomFn = TF.TT.getCustomFunctionType(FT);\n    std::string CustomFName = \"__dfsw_\";\n    CustomFName += F->getName();\n    FunctionCallee CustomF =\n        
TF.TT.Mod->getOrInsertFunction(CustomFName, CustomFn.TransformedType);\n    if (Function *CustomFn = dyn_cast<Function>(CustomF.getCallee())) {\n      CustomFn->copyAttributesFrom(F);\n\n      // Custom functions returning non-void will write to the return label.\n      if (!FT->getReturnType()->isVoidTy()) {\n        CustomFn->removeFnAttrs(TF.TT.ReadOnlyNoneAttrs);\n      }\n    }\n\n    std::vector<Value *> Args;\n\n    // Adds non-variable arguments.\n    auto *I = CB.arg_begin();\n    for (unsigned N = FT->getNumParams(); N != 0; ++I, --N) {\n      Type *T = (*I)->getType();\n      FunctionType *ParamFT;\n      if (isa<PointerType>(T) &&\n          (ParamFT = dyn_cast<FunctionType>(T->getPointerElementType()))) {\n        std::string TName = \"dfst\";\n        TName += utostr(FT->getNumParams() - N);\n        TName += \"$\";\n        TName += F->getName();\n        Constant *Trampoline =\n            TF.TT.getOrBuildTrampolineFunction(ParamFT, TName);\n        Args.push_back(Trampoline);\n        Args.push_back(\n            IRB.CreateBitCast(*I, Type::getInt8PtrTy(*TF.TT.Ctx)));\n      } else {\n        Args.push_back(*I);\n      }\n    }\n\n    // Adds shadow arguments.\n    const unsigned ShadowArgStart = Args.size();\n    addShadowArguments(F, CB, Args, IRB);\n\n    // Adds variable arguments.\n    append_range(Args, drop_begin(CB.args(), FT->getNumParams()));\n\n    CallInst *CustomCI = IRB.CreateCall(CustomF, Args);\n    CustomCI->setCallingConv(CI->getCallingConv());\n    CustomCI->setAttributes(TransformFunctionAttributes(\n        CustomFn, CI->getContext(), CI->getAttributes()));\n\n    // Update the parameter attributes of the custom call instruction to\n    // zero extend the shadow parameters. 
This is required for targets\n    // which consider ShadowTy an illegal type.\n    for (unsigned N = 0; N < FT->getNumParams(); N++) {\n      const unsigned ArgNo = ShadowArgStart + N;\n      if (CustomCI->getArgOperand(ArgNo)->getType() ==\n          TF.TT.PrimitiveShadowTy) {\n        CustomCI->addParamAttr(ArgNo, Attribute::ZExt);\n      }\n    }\n\n    // Loads the return value shadow and origin.\n    Type *RetTy = FT->getReturnType();\n    if (!RetTy->isVoidTy()) {\n      // we don't collapse shadow\n      LoadInst *LabelLoad =\n          IRB.CreateLoad(TF.TT.getShadowTy(RetTy), TF.LabelReturnAlloca);\n      TF.setShadow(CustomCI, LabelLoad);\n\n    }\n\n    CI->replaceAllUsesWith(CustomCI);\n    CI->eraseFromParent();\n    return true;\n  }\n  return false;\n}\n\nvoid TaintVisitor::visitIntrinsicCallBase(Function *F, CallBase &CB) {\n  // filter some obvious ones\n  StringRef FN = F->getName();\n  if (FN.startswith(\"llvm.va_\") || // varabile length\n      FN.startswith(\"llvm.gc\")  || // garbaage collection\n      FN.startswith(\"llvm.experimental\") ||\n      FN.startswith(\"llvm.lifetime\")\n     ) {\n    return;\n  }\n  // intrinsic, check argument\n  bool NeedsInstrumentation = false;\n  for (unsigned I = 0, N = CB.arg_size(); I < N; ++I) {\n    Value *Shadow = TF.getShadow(CB.getArgOperand(I));\n    if (!TF.TT.isZeroShadow(Shadow)) {\n      NeedsInstrumentation = true;\n      break;\n    }\n  }\n  if (!NeedsInstrumentation)\n    return;\n\n  // FIXME: track intrinsic\n  return;\n}\n\nvoid TaintVisitor::visitCallBase(CallBase &CB) {\n  if (CB.isInlineAsm()) {\n    // FIXME: inline asm\n    return;\n  }\n\n  // handle intrinsics\n  Function *F = CB.getCalledFunction();\n  if (F && F->isIntrinsic()) {\n    visitIntrinsicCallBase(F, CB);\n    return;\n  }\n\n  // Calls to this function are synthesized in wrappers, and we shouldn't\n  // instrument them.\n  if (F == TF.TT.TaintVarargWrapperFn.getCallee()->stripPointerCasts())\n    return;\n\n  IRBuilder<> 
IRB(&CB);\n\n  // trace indirect call\n  if (CB.getCalledFunction() == nullptr) {\n    Value *Shadow = TF.getShadow(CB.getCalledOperand());\n    if (!TF.TT.isZeroShadow(Shadow))\n      IRB.CreateCall(TF.TT.TaintTraceIndirectCallFn, {Shadow});\n  }\n\n  DenseMap<Value *, Function *>::iterator UnwrappedFnIt =\n      TF.TT.UnwrappedFnMap.find(CB.getCalledOperand());\n  if (UnwrappedFnIt != TF.TT.UnwrappedFnMap.end()) {\n    if (visitWrappedCallBase(UnwrappedFnIt->second, CB))\n      return;\n  }\n\n  // reset IRB\n  IRB.SetInsertPoint(&CB);\n\n  FunctionType *FT = CB.getFunctionType();\n  const DataLayout &DL = getDataLayout();\n\n  // Stores argument shadows.\n  unsigned ArgOffset = 0;\n  for (unsigned I = 0, N = FT->getNumParams(); I != N; ++I) {\n    unsigned Size =\n        DL.getTypeAllocSize(TF.TT.getShadowTy(FT->getParamType(I)));\n    // Stop storing if arguments' size overflows. Inside a function, arguments\n    // after overflow have zero shadow values.\n    if (ArgOffset + Size > ArgTLSSize)\n      break;\n    Value *Arg = CB.getArgOperand(I);\n    auto *GV = dyn_cast<GlobalVariable>(Arg->stripPointerCasts());\n    Value *Shadow = GV ? 
TF.getShadowForGlobal(GV, IRB)\n                       : TF.getShadow(Arg);\n    IRB.CreateAlignedStore(Shadow,\n                           TF.getArgTLS(FT->getParamType(I), ArgOffset, IRB),\n                           ShadowTLSAlignment);\n    ArgOffset += alignTo(Size, ShadowTLSAlignment);\n  }\n\n  Instruction *Next = nullptr;\n  if (!CB.getType()->isVoidTy()) {\n    if (InvokeInst *II = dyn_cast<InvokeInst>(&CB)) {\n      if (II->getNormalDest()->getSinglePredecessor()) {\n        Next = &II->getNormalDest()->front();\n      } else {\n        BasicBlock *NewBB =\n            SplitEdge(II->getParent(), II->getNormalDest(), &TF.DT);\n        Next = &NewBB->front();\n      }\n    } else {\n      assert(CB.getIterator() != CB.getParent()->end());\n      Next = CB.getNextNode();\n    }\n\n    // Don't emit the epilogue for musttail call returns.\n    if (isa<CallInst>(CB) && cast<CallInst>(CB).isMustTailCall())\n      return;\n\n    // Loads the return value shadow.\n    IRBuilder<> NextIRB(Next);\n    unsigned Size = DL.getTypeAllocSize(TF.TT.getShadowTy(&CB));\n    if (Size > RetvalTLSSize) {\n      // Set overflowed return shadow to be zero.\n      TF.setShadow(&CB, TF.TT.getZeroShadow(&CB));\n    } else {\n      LoadInst *LI = NextIRB.CreateAlignedLoad(\n          TF.TT.getShadowTy(&CB), TF.getRetvalTLS(CB.getType(), NextIRB),\n          ShadowTLSAlignment, \"_dfsret\");\n      TF.SkipInsts.insert(LI);\n      TF.setShadow(&CB, LI);\n      TF.NonZeroChecks.push_back(LI);\n    }\n  }\n}\n\nvoid TaintVisitor::visitPHINode(PHINode &PN) {\n  Type *ShadowTy = TF.TT.getShadowTy(&PN);\n  PHINode *ShadowPN =\n      PHINode::Create(ShadowTy, PN.getNumIncomingValues(), \"\", &PN);\n\n  // Give the shadow phi node valid predecessors to fool SplitEdge into working.\n  Value *UndefShadow = UndefValue::get(ShadowTy);\n  for (BasicBlock *BB : PN.blocks())\n    ShadowPN->addIncoming(UndefShadow, BB);\n\n  TF.setShadow(&PN, ShadowPN);\n  TF.PHIFixups.push_back({&PN, 
ShadowPN});\n}\n\nstatic inline bool isLoopLatch(const BasicBlock *BB, const BasicBlock *Header) {\n  const BasicBlock *Succ = nullptr;\n  SmallVector<const BasicBlock*> Visited;\n  while (BB != Header) {\n    Visited.push_back(BB);\n    if ((Succ = BB->getSingleSuccessor()) == nullptr)\n      return false;\n    BB = Succ;\n    if (Visited.end() != std::find(Visited.begin(), Visited.end(), BB))\n      return false; // found a cycle\n  }\n  return true;\n}\n\nvoid TaintFunction::visitCondition(Value *Condition, Instruction *I) {\n  IRBuilder<> IRB(I);\n  // get operand\n  Value *Shadow = getShadow(Condition);\n  uint8_t flag = 0;\n  if (ClTraceLoop && isa<BranchInst>(I)) {\n    // check loop exit and latch\n    BasicBlock *BB = I->getParent();\n    Loop *L = LI->getLoopFor(BB);\n    if (L) {\n      BranchInst *BI = cast<BranchInst>(I);\n      BasicBlock *TB = I->getSuccessor(0); // true branch\n      BasicBlock *FB = I->getSuccessor(1); // false branch\n      if (isLoopLatch(TB, L->getHeader())) // True branch loop latch\n        flag |= TrueBranchLoopLatch; // return to the loop header\n      if (isLoopLatch(FB, L->getHeader())) // False branch loop latch\n        flag |= FalseBranchLoopLatch; // return to the loop header\n      if (!L->contains(TB)) // True branch loop exit\n        flag |= TrueBranchLoopExit;\n      if (!L->contains(FB)) // False branch loop exit\n        flag |= FalseBranchLoopExit;\n    }\n  }\n  // we are not interested if the condition is not tainted,\n  // except for loop exit\n  if (TT.isZeroShadow(Shadow) && (flag & LoopExitBranch) == 0)\n    return;\n  uint32_t cid = TT.getInstructionId(I);\n  if (cid == TT.InvalidInstructionId)\n    return; // XXX: forget about loop?\n  ConstantInt *LF = ConstantInt::get(TT.Int8Ty, flag);\n  ConstantInt *CID = ConstantInt::get(TT.Int32Ty, cid);\n  IRB.CreateCall(TT.TaintTraceCondFn, {Shadow, Condition, LF, CID});\n}\n\nvoid TaintVisitor::visitBranchInst(BranchInst &BR) {\n  if 
(BR.getMetadata(\"nosanitize\")) return;\n  if (BR.isUnconditional()) return;\n  TF.visitCondition(BR.getCondition(), &BR);\n}\n\nnamespace {\nclass TaintPass : public PassInfoMixin<TaintPass> {\nprivate:\n  std::vector<std::string> ABIListFiles;\n\npublic:\n  TaintPass(\n      const std::vector<std::string> &ABIListFiles = std::vector<std::string>())\n      : ABIListFiles(ABIListFiles) {}\n  PreservedAnalyses run(Module &M, ModuleAnalysisManager &MAM) {\n    if (Taint(ABIListFiles).runImpl(M)) {\n      return PreservedAnalyses::none();\n    }\n    return PreservedAnalyses::all();\n  }\n\n  static bool isRequired() { return true; }\n};\n}\n\nextern \"C\" ::llvm::PassPluginLibraryInfo LLVM_ATTRIBUTE_WEAK\nllvmGetPassPluginInfo() {\n  return {LLVM_PLUGIN_API_VERSION, \"TaintPass\", \"v1.1\",\n          [](PassBuilder &PB) {\n            PB.registerOptimizerLastEPCallback(\n                [](ModulePassManager &MPM, OptimizationLevel OL) {\n                  MPM.addPass(TaintPass());\n                });\n            PB.registerPipelineParsingCallback(\n                [](StringRef Name, ModulePassManager &MPM,\n                   ArrayRef<PassBuilder::PipelineElement>) {\n                  if (Name == \"taint\") {\n                    MPM.addPass(TaintPass());\n                    return true;\n                  }\n                  return false;\n                });\n          }};\n}\n"
  },
  {
    "path": "parsers/CMakeLists.txt",
    "content": "set(CMAKE_POSITION_INDEPENDENT_CODE ON)\nset(CMAKE_CXX_STANDARD 17)\n\nfind_package(boost_container CONFIG)\n\nif (NOT boost_container_FOUND)\n    message(FATAL_ERROR \"Failed to locate Boost\")\nendif()\n\n## parser\nadd_library(rgd-parser STATIC rgd-parser.cpp)\ntarget_include_directories(rgd-parser PRIVATE\n    ${CMAKE_CURRENT_SOURCE_DIR}/../runtime\n    ${Boost_INCLUDE_DIRS}\n)\ntarget_compile_options(rgd-parser PRIVATE\n    -O3 -g -mcx16 -march=native -fno-builtin-malloc -fno-builtin-calloc -fno-builtin-realloc -fno-builtin-free\n)\ntarget_link_libraries(rgd-parser PRIVATE\n    Boost::container\n)\n"
  },
  {
    "path": "parsers/rgd-parser.cpp",
    "content": "#include \"dfsan/dfsan.h\"\n\n#include \"ast.h\"\n#include \"task.h\"\n#include \"union_find.h\"\n#include \"parse-rgd.h\"\n\n#include <unordered_map>\n\nusing namespace rgd;\n\n#ifndef DEBUG\n#define DEBUG 0\n#endif\n\n#if DEBUG\n#define DEBUGF(_str...) do { fprintf(stderr, _str); } while (0)\n#else\n#undef DEBUGF\n#define DEBUGF(_str...) do { } while (0)\n#endif\n\n#ifndef WARNF\n#define WARNF(_str...) do { fprintf(stderr, _str); } while (0)\n// #define WARNF(x...) do { \\\n//     SAYF(cYEL \"[!] \" cBRI \"WARNING: \" cRST x); \\\n//     SAYF(cRST \"\\n\"); \\\n//   } while (0)\n#endif\n\n#define NEED_OFFLINE 0\n\n#if defined(__GNUC__)\nstatic inline bool (likely)(bool x) { return __builtin_expect((x), true); }\nstatic inline bool (unlikely)(bool x) { return __builtin_expect((x), false); }\n#else\nstatic inline bool (likely)(bool x) { return x; }\nstatic inline bool (unlikely)(bool x) { return x; }\n#endif\n\nstatic const std::unordered_map<unsigned, std::pair<unsigned, const char*> > OP_MAP {\n  {__dfsan::Extract, {rgd::Extract, \"extract\"}},\n  {__dfsan::Trunc,   {rgd::Extract, \"extract\"}},\n  {__dfsan::Concat,  {rgd::Concat, \"concat\"}},\n  {__dfsan::ZExt,    {rgd::ZExt, \"zext\"}},\n  {__dfsan::SExt,    {rgd::SExt, \"sext\"}},\n  {__dfsan::Add,     {rgd::Add, \"add\"}},\n  {__dfsan::Sub,     {rgd::Sub, \"sub\"}},\n  {__dfsan::Mul,     {rgd::Mul, \"mul\"}},\n  {__dfsan::UDiv,    {rgd::UDiv, \"udiv\"}},\n  {__dfsan::SDiv,    {rgd::SDiv, \"sdiv\"}},\n  {__dfsan::URem,    {rgd::URem, \"urem\"}},\n  {__dfsan::SRem,    {rgd::SRem, \"srem\"}},\n  {__dfsan::Shl,     {rgd::Shl, \"shl\"}},\n  {__dfsan::LShr,    {rgd::LShr, \"lshr\"}},\n  {__dfsan::AShr,    {rgd::AShr, \"ashr\"}},\n  {__dfsan::And,     {rgd::And, \"and\"}},\n  {__dfsan::Or,      {rgd::Or, \"or\"}},\n  {__dfsan::Xor,     {rgd::Xor, \"xor\"}},\n  // relational comparisons\n#define RELATIONAL_ICMP(cmp) (__dfsan::ICmp | (cmp << 8))\n  {RELATIONAL_ICMP(__dfsan::bveq),  {rgd::Equal, 
\"equal\"}},\n  {RELATIONAL_ICMP(__dfsan::bvneq), {rgd::Distinct, \"distinct\"}},\n  {RELATIONAL_ICMP(__dfsan::bvugt), {rgd::Ugt, \"ugt\"}},\n  {RELATIONAL_ICMP(__dfsan::bvuge), {rgd::Uge, \"uge\"}},\n  {RELATIONAL_ICMP(__dfsan::bvult), {rgd::Ult, \"ult\"}},\n  {RELATIONAL_ICMP(__dfsan::bvule), {rgd::Ule, \"ule\"}},\n  {RELATIONAL_ICMP(__dfsan::bvsgt), {rgd::Sgt, \"sgt\"}},\n  {RELATIONAL_ICMP(__dfsan::bvsge), {rgd::Sge, \"sge\"}},\n  {RELATIONAL_ICMP(__dfsan::bvslt), {rgd::Slt, \"slt\"}},\n  {RELATIONAL_ICMP(__dfsan::bvsle), {rgd::Sle, \"sle\"}},\n#undef RELATIONAL_ICMP\n};\n\nstatic inline bool is_rel_cmp(uint16_t op, __dfsan::predicate pred) {\n  return ((op & 0xff) == __dfsan::ICmp) && ((op >> 8) == pred);\n}\n\nstatic inline bool eval_icmp(uint16_t op, uint64_t op1, uint64_t op2) {\n  if ((op & 0xff) == __dfsan::ICmp) {\n    switch (op >> 8) {\n      case __dfsan::bveq: return op1 == op2;\n      case __dfsan::bvneq: return op1 != op2;\n      case __dfsan::bvugt: return op1 > op2;\n      case __dfsan::bvuge: return op1 >= op2;\n      case __dfsan::bvult: return op1 < op2;\n      case __dfsan::bvule: return op1 <= op2;\n      case __dfsan::bvsgt: return (int64_t)op1 > (int64_t)op2;\n      case __dfsan::bvsge: return (int64_t)op1 >= (int64_t)op2;\n      case __dfsan::bvslt: return (int64_t)op1 < (int64_t)op2;\n      case __dfsan::bvsle: return (int64_t)op1 <= (int64_t)op2;\n      default: return false;\n    }\n  }\n  return false;\n}\n\nstatic void printAst(FILE* f, const rgd::AstNode *node, int indent) {\n  fprintf(f, \"(%s, \", rgd::AstKindName[node->kind()]);\n  fprintf(f, \"%d, \", node->label());\n  fprintf(f, \"%d, \", node->bits());\n  for(int i = 0; i < node->children_size(); i++) {\n    printAst(f, &node->children(i), indent + 1);\n    if (i != node->children_size() - 1) {\n      fprintf(f, \", \");\n    }\n  }\n  fprintf(f, \")\\n\");\n}\n\nint RGDAstParser::restart(std::vector<symsan::input_t> &inputs) {\n  // save a copy of the inputs\n  inputs_cache 
= inputs;\n  // clear caches\n  memcmp_cache_.clear(); // inherited from ASTParser\n  root_expr_cache.clear();\n  constraint_cache.clear();\n  ast_size_cache.clear();\n  nested_cmp_cache.clear();\n  concretize_node.clear();\n  branch_to_inputs.clear();\n\n  // reset data-flow dependencies\n  input_size_ = 0;\n  for (auto &i: inputs) {\n    input_size_ += i.second;\n  }\n  data_flow_deps.reset(input_size_);\n  for (auto &s: input_to_branches) {\n    s.clear();\n  }\n  input_to_branches.resize(input_size_);\n\n  return 0;\n}\n\nuint32_t RGDAstParser::map_arg(uint32_t input_id, uint32_t offset, uint32_t length,\n                               constraint_t constraint) {\n  uint32_t hash = 0;\n  auto *buf = inputs_cache[input_id].first;\n  for (uint32_t i = 0; i < length; ++i, ++offset) {\n    uint8_t val = buf[offset];\n    uint32_t arg_index = 0;\n    auto itr = constraint->local_map.find(offset); // FIXME: support input_id\n    if (itr == constraint->local_map.end()) {\n      arg_index = (uint32_t)constraint->input_args.size();\n      constraint->inputs.insert({offset, val});\n      constraint->local_map[offset] = arg_index;\n      constraint->input_args.push_back(std::make_pair(true, 0)); // 0 is to be filled in the aggragation\n    } else {\n      arg_index = itr->second;\n    }\n    if (i == 0) {\n      constraint->shapes[offset] = length;\n      hash = rgd::xxhash(length * 8, rgd::Read, arg_index);\n    } else {\n      constraint->shapes[offset] = 0;\n    }\n  }\n  return hash;\n}\n\n// this combines both AST construction and arg mapping\n[[gnu::hot]]\nbool RGDAstParser::do_uta_rel(dfsan_label label, rgd::AstNode *ret,\n                              constraint_t constraint,\n                              std::unordered_set<dfsan_label> &visited) {\n\n  // needed for recursion?\n  if (unlikely(label < CONST_OFFSET || label == __dfsan::kInitializingLabel)) {\n    WARNF(\"invalid label: %d\\n\", label);\n    return false;\n  }\n\n  dfsan_label_info *info = 
get_label_info(label);\n  DEBUGF(\"do_uta_real: %u = (l1:%u, l2:%u, op:%u, size:%u, op1:%lu, op2:%lu)\\n\",\n         label, info->l1, info->l2, info->op, info->size, info->op1.i, info->op2.i);\n\n  // we can't really reuse AST nodes across constraints,\n  // but we still need to avoid duplicate nodes within a constraint\n  if (visited.count(label)) {\n    // if a node has been visited, just record its label without expanding\n    ret->set_label(label);\n    ret->set_bits(info->size);\n    return true;\n  }\n\n  // terminal node\n  if (info->op == 0) {\n    // input\n    ret->set_kind(rgd::Read);\n    ret->set_bits(8);\n    ret->set_label(label);\n    uint32_t input_id = info->op2.i;\n    uint32_t offset = info->op1.i;\n    // this check should have been done during label scanning\n    // if (unlikely(offset >= buf_size)) {\n    //   WARNF(\"invalid offset: %lu >= %lu\\n\", offset, buf_size);\n    //   return false;\n    // }\n    ret->set_index(offset);\n    // map arg\n    uint32_t hash = map_arg(input_id, offset, 1, constraint);\n    ret->set_hash(hash);\n#if NEED_OFFLINE\n    std::string val;\n    rgd::buf_to_hex_string(&buf[offset], 1, val);\n    ret->set_value(std::move(val));\n    ret->set_name(\"read\");\n#endif\n    return true;\n  } else if (info->op == __dfsan::Load) {\n    ret->set_kind(rgd::Read);\n    ret->set_bits(info->l2 * 8);\n    ret->set_label(label);\n    uint32_t input_id = get_label_info(info->l1)->op2.i;\n    uint32_t offset = get_label_info(info->l1)->op1.i;\n    // this check should have been done during label scanning\n    // if (unlikely(offset + info->l2 > buf_size)) {\n    //   WARNF(\"invalid offset: %lu + %u > %lu\\n\", offset, info->l2, buf_size);\n    //   return false;\n    // }\n    ret->set_index(offset);\n    // map arg\n    uint32_t hash = map_arg(input_id, offset, info->l2, constraint);\n    ret->set_hash(hash);\n#if NEED_OFFLINE\n    std::string val;\n    rgd::buf_to_hex_string(&buf[offset], info->l2, val);\n    
ret->set_value(std::move(val));\n    ret->set_name(\"read\");\n#endif\n    return true;\n  } else if (info->op == __dfsan::fmemcmp) {\n    rgd::AstNode *s1 = ret->add_children();\n    if (unlikely(s1 == nullptr)) {\n      WARNF(\"failed to add children\\n\");\n      return false;\n    }\n    if (info->l1 >= CONST_OFFSET) {\n      if (!do_uta_rel(info->l1, s1, constraint, visited)) {\n        return false;\n      }\n      visited.insert(info->l1);\n    } else {\n      // s1 is a constant array\n      s1->set_kind(rgd::Constant);\n      s1->set_bits(info->size * 8);\n      s1->set_label(0);\n      // use constant args to pass the array\n      auto itr = memcmp_cache_.find(label);\n      if (unlikely(itr == memcmp_cache_.end())) {\n        WARNF(\"memcmp target not found for label %u\\n\", label);\n        return false;\n      }\n      uint32_t arg_index = (uint32_t)constraint->input_args.size();\n      s1->set_index(arg_index);\n      uint16_t chunks = info->size / 8;\n      uint16_t remain = info->size % 8;\n      uint64_t val = 0;\n      for (uint16_t i = 0; i < chunks; i++) {\n        val = *(uint64_t*)&(itr->second.get()[i * 8]);\n        constraint->input_args.push_back(std::make_pair(false, val));\n        constraint->const_num += 1;\n        DEBUGF(\"memcmp constant chunk %d = 0x%lx\\n\", i, val);\n      }\n      if (remain) {\n        val = 0;\n        for (uint16_t i = 0; i < remain; i++) {\n          val |= (uint64_t)itr->second.get()[chunks * 8 + i] << (i * 8);\n        }\n        constraint->input_args.push_back(std::make_pair(false, val));\n        constraint->const_num += 1;\n        DEBUGF(\"memcmp constant remain = %lu\\n\", val);\n      }\n      uint32_t hash = rgd::xxhash(info->size, rgd::Constant, arg_index);\n      s1->set_hash(hash);\n#if NEED_OFFLINE\n      std::string val;\n      rgd::buf_to_hex_string(itr->second, info->size, val);\n      ret->set_value(std::move(val));\n      ret->set_name(\"constant\");\n#endif\n    }\n    rgd::AstNode *s2 = 
ret->add_children();\n    if (unlikely(s2 == nullptr)) {\n      WARNF(\"failed to add children\\n\");\n      return false;\n    }\n    if (!do_uta_rel(info->l2, s2, constraint, visited)) {\n      return false;\n    }\n    visited.insert(info->l2);\n    ret->set_kind(rgd::Memcmp);\n    ret->set_bits(1);\n    ret->set_label(label);\n    uint32_t hash = rgd::xxhash(s1->hash(), rgd::Memcmp, s2->hash());\n    ret->set_hash(hash);\n#if NEED_OFFLINE\n    ret->set_name(\"memcmp\");\n#endif\n    return true;\n  } else if (info->op == __dfsan::fatoi) {\n    if (unlikely(info->l1 != 0 || info->l2 < CONST_OFFSET)) {\n      WARNF(\"invalid atoi label %u\\n\", label);\n      return false;\n    }\n    dfsan_label_info *src = get_label_info(info->l2);\n    if (unlikely(src->op != __dfsan::Load)) {\n      WARNF(\"invalid atoi source label %u, op = %u\\n\", info->l2, src->op);\n      return false;\n    }\n    visited.insert(info->l2);\n    uint32_t input_id = get_label_info(src->l1)->op2.i;\n    uint32_t offset = get_label_info(src->l1)->op1.i;\n    // this check should have been done during label scanning\n    // if (unlikely(offset >= buf_size)) {\n    //   WARNF(\"invalid offset: %lu >= %lu\\n\", offset, buf_size);\n    //   return false;\n    // }\n    ret->set_bits(info->size);\n    ret->set_label(label);\n    ret->set_index(offset);\n    // special handling for atoi, we are introducing the result/output of\n    // atoi as fake inputs, and solve constraints over the output,\n    // once solved, we convert it back to string\n    // however, because the input is fake, we need to map it specially\n    ret->set_kind(rgd::Read);\n    auto itr = constraint->local_map.find(offset); // FIXME: support input_id\n    if (itr != constraint->local_map.end()) {\n      WARNF(\"atoi inputs should not be involved in other constraints\\n\");\n      return false;\n    }\n    uint32_t hash = 0;\n    uint32_t length = info->size / 8; // bits to bytes\n    // record the offset, base, and original 
length\n    constraint->atoi_info[offset] = std::make_tuple(length, (uint32_t)info->op1.i, (uint32_t)info->op2.i);\n    for (uint32_t i = 0; i < length; ++i, ++offset) {\n      uint8_t val = 0; // XXX: use 0 as initial value?\n      // because this is fake input, we always map it to a new index\n      uint32_t arg_index = (uint32_t)constraint->input_args.size();\n      constraint->inputs.insert({offset, val});\n      constraint->local_map[offset] = arg_index; // FIXME: support input_id\n      constraint->input_args.push_back(std::make_pair(true, 0)); // 0 is to be filled in the aggragation\n      if (i == 0) {\n        constraint->shapes[offset] = length;\n        // from solver's perspective, atoi and read are the same\n        // they both introduce a new symbolic input as arg_index\n        hash = rgd::xxhash(length * 8, rgd::Read, arg_index);\n      } else {\n        constraint->shapes[offset] = 0;\n      }\n    }\n    ret->set_hash(hash);\n#if NEED_OFFLINE\n    ret->set_name(\"atoi\");\n#endif\n    return true;\n  } else if (info->op == __dfsan::fsize) {\n    // do nothing now\n    WARNF(\"fsize not supported yet\\n\");\n    return false;\n  }\n\n  // common ops, make sure no special ops\n  auto op_itr = OP_MAP.find(info->op);\n  if (op_itr == OP_MAP.end()) {\n    WARNF(\"invalid op: %u\\n\", info->op);\n    return false;\n  }\n  ret->set_kind(op_itr->second.first);\n  ret->set_bits(info->size);\n  ret->set_label(label);\n#if NEED_OFFLINE\n  ret->set_name(op_itr->second.second);\n#endif\n\n  // record op\n  constraint->ops[ret->kind()] = true;\n\n  // in case we needs concretization\n  uint8_t needs_concretization = 0;\n  auto node_itr = concretize_node.find(label);\n  if (node_itr != concretize_node.end()) {\n    needs_concretization = node_itr->second;\n  }\n\n  // now we visit the children\n  rgd::AstNode *left = ret->add_children();\n  if (unlikely(left == nullptr)) {\n    WARNF(\"failed to add children\\n\");\n    return false;\n  }\n  if 
(likely(needs_concretization != 1) && (info->l1 >= CONST_OFFSET)) {\n    if (!do_uta_rel(info->l1, left, constraint, visited)) {\n      return false;\n    }\n    visited.insert(info->l1);\n  } else {\n    if (unlikely(needs_concretization)) {\n      if (unlikely(!rgd::isRelationalKind(ret->kind()))) {\n        WARNF(\"invalid kind for concretization %u\\n\", ret->kind());\n        return false;\n      }\n    }\n    // constant\n    left->set_kind(rgd::Constant);\n    left->set_label(0);\n    uint32_t size = info->size;\n    // size of concat the sum of the two operands\n    // to get the size of the constant, we need to subtract the size\n    // of the other operand\n    if (info->op == __dfsan::Concat) {\n      if (unlikely(info->l2 == 0)) {\n        WARNF(\"invalid concat node %u\\n\", info->l2);\n        return false;\n      }\n      size -= get_label_info(info->l2)->size;\n    }\n    left->set_bits(size);\n    // map args\n    uint32_t arg_index = (uint32_t)constraint->input_args.size();\n    left->set_index(arg_index);\n    constraint->input_args.push_back(std::make_pair(false, info->op1.i));\n    constraint->const_num += 1;\n    uint32_t hash = rgd::xxhash(size, rgd::Constant, arg_index);\n    left->set_hash(hash);\n#if NEED_OFFLINE\n    left->set_value(std::to_string(info->op1.i));\n    left->set_name(\"constant\");\n#endif\n  }\n\n  // unary ops\n  if (info->op == __dfsan::ZExt || info->op == __dfsan::SExt ||\n      info->op == __dfsan::Extract || info->op == __dfsan::Trunc) {\n    uint32_t hash = rgd::xxhash(info->size, ret->kind(), left->hash());\n    ret->set_hash(hash);\n    uint64_t offset = info->op == __dfsan::Extract ? 
info->op2.i : 0;\n    ret->set_index(offset);\n    return true;\n  }\n\n  rgd::AstNode *right = ret->add_children();\n  if (unlikely(right == nullptr)) {\n    WARNF(\"failed to add children\\n\");\n    return false;\n  }\n  if (likely(needs_concretization != 2) && (info->l2 >= CONST_OFFSET)) {\n    if (!do_uta_rel(info->l2, right, constraint, visited)) {\n      return false;\n    }\n    visited.insert(info->l2);\n  } else {\n    if (unlikely(needs_concretization)) {\n      if (unlikely(!rgd::isRelationalKind(ret->kind()))) {\n        WARNF(\"invalid kind for concretization %u\\n\", ret->kind());\n        return false;\n      }\n    }\n    // constant\n    right->set_kind(rgd::Constant);\n    right->set_label(0);\n    uint32_t size = info->size;\n    // size of concat the sum of the two operands\n    // to get the size of the constant, we need to subtract the size\n    // of the other operand\n    if (info->op == __dfsan::Concat) {\n      if (unlikely(info->l1 == 0)) {\n        WARNF(\"invalid concat node %u\\n\", info->l1);\n        return false;\n      }\n      size -= get_label_info(info->l1)->size;\n    }\n    right->set_bits(size);\n    // map args\n    uint32_t arg_index = (uint32_t)constraint->input_args.size();\n    right->set_index(arg_index);\n    constraint->input_args.push_back(std::make_pair(false, info->op2.i));\n    constraint->const_num += 1;\n    uint32_t hash = rgd::xxhash(size, rgd::Constant, arg_index);\n    right->set_hash(hash);\n#if NEED_OFFLINE\n    right->set_value(std::to_string(info->op1.i));\n    right->set_name(\"constant\");\n#endif\n  }\n\n  // record comparison operands\n  if (rgd::isRelationalKind(ret->kind())) {\n    constraint->op1 = info->op1.i;\n    constraint->op2 = info->op2.i;\n  }\n\n  // binary ops, we don't really care about comparison ops in jigsaw,\n  // as long as the operands are the same, we can reuse the AST/function\n  uint32_t kind = rgd::isRelationalKind(ret->kind()) ? 
rgd::Bool : ret->kind();\n  uint32_t hash = rgd::xxhash(left->hash(), (kind << 16) | ret->bits(), right->hash());\n  ret->set_hash(hash);\n\n  return true;\n}\n\n[[gnu::hot]]\nRGDAstParser::constraint_t RGDAstParser::parse_constraint(dfsan_label label) {\n  DEBUGF(\"constructing constraint for label %u\\n\", label);\n  // make sure root is a comparison node\n  // XXX: root should never go oob?\n  dfsan_label_info *info = get_label_info(label);\n  if (unlikely(((info->op & 0xff) != __dfsan::ICmp) && (info->op != __dfsan::fmemcmp))) {\n    WARNF(\"invalid root node %u, non-comparison root op: %u\\n\", label, info->op);\n    return nullptr;\n  }\n\n  // retrieve the ast size\n  if (unlikely(ast_size_cache.size() <= label)) {\n    WARNF(\"invalid label %u, larger than ast_size_cache: %lu\\n\", label, ast_size_cache.size());\n    return nullptr;\n  }\n  auto size = ast_size_cache.at(label);\n  if (unlikely(size == 0)) {\n    WARNF(\"invalid label %u, ast_size_cache is 0\\n\", label);\n    return nullptr;\n  }\n  std::unordered_set<dfsan_label> visited;\n  try {\n    constraint_t constraint = std::make_shared<rgd::Constraint>(size);\n    if (!do_uta_rel(label, constraint->ast.get(), constraint, visited)) {\n      return nullptr;\n    }\n    return constraint;\n  } catch (std::bad_alloc &e) {\n    WARNF(\"failed to allocate memory for constraint\\n\");\n    return nullptr;\n  } catch (std::out_of_range &e) {\n    WARNF(\"AST %u goes out of range at %s\\n\", label, e.what());\n    return nullptr;\n  }\n}\n\n[[gnu::hot]]\ntask_t RGDAstParser::construct_task(const clause_t &clause) {\n  task_t task = std::make_shared<rgd::SearchTask>();\n  for (auto const& node: clause) {\n    auto itr = constraint_cache.find(node->label());\n    if (itr != constraint_cache.end()) {\n      task->add_constraint(itr->second, node->kind());\n      continue;\n    }\n    // save the comparison op because we may have negated it\n    // during transformation\n    constraint_t constraint = 
parse_constraint(node->label());\n    // to maximize the resuability of the AST, the relational operator\n    // is recorded elsewhere\n    if (likely(constraint != nullptr)) {\n      task->add_constraint(constraint, node->kind());\n      constraint_cache.insert({node->label(), constraint});\n    }\n  }\n  if (!task->empty()) {\n    task->finalize();\n    return task;\n  }\n  return nullptr;\n}\n\n// sometimes llvm will zext bool\ndfsan_label RGDAstParser::strip_zext(dfsan_label label) {\n  dfsan_label_info *info = get_label_info(label);\n  while (info->op == __dfsan::ZExt) {\n    dfsan_label child = info->l1;\n    info = get_label_info(child);\n    if (info->size == 1) {\n      // extending a boolean value\n      return child;\n    } else if ((info->op & 0xff) == __dfsan::ICmp || info->op == __dfsan::fmemcmp) {\n      // extending the result of icmp or memcmp\n      return child;\n    }\n  }\n  return label;\n}\n\n[[gnu::hot]]\nint RGDAstParser::find_roots(dfsan_label label, AstNode *ret,\n                             std::unordered_set<dfsan_label> &subroots) {\n  // assume the root label has been checked by the caller\n  // if (label < CONST_OFFSET || label == kInitializingLabel) {\n  //   WARNF(\"invalid label: %d\\n\", label);\n  //   return INVALID_NODE;\n  // }\n\n  std::vector<dfsan_label> stack;\n  dfsan_label root = label;\n  dfsan_label prev = 0;\n  std::vector<AstNode*> node_stack;\n  AstNode *root_node = ret;\n  std::unordered_set<dfsan_label> visited;\n\n  try{\n  while (root != 0 || !stack.empty()) {\n    if (root != 0) {\n      // check if the node has been visited before\n      if (visited.find(root) != visited.end()) {\n        // already visited, skip the subtree\n        prev = root;\n        root = 0;\n        continue;\n      }\n      // mark to be visit in the future, for in-order and post-order visitors\n      stack.push_back(root);\n      node_stack.push_back(root_node);\n      auto *info = get_label_info(root);\n      if 
(nested_cmp_cache[info->l1] == 0) {\n        // no nested comparison in the left child, stop going down\n        // again, we only collect a partial AST with comparison nodes as leafs\n        // so the traversal should stop before reaching any actual leaf node\n        root = 0;\n      } else {\n        root = strip_zext(info->l1);\n        if (root) {\n          // create a child node before going down\n          root_node = root_node->add_children();\n          if (unlikely(root_node == nullptr)) {\n            WARNF(\"failed to add children\\n\");\n            return INVALID_NODE;\n          }\n        }\n      }\n    } else {\n      // we have reached some leaf node, going up the tree\n      auto curr = stack.back();\n      auto info = get_label_info(curr);\n      auto zsl2 = strip_zext(info->l2);\n      if (nested_cmp_cache[zsl2] > 0 && prev != zsl2) {\n        // we have a right child, and we haven't visited it yet,\n        // and there is a nested comparison, going down the right tree\n        root = zsl2;\n        root_node = node_stack.back()->add_children();\n        if (unlikely(root_node == nullptr)) {\n          WARNF(\"failed to add children\\n\");\n          return INVALID_NODE;\n        }\n      } else {\n        DEBUGF(\"label %d, l1 %d, l2 %d, op %d, size %d, op1 %ld, op2 %ld\\n\",\n               curr, info->l1, info->l2, info->op, info->size, info->op1.i, info->op2.i);\n        // both children nodes have been visited, process the node (post-order)\n        auto node = node_stack.back();\n\n        if (info->op == __dfsan::Not) {\n          DEBUGF(\"simplify not: %d, %d\\n\", info->l2, info->size);\n          if (unlikely(node->children_size() != 1)) {\n            WARNF(\"child node size != 1\\n\");\n            return INVALID_NODE;\n          }\n          if (unlikely(info->size != 1)) {\n            WARNF(\"info size != 1\\n\");\n            return INVALID_NODE;\n          }\n          rgd::AstNode *child = node->mutable_children(0);\n      
    node->set_bits(1);\n          if (child->kind() == rgd::Bool) {\n            node->set_kind(rgd::Bool);\n            node->set_boolvalue(!child->boolvalue());\n            node->clear_children();\n          } else {\n            node->set_kind(rgd::LNot);\n          }\n        } else if (info->op == __dfsan::And) {\n          // if And apprears, it must be LAnd, try to simplify\n          DEBUGF(\"simplify land: %d LAnd %d, %d\\n\", info->l1, info->l2, info->size);\n          if (unlikely(node->children_size() == 0)) {\n            WARNF(\"child node size == 0\\n\");\n            return INVALID_NODE;\n          }\n          if (unlikely(info->size != 1)) {\n            WARNF(\"info size != 1\\n\");\n            return INVALID_NODE;\n          }\n          uint32_t child = 0;\n          rgd::AstNode *left = nullptr;\n          rgd::AstNode *right = nullptr;\n          if (nested_cmp_cache[info->l1] > 0) {\n            left = node->mutable_children(0);\n            child = 1; // if left child exists, rhs will be child 1\n          }\n          if (nested_cmp_cache[info->l2] > 0) {\n            right = node->mutable_children(child);\n          }\n          node->set_bits(1);\n\n          if (unlikely(info->l1 == 0)) {\n            // lhs is a constant\n            if (info->op1.i == 0) { // 0 LAnd x = 0\n              node->set_kind(rgd::Bool);\n              node->set_boolvalue(0);\n              node->clear_children();\n            } else if (info->op1.i == 1) { // 1 LAnd x = x\n              if (unlikely(right == nullptr)) {\n                WARNF(\"right child is null\\n\");\n                return INVALID_NODE;\n              }\n              node->CopyFrom(*right);\n            } else {\n              WARNF(\"invalid constant %ld\\n\", info->op1.i);\n              return INVALID_NODE;\n            }\n          } else {\n            if (unlikely(left == nullptr)) {\n              WARNF(\"left child is null\\n\");\n              return INVALID_NODE;\n          
  }\n            if (unlikely(right == nullptr)) {\n              WARNF(\"right child is null\\n\");\n              return INVALID_NODE;\n            }\n            // check for constant\n            if (left->kind() == rgd::Bool) {\n              if (left->boolvalue() == 0) { // 0 LAnd x = 0\n                node->set_kind(rgd::Bool);\n                node->set_boolvalue(0);\n                node->clear_children();\n              } else if (right->kind() == rgd::Bool) {\n                // both lhs and rhs are constants\n                node->set_kind(rgd::Bool);\n                node->set_boolvalue(right->boolvalue()); // 1 LAnd b = b\n                node->clear_children();\n              } else { // 1 LAnd x = x\n                // lhs is 1, rhs is not\n                node->CopyFrom(*right);\n              }\n            } else if (right->kind() == rgd::Bool) {\n              // lhs is not a constant, check rhs\n              if (right->boolvalue() == 0) { // x LAnd 0 = 0\n                node->set_kind(rgd::Bool);\n                node->set_boolvalue(0);\n                node->clear_children();\n              } else { // x LAnd 1 = x\n                // rhs is 1, lhs is not\n                node->CopyFrom(*left);\n              }\n            } else {\n              // both sides are symbolic\n              node->set_kind(rgd::LAnd);\n            }\n          }\n        } else if (info->op == __dfsan::Or) {\n          DEBUGF(\"simplify lor: %d LOr %d, %d\\n\", info->l1, info->l2, info->size);\n          if (unlikely(node->children_size() == 0)) {\n            WARNF(\"child node size == 0\\n\");\n            return INVALID_NODE;\n          }\n          if (unlikely(info->size != 1)) {\n            WARNF(\"info size != 1\\n\");\n            return INVALID_NODE;\n          }\n          uint32_t child = 0;\n          rgd::AstNode *left = nullptr;\n          rgd::AstNode *right = nullptr;\n          if (nested_cmp_cache[info->l1] > 0) {\n            left = 
node->mutable_children(0);\n            child = 1; // if left child exists, rhs will be child 1\n          }\n          if (nested_cmp_cache[info->l2] > 0) {\n            right = node->mutable_children(child);\n          }\n          node->set_bits(1);\n\n          if (unlikely(info->l1 == 0)) {\n            // lhs is a constant\n            if (info->op1.i == 1) { // x LOr 1 = 1\n              node->set_kind(rgd::Bool);\n              node->set_boolvalue(1);\n              node->clear_children();\n            } else if (info->op1.i == 0) { // 0 LOr x = x\n              if (unlikely(right == nullptr)) {\n                WARNF(\"right child is null\\n\");\n                return INVALID_NODE;\n              }\n              node->CopyFrom(*right);\n            } else {\n              WARNF(\"invalid constant %ld\\n\", info->op1.i);\n              return INVALID_NODE;\n            }\n          } else {\n            if (unlikely(left == nullptr)) {\n              WARNF(\"left child is null\\n\");\n              return INVALID_NODE;\n            }\n            if (unlikely(right == nullptr)) {\n              WARNF(\"right child is null\\n\");\n              return INVALID_NODE;\n            }\n            // check for constant\n            if (left->kind() == rgd::Bool) {\n              if (left->boolvalue() == 1) { // 1 LOr x = 1\n                node->set_kind(rgd::Bool);\n                node->set_boolvalue(1);\n                node->clear_children();\n              } else if (right->kind() == rgd::Bool) {\n                // both lhs and rhs are constants\n                node->set_kind(rgd::Bool);\n                node->set_boolvalue(right->boolvalue()); // 0 LOr b = b\n                node->clear_children();\n              } else { // 0 LOr x = x\n                // lhs is 0, rhs is not\n                node->CopyFrom(*right);\n              }\n            } else if (right->kind() == rgd::Bool) {\n              if (right->boolvalue() == 1) { // x LOr 1 = 1\n      
          node->set_kind(rgd::Bool);\n                node->set_boolvalue(1);\n                node->clear_children();\n              } else { // x LOr 0 = x\n                // rhs is 0, lhs is not\n                node->CopyFrom(*left);\n              }\n            } else {\n              // both sides are symbolic\n              node->set_kind(rgd::LOr);\n            }\n          }\n        } else if (info->op == __dfsan::Xor) {\n          DEBUGF(\"simplify lxor: %d LXOr %d, %d\\n\", info->l1, info->l2, info->size);\n          if (unlikely(node->children_size() == 0)) {\n            WARNF(\"child node size == 0\\n\");\n            return INVALID_NODE;\n          }\n          if (unlikely(info->size != 1)) {\n            WARNF(\"info size != 1\\n\");\n            return INVALID_NODE;\n          }\n          uint32_t child = 0;\n          rgd::AstNode *left = nullptr;\n          rgd::AstNode *right = nullptr;\n          if (nested_cmp_cache[info->l1] > 0) {\n            left = node->mutable_children(0);\n            child = 1; // if left child exists, rhs will be child 1\n          }\n          if (nested_cmp_cache[info->l2] > 0) {\n            right = node->mutable_children(child);\n          }\n          node->set_bits(1);\n\n          if (likely(info->l1 == 0)) {\n            // lhs is a constant\n            if (unlikely(right == nullptr)) {\n              WARNF(\"right child is null\\n\");\n              return INVALID_NODE;\n            }\n            if (unlikely(right->kind() == rgd::Bool)) {\n              // rhs is a constant\n              node->set_kind(rgd::Bool);\n              node->set_boolvalue(right->boolvalue() ^ (uint32_t)info->op1.i);\n              node->clear_children();\n            } else {\n              // rhs is symbolic\n              if (info->op1.i == 1) { // 1 LXor x = LNot x\n                node->set_kind(rgd::LNot);\n              } else { // 0 LXor x = x\n                node->CopyFrom(*right);\n              }\n            }\n 
         } else {\n            if (unlikely(left == nullptr)) {\n              WARNF(\"left child is null\\n\");\n              return INVALID_NODE;\n            }\n            if (unlikely(right == nullptr)) {\n              WARNF(\"right child is null\\n\");\n              return INVALID_NODE;\n            }\n            // check for constant\n            if (unlikely(left->kind() == rgd::Bool)) {\n              if (unlikely(right->kind() == rgd::Bool)) {\n                // both lhs and rhs are constants\n                node->set_kind(rgd::Bool);\n                node->set_boolvalue(right->boolvalue() ^ left->boolvalue());\n                node->clear_children();\n              } else if (left->boolvalue() == 0) { // 0 LXor x = x\n                node->CopyFrom(*right);\n              } else { // 1 LXor x = LNot x\n                node->set_kind(rgd::LNot);\n              }\n            } else if (unlikely(right->kind() == rgd::Bool)) {\n              // rhs is constant, lhs is not\n              if (right->boolvalue() == 0) { // x LXor 0 = x\n                node->CopyFrom(*left);\n              } else { // x LXor 1 = LNot x\n                node->set_kind(rgd::LNot);\n              }\n            } else {\n              // both sides are symbolic\n              node->set_kind(rgd::Xor);\n            }\n          }\n        } else if ((info->op & 0xff) == __dfsan::ICmp) {\n          // cmp node\n          node->set_bits(1);\n          if (likely(node->children_size() == 0)) {\n            // if the node has no children, it's a leaf node\n            // check size, concretize if too large\n            auto size = ast_size_cache.at(curr);\n            // load previous value as previous concretization could have\n            // changed the ast size used for allocation\n            auto itr = concretize_node.find(curr);\n            uint8_t concretize = (itr != concretize_node.end() ? 
itr->second : 0);\n            if (size > max_ast_size_) {\n              DEBUGF(\"AST size too large: %d = %u\\n\", curr, size);\n              auto left_size = ast_size_cache.at(info->l1);\n              auto right_size = ast_size_cache.at(info->l2);\n              if (left_size > max_ast_size_) {\n                // concretize left\n                concretize |= 1;\n                // update new size\n                size -= (left_size - 1);\n              }\n              if (right_size > max_ast_size_) {\n                // concretize right\n                concretize |= 2;\n                // update new size\n                size -= (right_size - 1);\n              }\n              DEBUGF(\"new size: %d = %u\\n\", curr, size);\n              ast_size_cache[curr] = size;\n              concretize_node[curr] = concretize;\n            }\n\n            // check for concrete ops\n            uint8_t concrete_ops = concretize;\n            concrete_ops |= info->l1 == 0 ? 1 : 0;\n            concrete_ops |= info->l2 == 0 ? 
2 : 0;\n            if (concrete_ops == 3) {\n              // well, both sides have been concretized, simplify the node\n              node->set_kind(rgd::Bool);\n              node->set_boolvalue(eval_icmp(info->op, info->op1.i, info->op2.i));\n            } else {\n              auto itr = OP_MAP.find(info->op);\n              if (unlikely(itr == OP_MAP.end())) {\n                WARNF(\"invalid icmp op: %d\\n\", info->op);\n                return INVALID_NODE;\n              }\n              node->set_kind(itr->second.first);\n              node->set_label(curr);\n#ifdef DEBUG\n              subroots.insert(curr);\n#endif\n            }\n          } else if (node->children_size() == 1) {\n            // one side has another icmp, must be simplifiable\n            if (!is_rel_cmp(info->op, __dfsan::bveq) && !is_rel_cmp(info->op, __dfsan::bvneq)) {\n              WARNF(\"unexpected icmp: %d\\n\", info->op);\n              // unexpected icmp, set as a constant boolean\n              node->set_kind(rgd::Bool);\n              node->set_boolvalue(eval_icmp(info->op, info->op1.i, info->op2.i));\n            } else {\n              if (nested_cmp_cache[info->l1]) {\n                // nested icmp in the lhs\n                rgd::AstNode *left = node->mutable_children(0);\n                if (unlikely(left->bits() != 1)) {\n                  WARNF(\"nested icmp lhs bits != 1\\n\");\n                  return INVALID_NODE;\n                }\n                if (likely(info->l2 == 0)) {\n                  if (is_rel_cmp(info->op, __dfsan::bveq)) {\n                    if (info->op2.i == 1) { // checking bool == true\n                      node->CopyFrom(*left);\n                    } else { // checking bool == false\n                      node->set_kind(rgd::LNot);\n                    }\n                  } else { // bvneq\n                    if (info->op2.i == 0) { // checking bool != false\n                      node->CopyFrom(*left);\n                    } else { // 
checking bool != true\n                      node->set_kind(rgd::LNot);\n                    }\n                  }\n                } else {\n                  // l2 != 0, bool icmp bool ?!\n                  WARNF(\"bool icmp bool ?!\\n\");\n                  node->set_kind(rgd::Bool);\n                  node->set_boolvalue(0);\n                  node->clear_children();\n                }\n              } else if (nested_cmp_cache[info->l2] > 0) {\n                // nested icmp in the rhs\n                rgd::AstNode *right = node->mutable_children(0);\n                if (unlikely(right->bits() != 1)) {\n                  WARNF(\"nested icmp rhs bits != 1\\n\");\n                  return INVALID_NODE;\n                }\n                if (likely(info->l1 == 0)) {\n                  if (is_rel_cmp(info->op, __dfsan::bveq)) {\n                    if (info->op1.i == 1) { // checking true == bool\n                      node->CopyFrom(*right);\n                    } else { // checking false == bool\n                      node->set_kind(rgd::LNot);\n                    }\n                  } else { // bvneq\n                    if (info->op1.i == 0) { // checking false != bool\n                      node->CopyFrom(*right);\n                    } else { // checking true != bool\n                      node->set_kind(rgd::LNot);\n                    }\n                  }\n                } else {\n                  // l1 != 0, bool icmp bool ?!\n                  WARNF(\"bool icmp bool ?!\\n\");\n                  node->set_kind(rgd::Bool);\n                  node->set_boolvalue(0);\n                  node->clear_children();\n                }\n              } else {\n                WARNF(\"icmp with child yet no nested icmp?!\\n\");\n                return INVALID_NODE;\n              }\n            }\n          } else {\n            // both sides have another icmp, set as a constant boolean\n            node->set_kind(rgd::Bool);\n            
node->set_boolvalue(eval_icmp(info->op, info->op1.i, info->op2.i));\n            node->clear_children();\n          }\n        } else if (info->op == __dfsan::fmemcmp) {\n          // memcmp is also considered as a root node (relational comparison)\n          if (unlikely(node->children_size() != 0)) {\n            WARNF(\"memcmp should not have additional icmp\");\n            return INVALID_NODE;\n          }\n          node->set_bits(1); // XXX: treat memcmp as a boolean\n          node->set_kind(rgd::Memcmp); // fix later\n          node->set_label(curr);\n#ifdef DEBUG\n          subroots.insert(curr);\n#endif\n        } else {\n          WARNF(\"Invalid AST node: op = %d\\n\", info->op);\n          return INVALID_NODE;\n        }\n\n        // mark as visited and pop from stack\n        visited.insert(curr);\n        prev = curr;\n        stack.pop_back();\n        node_stack.pop_back();\n      }\n    }\n  }\n  } catch (std::out_of_range &e) {\n    WARNF(\"AST %u goes out of range at %s\\n\", label, e.what());\n    return INVALID_NODE;\n  }\n\n  return 0;\n}\n\n[[gnu::hot]]\nbool RGDAstParser::scan_labels(dfsan_label label) {\n  // assuming label has been checked by caller\n  // assuming the last label scanned is the size of the cache\n  // turns out linear scan is way faster than tree traversal\n  for (size_t i = ast_size_cache.size(); i <= label; i++) {\n    if (i == 0) { // the constant label\n      ast_size_cache.push_back(1); // constant takes one node too\n      branch_to_inputs.emplace_back(input_dep_t(input_size_));\n      nested_cmp_cache.push_back(0);\n      continue;\n    }\n    dfsan_label_info *info = get_label_info(i);\n    // conservatively check validity of labels\n    // so following parsing will not throw exceptions\n    if (unlikely(info->l1 >= size_ || info->l2 >= size_)) {\n      WARNF(\"invalid label: %lu, l1=%u, l2=%u\\n\", i, info->l1, info->l2);\n      return false;\n    }\n    if (info->op == 0) {\n      // AST nodes\n      
ast_size_cache.push_back(1); // one Read node\n      // input deps\n      uint32_t input_id = info->op2.i;\n      uint32_t offset = info->op1.i;\n      // skip if invalid\n      if (unlikely(input_id >= inputs_cache.size())) {\n        WARNF(\"invalid input id: %u\\n\", input_id);\n        return false;\n      }\n      size_t buf_size = inputs_cache[input_id].second;\n      if (unlikely(offset >= buf_size)) {\n        WARNF(\"invalid input offset: %u >= %lu\\n\", offset, buf_size);\n        return false;\n      }\n      branch_to_inputs.emplace_back(input_dep_t(input_size_));\n      // get flattened index\n      size_t idx = input_to_dep_idx(input_id, offset);\n      auto &itr = branch_to_inputs[i];\n      itr.set(idx); // flattened location\n#if DEBUG\n      assert(branch_to_inputs[i].find_first() == idx);\n#endif\n      // nested cmp?\n      nested_cmp_cache.push_back(0);\n    } else if (info->op == __dfsan::Load) {\n      // AST nodes\n      ast_size_cache.push_back(1); // one Read node\n      // input deps\n      uint32_t input_id = get_label_info(info->l1)->op2.i;\n      uint32_t offset = get_label_info(info->l1)->op1.i;\n      // skip if invalid\n      if (unlikely(input_id >= inputs_cache.size())) {\n        WARNF(\"invalid input id: %u\\n\", input_id);\n        return false;\n      }\n      size_t buf_size = inputs_cache[input_id].second;\n      if (unlikely(offset + info->l2 > buf_size)) {\n        WARNF(\"invalid input offset: %u + %u > %lu\\n\", offset, info->l2, buf_size);\n        return false;\n      }\n      branch_to_inputs.emplace_back(input_dep_t(input_size_));\n      // get flattened index\n      size_t idx = input_to_dep_idx(input_id, offset);\n      auto &itr = branch_to_inputs[i];\n      for (size_t n = 0; n < info->l2; ++n) {\n        // DEBUGF(\"adding input: %lu <- %lu\\n\", i, offset + n);\n        itr.set(idx + n); // input offsets\n      }\n#if DEBUG\n      if (likely(info->l2 > 0))\n        assert(branch_to_inputs[i].find_first() == 
idx);\n#endif\n      // nested cmp?\n      nested_cmp_cache.push_back(0);\n    } else {\n      // AST nodes\n      uint32_t left  = info->l1 == 0 ? 1 : ast_size_cache[info->l1];\n      uint32_t right = info->l2 == 0 ? 1 : ast_size_cache[info->l2];\n      ast_size_cache.push_back(left + right + 1);\n      // input deps\n      branch_to_inputs.emplace_back(input_dep_t(input_size_));\n      auto &itr = branch_to_inputs[i];\n      if (info->l1 != 0) itr |= branch_to_inputs[info->l1];\n      if (info->l2 != 0) itr |= branch_to_inputs[info->l2];\n      // nested cmp?\n      uint8_t nested = 0;\n      nested += info->l1 == 0 ? 0 : nested_cmp_cache[info->l1];\n      nested += info->l2 == 0 ? 0 : nested_cmp_cache[info->l2];\n      if (info->op == __dfsan::fmemcmp || (info->op & 0xff) == __dfsan::ICmp)\n        nested += 1;\n      nested_cmp_cache.push_back(nested);\n    }\n  }\n#if DEBUG\n  DEBUGF(\"ast_size: %d = %u\\n\", label, ast_size_cache[label]);\n  DEBUGF(\"input deps %d:\", label);\n  auto &itr = branch_to_inputs[label];\n  for (auto i = itr.find_first(); i != input_dep_t::npos; i = itr.find_next(i)) {\n    DEBUGF(\"%lu \", i);\n  }\n  DEBUGF(\"\\n\");\n  DEBUGF(\"nested cmp: %d = %d\\n\", label, nested_cmp_cache[label]);\n#endif\n  return true;\n}\n\nRGDAstParser::expr_t RGDAstParser::get_root_expr(dfsan_label label) {\n  if (label < CONST_OFFSET || label == __dfsan::kInitializingLabel || label >= size_) {\n    return nullptr;\n  }\n\n  expr_t root = nullptr;\n  auto itr = root_expr_cache.find(label);\n  if (itr != root_expr_cache.end()) {\n    root = itr->second;\n  } else {\n    // update ast_size and branch_to_inputs caches\n    if (!scan_labels(label)) {\n      return nullptr;\n    }\n    root = std::make_shared<rgd::AstNode>();\n    std::unordered_set<dfsan_label> subroots;\n    // we start by constructing a boolean formula with relational expressions\n    // as leaf nodes\n    if (find_roots(label, root.get(), subroots) != 0) {\n      return nullptr;\n    
}\n    root_expr_cache.insert({label, root});\n#if DEBUG\n    for (auto const& subroot : subroots) {\n      DEBUGF(\"subroot: %d\\n\", subroot);\n    }\n#endif\n  }\n#if DEBUG\n  printAst(stderr, root.get(), 0);\n#endif\n\n  return root;\n}\n\n[[gnu::hot]]\nint RGDAstParser::to_nnf(bool expected_r, rgd::AstNode *node) {\n  int ret = 0;\n  if (!expected_r) {\n    // we're looking for a negated formula\n    if (node->kind() == rgd::LNot) {\n      // double negation\n      if (unlikely(node->children_size() != 1)) {\n        WARNF(\"LNot expect a singple child\\n\");\n        return INVALID_NODE;\n      }\n      rgd::AstNode *child = node->mutable_children(0);\n      // transform the child, now looking for a true formula\n      ret = to_nnf(true, child);\n      if (unlikely(ret != 0)) { return ret; }\n      node->CopyFrom(*child);\n    } else if (node->kind() == rgd::LAnd) {\n      // De Morgan's law\n      if (unlikely(node->children_size() != 2)) {\n        WARNF(\"LAnd expect two children\\n\");\n        return INVALID_NODE;\n      }\n      node->set_kind(rgd::LOr);\n      ret = to_nnf(false, node->mutable_children(0));\n      if (unlikely(ret != 0)) { return ret; }\n      ret = to_nnf(false, node->mutable_children(1));\n      if (unlikely(ret != 0)) { return ret; }\n    } else if (node->kind() == rgd::LOr) {\n      // De Morgan's law\n      if (unlikely(node->children_size() != 2)) {\n        WARNF(\"LOr expect two children\\n\");\n        return INVALID_NODE;\n      }\n      node->set_kind(rgd::LAnd);\n      ret = to_nnf(false, node->mutable_children(0));\n      if (unlikely(ret != 0)) { return ret; }\n      ret = to_nnf(false, node->mutable_children(1));\n      if (unlikely(ret != 0)) { return ret; }\n    } else {\n      // leaf node\n      if (rgd::isRelationalKind(node->kind())) {\n        node->set_kind(rgd::negate_cmp(node->kind()));\n      } else if (node->kind() == rgd::Memcmp) {\n        // memcmp is also considered as a leaf node (relational 
comparison)\n        // memcmp == 0 actually means s1 == s2\n        // so we don't need to negate it\n      } else {\n        WARNF(\"Unexpected node kind %d\\n\", node->kind());\n        return INVALID_NODE;\n      }\n    }\n  } else {\n    // we're looking for a true formula\n    if (node->kind() == rgd::LNot) {\n      if (unlikely(node->children_size() != 1)) {\n        WARNF(\"LNot expect a singple child\\n\");\n        return INVALID_NODE;\n      }\n      rgd::AstNode *child = node->mutable_children(0);\n      // negate the child, now looking for a false formula\n      ret = to_nnf(false, child);\n      if (unlikely(ret != 0)) { return ret; }\n      node->CopyFrom(*child);\n    } else if (node->kind() == rgd::Memcmp) {\n      // memcmp is also considered as a leaf node (relational comparison)\n      // memcmp == 1 actually means s1 != s2\n      // so we negate it\n      node->set_kind(rgd::MemcmpN);\n    } else {\n      for (int i = 0; i < node->children_size(); i++) {\n        ret = to_nnf(expected_r, node->mutable_children(i));\n        if (unlikely(ret != 0)) { return ret; }\n      }\n    }\n  }\n\n  return 0;\n}\n\n[[gnu::hot]]\nvoid RGDAstParser::to_dnf(const rgd::AstNode *node, formula_t &formula) {\n  if (node->kind() == rgd::LAnd) {\n    formula_t left, right;\n    to_dnf(&node->children(0), left);\n    to_dnf(&node->children(1), right);\n    for (auto const& sub1: left) {\n      for (auto const& sub2: right) {\n        clause_t clause;\n        clause.insert(clause.end(), sub1.begin(), sub1.end());\n        clause.insert(clause.end(), sub2.begin(), sub2.end());\n        formula.push_back(clause);\n      }\n    }\n    if (left.size() == 0) {\n      formula = right;\n    }\n  } else if (node->kind() == rgd::LOr) {\n    // copy the clauses from the children\n    to_dnf(&node->children(0), formula);\n    to_dnf(&node->children(1), formula);\n  } else {\n    clause_t clause;\n    clause.push_back(node);\n    formula.push_back(clause);\n  }\n}\n\nint 
RGDAstParser::parse_cond(dfsan_label label, bool result, bool add_nested,\n                             std::vector<uint64_t> &tasks) {\n\n  // given a condition, we want to parse them into a DNF form of\n  // relational sub-expressions, where each sub-expression only contains\n  // one relational operator at the root\n  expr_t orig_root = get_root_expr(label);\n  if (orig_root == nullptr) {\n    WARNF(\"failed to get root expr for label %u\\n\", label);\n    return -1;\n  } else if (orig_root->kind() == rgd::Bool) {\n    // if the simplified formula is a boolean constant, nothing to do\n    DEBUGF(\"cond simplified to be a constant\\n\");\n    return 0;\n  }\n\n  // duplication the original root for transformation\n  expr_t root = std::make_shared<rgd::AstNode>();\n  root->CopyFrom(*orig_root);\n\n  // next, convert the formula to NNF form, possibly negate the root\n  // if we are looking for a false formula\n  bool target_direction = !result;\n  if (to_nnf(target_direction, root.get()) != 0) {\n    WARNF(\"failed to convert to NNF\\n\");\n    return -1;\n  }\n#if DEBUG\n  printAst(stderr, root.get(), 0);\n#endif\n  // then we need to convert the boolean formula into a DNF form\n  formula_t dnf;\n  to_dnf(root.get(), dnf);\n\n  // finally, we construct a search task for each clause in the DNF\n  for (auto const& clause : dnf) {\n    task_t task = construct_task(clause);\n    if (task != nullptr) {\n      tasks.push_back(save_task(task));\n    } else {\n      WARNF(\"failed to construct task for clause\\n\");\n      continue; // skip the nested task if the current task is invalid\n    }\n\n    if (solve_nested_) {\n      // collect dependencies based on data-flow (i.e., shared input bytes)\n      clause_t nested_caluse;\n      std::unordered_set<dfsan_label> inserted;\n      // first, copy the last branch constraints\n      nested_caluse.insert(nested_caluse.end(), clause.begin(), clause.end());\n      for (auto const& var : clause) inserted.insert(var->label());\n 
     bool has_nested = false;\n      // then, iterate each var in the clause\n      for (auto const& var: clause) {\n        const dfsan_label l = var->label();\n        // assert(branch_to_inputs.size() > l);\n        auto &itr = branch_to_inputs[l];\n        auto citr = concretize_node.find(l);\n        if (unlikely(citr != concretize_node.end())) {\n          // skip dependencies if the operand is concretized\n          if (citr->second == 1) {\n            // if the lhs is concretized, use the rhs deps only\n            itr = branch_to_inputs[get_label_info(l)->l2];\n          } else if (citr->second == 2) {\n            // if the rhs is concretized, use the lhs deps only\n            itr = branch_to_inputs[get_label_info(l)->l1];\n          }\n        }\n        if (unlikely(itr.find_first() == input_dep_t::npos)) {\n          // not actual input dependency, skip\n          continue;\n        }\n        // for each input byte used in the var, we collect additional constraints\n        // first, we use union find to add additional related input bytes\n        std::unordered_set<size_t> related_inputs;\n        for (auto input = itr.find_first(); input != input_dep_t::npos;\n             input = itr.find_next(input)) {\n          data_flow_deps.get_set(input, related_inputs); // FIXME: should be fine?\n        }\n        // then, we collect the branch constraints for each related input byte\n        for (auto input: related_inputs) {\n          auto const& bucket = input_to_branches[input];\n          for (auto const& nc : bucket) {\n            if (inserted.count(nc->label())) continue;\n            inserted.insert(nc->label());\n            has_nested = true;\n#if DEBUG\n            fprintf(stderr, \"add nested constraint: (%d, %d)\\n\", nc->label(), nc->kind());\n#endif\n            nested_caluse.push_back(nc.get()); // XXX: borrow the raw ptr, should be fine?\n          }\n        }\n      }\n      if (has_nested) { // only add nested task if there are 
additional constraints\n        task_t nested_task = construct_task(nested_caluse);\n        if (nested_task != nullptr) {\n          nested_task->base_task = task;\n          tasks.push_back(save_task(nested_task));\n        }\n      }\n    }\n  }\n\n  if (solve_nested_ && add_nested) {\n    save_constraint(orig_root, result);\n  }\n\n  return 0;\n}\n\nbool RGDAstParser::save_constraint(expr_t expr, bool result) {\n  // assumes scan_labels has been called\n\n  // make a copy of the expr, just in case\n  expr_t root = std::make_shared<rgd::AstNode>();\n  root->CopyFrom(*expr);\n\n  // first, convert the formula to NNF form, possibly negate the root\n  // if we are looking for a false formula\n  if (to_nnf(result, root.get()) != 0) {\n    return false;\n  }\n#if DEBUG\n  printAst(stderr, root.get(), 0);\n#endif\n  // then we need to convert the boolean formula into a DNF form\n  // NOTE: all ptrs in the formula are raw ptrs *temporarily*\n  // burrowed from the root expr, they will be gone after return\n  formula_t dnf;\n  to_dnf(root.get(), dnf);\n\n  // now we associate the constraints with input bytes\n  for (auto const& clause : dnf) {\n    // each clause is a conjunction of relational expressions\n    // that need to be evaluated to true (satisfied)\n    // we associate that with the corresponding input bytes\n    for (auto const& var : clause) {\n      // copy the node, as the original node will be gone after return\n      expr_t node = std::make_shared<rgd::AstNode>();\n      node->CopyFrom(*var);\n      // get the input bytes\n      const dfsan_label l = node->label();\n#if DEBUG\n      assert(branch_to_inputs.size() > l);\n#endif\n      auto &itr = branch_to_inputs[l];\n      auto citr = concretize_node.find(l);\n      if (unlikely(citr != concretize_node.end())) {\n        if (citr->second == 1) {\n          // if the lhs is concretized, use the rhs deps only\n          itr = branch_to_inputs[get_label_info(l)->l2];\n        } else if (citr->second == 2) 
{\n          // if the rhs is concretized, use the lhs deps only\n          itr = branch_to_inputs[get_label_info(l)->l1];\n        }\n      }\n      auto root = itr.find_first();\n      if (root == input_dep_t::npos) {\n        // not actual input dependency, skip\n        // this can happen for atoi\n        continue;\n      }\n      // update uion find\n      for (auto input = itr.find_next(root); input != input_dep_t::npos;\n           input = itr.find_next(input)) {\n#if DEBUG\n        DEBUGF(\"union input bytes: (%zu, %zu)\\n\", root, input);\n#endif\n        root = data_flow_deps.merge(root, input);\n        if (unlikely(root == rgd::UnionFind::INVALID)) {\n          WARNF(\"invalid input to union find\\n\");\n          return false;\n        }\n      }\n      // add the constraint\n      auto &bucket = input_to_branches[root];\n      bucket.push_back(node);\n      // we need to record the kind as it may be negated during transformation\n#if DEBUG\n      DEBUGF(\"add df constraint: %zu <- (%d, %d)\\n\", root, l, node->kind());\n#endif\n    }\n  }\n\n  return true;\n}\n\nvoid RGDAstParser::add_nested_constraint(task_t task, const clause_t &nested_caluse) {\n  for (auto const& node: nested_caluse) {\n    // check cache, should happen most of the time\n    auto itr = constraint_cache.find(node->label());\n    if (likely(itr != constraint_cache.end())) {\n      task->add_constraint(itr->second, node->kind());\n      continue;\n    }\n    // otherwise, parse the AST into a constraint\n    constraint_t constraint = parse_constraint(node->label());\n    if (likely(constraint != nullptr)) {\n      task->add_constraint(constraint, node->kind());\n      constraint_cache.insert({node->label(), constraint});\n    }\n  }\n}\n\nint RGDAstParser::parse_gep(dfsan_label ptr_label, uptr ptr,\n                            dfsan_label index_label, int64_t index,\n                            uint64_t num_elems, uint64_t elem_size,\n                            int64_t 
current_offset, bool enum_index,\n                            std::vector<uint64_t> &tasks) {\n  // check validity of the labels\n  if (index_label < CONST_OFFSET || index_label == __dfsan::kInitializingLabel\n      || index_label >= size_) {\n    return -1;\n  }\n\n  // update ast_size and branch_to_inputs caches\n  // if the index_label has been scanned before, it won't be scanned again\n  if (!scan_labels(index_label)) {\n    return -1;\n  }\n\n  // sanity checks\n  if (unlikely(ast_size_cache.size() <= index_label)) {\n    WARNF(\"invalid label %u, larger than ast_size_cache: %lu\\n\", index_label, ast_size_cache.size());\n    return -1;\n  }\n  if (unlikely(nested_cmp_cache.at(index_label) > 0)) {\n    WARNF(\"unexpected nested cmp in parse_gep for %u, skip\\n\", index_label);\n    return -1;\n  }\n\n  auto ast_size = ast_size_cache.at(index_label);\n  if (unlikely(ast_size == 0)) {\n    WARNF(\"invalid label %u, ast_size_cache is 0\\n\", index_label);\n    return 0;\n  } else if (unlikely(ast_size > max_ast_size_)) {\n    DEBUGF(\"skip large AST (%lu) in parse_gep for %u\\n\", ast_size, index_label);\n    return 0; // not an error, just skip\n  }\n\n  // early return if nothing to do\n  if (!enum_index || // if we are not enumerating the index\n      (num_elems == 0 && // if the GEP type is not an array,\n       // and we also don't have a pointer label\n       ptr_label)) {\n    return 0;\n  }\n\n  // hmm, since the gep constraints we want to solve are not in the union table,\n  // which means parse_constraint will not work,\n  // so we have to construct the tasks directly here\n  //\n\n  // first, parse the index_label into a partial constraint\n  // again, the index_label is not a cmp node\n  constraint_t partial_constraint = nullptr;\n  // check cache first\n  auto itr = constraint_cache.find(index_label);\n  if (itr != constraint_cache.end()) {\n    partial_constraint = itr->second;\n  } else {\n    // otherwise, parse the AST into a constraint\n    
std::unordered_set<dfsan_label> visited;\n    partial_constraint = std::make_shared<rgd::Constraint>(ast_size + 3); // leave extra one buffer?\n\n    // add the constant node first\n    auto const_node = partial_constraint->ast->add_children();\n    const_node->set_kind(rgd::Constant);\n    const_node->set_label(0);\n    uint32_t size = get_label_info(index_label)->size;\n    const_node->set_bits(size); // size of the index\n    // map args\n    uint32_t arg_index = 0; // first arg\n    const_node->set_index(arg_index);\n    partial_constraint->input_args.push_back(std::make_pair(false, 0)); // use 0 as a temporary placeholder\n    partial_constraint->const_num += 1;\n    uint32_t hash = rgd::xxhash(size, rgd::Constant, arg_index);\n    const_node->set_hash(hash);\n\n    // now, parse the index_label\n    auto index_node = partial_constraint->ast->add_children();\n    try {\n      if (!do_uta_rel(index_label, index_node, partial_constraint, visited)) {\n        WARNF(\"failed to parse index_label %u\\n\", index_label);\n        return -1;\n      }\n    } catch (std::bad_alloc &e) {\n      WARNF(\"failed to allocate memory for gep constraint\\n\");\n      return -1;\n    } catch (std::out_of_range &e) {\n      WARNF(\"AST %u goes out of range at %s\\n\", index_label, e.what());\n      return -1;\n    }\n\n    // setup root cmp node\n    auto cmp_node = partial_constraint->ast.get();\n    cmp_node->set_kind(rgd::Equal); // a placeholder, not really useful\n    cmp_node->set_label(0); // so jigsaw will not cache it as visited\n    cmp_node->set_bits(1);\n    // again, in jigsaw, we don't care about actual cmp kind\n    hash = rgd::xxhash(const_node->hash(), (rgd::Bool << 16) | 1, index_node->hash());\n    cmp_node->set_hash(hash);\n\n    // done parsing, add to cache\n    constraint_cache.insert({index_label, partial_constraint});\n  }\n\n  if (unlikely(partial_constraint == nullptr)) {\n    WARNF(\"failed to parse index_label %u\\n\", index_label);\n    return -1;\n  
}\n\n  // next, retrive nested constraints if needed\n  clause_t nested_caluse;\n  if (solve_nested_) {\n    auto &itr = branch_to_inputs[index_label];\n    if (unlikely(itr.find_first() != input_dep_t::npos)) {\n      // use union find to add additional related input bytes\n      std::unordered_set<size_t> related_inputs;\n      for (auto input = itr.find_first(); input != input_dep_t::npos;\n           input = itr.find_next(input)) {\n        data_flow_deps.get_set(input, related_inputs); // FIXME: should be fine?\n      }\n      // collect the branch constraints for each related input byte\n      std::unordered_set<dfsan_label> inserted;\n      for (auto input: related_inputs) {\n        auto const& bucket = input_to_branches[input];\n        for (auto const& nc : bucket) {\n          if (inserted.insert(nc->label()).second) {\n#if DEBUG\n            fprintf(stderr, \"add nested constraint for gep: (%d, %d)\\n\", nc->label(), nc->kind());\n#endif\n            nested_caluse.push_back(nc.get()); // XXX: borrow the raw ptr, should be fine?\n          }\n        }\n      }\n    }\n  }\n\n  // finally, we are ready to construct GEP tasks\n  //\n\n  if (enum_index) {\n    // TODO:\n  }\n\n  // bounds solving are seperated from index enumeration now\n\n  return 0;\n}\n\nint RGDAstParser::add_constraints(dfsan_label label, uint64_t result) {\n  // offset constraint should be in the form of r = (offset == label) = true\n  if (!solve_nested_) {\n    // only matters in nested mode\n    return 0;\n  }\n\n  // check validity of the label\n  if (label < CONST_OFFSET || label == __dfsan::kInitializingLabel || label >= size_) {\n    return -1;\n  }\n  // check validity of the result\n  if (result != 1) {\n    WARNF(\"unexpected result in add_constraints: %lu\\n\", result);\n    return -1;\n  }\n\n  expr_t root = nullptr;\n  auto itr = root_expr_cache.find(label);\n  if (itr != root_expr_cache.end()) {\n    // the constraint has already been added, skip\n    return 0;\n  }\n\n  
// update ast_size and branch_to_inputs caches\n  if (!scan_labels(label)) {\n    return -1;\n  }\n  // other sanity checks\n  // 1. there shouldn't be any nested cmp\n  if (nested_cmp_cache[label] > 0) {\n    WARNF(\"unexpected nested cmp in add_constraints for %u\\n\", label);\n    return -1;\n  }\n  dfsan_label_info *info = get_label_info(label);\n  // 2. the label should be a bveq one\n  if (!is_rel_cmp(info->op, __dfsan::bveq)) {\n    WARNF(\"unexpected cmp op (%d) in add_constraints for %u\\n\", info->op, label);\n    return -1;\n  }\n  // 3. one operand should be a constant\n  if (info->l1 != 0) {\n    WARNF(\"unexpected non-constant operand1 (%u) in add_constraints for %u\\n\", info->l1, label);\n    return -1;\n  }\n  // check for ast size\n  if (ast_size_cache[info->l2] > max_ast_size_) {\n    DEBUGF(\"skip large AST (%lu) in add_constraints for %u\\n\", ast_size_cache[info->l2], label);\n    return 0; // not an error, just skip\n  }\n  // setup node\n  root = std::make_shared<rgd::AstNode>(1);\n  root->set_bits(1);\n  root->set_kind(rgd::Equal);\n  root->set_label(label);\n  root_expr_cache.insert({label, root});\n\n  if (!save_constraint(root, true)) {\n    return -1;\n  }\n\n  return 0;\n}\n"
  },
  {
    "path": "python/CMakeLists.txt",
    "content": "set(CMAKE_POSITION_INDEPENDENT_CODE ON)\nset(CMAKE_CXX_STANDARD 17)\n\nfind_package(Python3 COMPONENTS Interpreter Development)\n\nif (Python3_Development_FOUND)\n  message(STATUS \"Python3_VERSION ${Python3_VERSION}\")\nelse()\n  message(FATAL_ERROR \"Cannot find Python development!\")\nendif()\n\nadd_library(pysymsan SHARED symsan-py.cpp)\nset_target_properties(pysymsan PROPERTIES PREFIX \"\")\nset_target_properties(pysymsan PROPERTIES OUTPUT_NAME \"symsan\")\nset_target_properties(pysymsan PROPERTIES SUFFIX \".cpython-${Python3_VERSION_MAJOR}${Python3_VERSION_MINOR}-x86_64-linux-gnu.so\")\ntarget_include_directories(pysymsan PRIVATE\n  ${CMAKE_CURRENT_SOURCE_DIR}/../runtime\n  ${Python3_INCLUDE_DIRS}\n)\ntarget_link_libraries(pysymsan PRIVATE\n  launcher\n  z3parser\n  z3\n  ${Python3_LIBRARIES}\n  rt\n)\ninstall (TARGETS pysymsan DESTINATION ${SYMSAN_LIB_DIR})\n"
  },
  {
    "path": "python/README.md",
    "content": "Provide a python binding to launch symsan-instrumented binary, receive events, parse constraints, and solve constraints.\n\n```\nstatic PyMethodDef SymSanMethods[] = {\n  {\"init\", SymSanInit, METH_VARARGS, \"initialize symsan target\"},\n  {\"config\", (PyCFunction)SymSanConfig, METH_VARARGS | METH_KEYWORDS, \"config symsan\"},\n  {\"run\", (PyCFunction)SymSanRun, METH_VARARGS | METH_KEYWORDS, \"run symsan target, optional stdin=file\"},\n  {\"read_event\", SymSanReadEvent, METH_VARARGS, \"read a symsan event\"},\n  {\"terminate\", (PyCFunction)SymSanTerminate, METH_NOARGS, \"terminate current symsan instance\"},\n  {\"destroy\", (PyCFunction)SymSanDestroy, METH_NOARGS, \"destroy symsan target\"},\n  {\"reset_input\", InitParser, METH_VARARGS, \"reset the symbolic expression parser with a new input\"},\n  {\"parse_cond\", ParseCond, METH_VARARGS, \"parse trace_cond event into solving tasks\"},\n  {\"parse_gep\", ParseGEP, METH_VARARGS, \"parse trace_gep event into solving tasks\"},\n  {\"add_constraint\", AddConstraint, METH_VARARGS, \"add a constraint\"},\n  {\"record_memcmp\", RecordMemcmp, METH_VARARGS, \"record a memcmp event\"},\n  {\"solve_task\", SolveTask, METH_VARARGS, \"solve a task\"},\n  {NULL, NULL, 0, NULL}  /* Sentinel */\n};\n```\n\nCurrently only z3 solver is supported, will merge jigsaw and i2s later.\n"
  },
  {
    "path": "python/symsan-py.cpp",
    "content": "#include \"defs.h\"\n#include \"debug.h\"\n#include \"version.h\"\n\n#include \"dfsan/dfsan.h\"\n\nextern \"C\" {\n#include \"launch.h\"\n}\n\n#include \"parse-z3.h\"\n\n#include <z3++.h>\n\n#include <memory>\n#include <utility>\n#include <vector>\n\n#include <stdio.h>\n#include <stdlib.h>\n#include <string.h>\n#include <unistd.h>\n\n#include <fcntl.h>\n\n#define PY_SSIZE_T_CLEAN\n#include <Python.h>\n\n// z3parser\nstatic z3::context __z3_context;\nsymsan::Z3ParserSolver *__z3_parser = nullptr;\n\n\nstatic PyObject* SymSanInit(PyObject *self, PyObject *args) {\n  const char *program;\n  unsigned long long ut_size = uniontable_size;\n\n  if (!PyArg_ParseTuple(args, \"s|K\", &program, &ut_size)) {\n    return NULL;\n  }\n\n  // setup launcher\n  void *shm_base = symsan_init(program, ut_size);\n  if (shm_base == (void *)-1) {\n    fprintf(stderr, \"Failed to map shm: %s\\n\", strerror(errno));\n    return PyErr_SetFromErrno(PyExc_OSError);\n  }\n\n  // setup parser\n  __z3_parser = new symsan::Z3ParserSolver(shm_base, ut_size, __z3_context);\n  if (__z3_parser == nullptr) {\n    fprintf(stderr, \"Failed to initialize parser\\n\");\n    return PyErr_NoMemory();\n  }\n\n  return PyCapsule_New(shm_base, \"dfsan_label_info\", NULL);\n}\n\nstatic PyObject* SymSanConfig(PyObject *self, PyObject *args, PyObject *keywds) {\n  static const char *kwlist[]\n      = {\"input\", \"args\", \"debug\", \"bounds\", \"undefined\", NULL};\n  const char *input = NULL;\n  PyObject *iargs = NULL;\n  int debug = 0;\n  int bounds = 0;\n  int solve_ub = 0;\n\n  if (!PyArg_ParseTupleAndKeywords(args, keywds, \"s|O!iii\",\n      const_cast<char**>(kwlist), &input, &PyList_Type, &iargs,\n      &debug, &bounds, &solve_ub)) {\n    return NULL;\n  }\n\n  if (input == NULL) {\n    PyErr_SetString(PyExc_ValueError, \"missing input\");\n    return NULL;\n  }\n\n  if (symsan_set_input(input) != 0) {\n    PyErr_SetString(PyExc_ValueError, \"invalid input\");\n    return NULL;\n  }\n\n  
if (iargs != NULL) {\n    Py_ssize_t argc = PyList_Size(iargs);\n    char *argv[argc];\n    for (Py_ssize_t i = 0; i < argc; i++) {\n      PyObject *item = PyList_GetItem(iargs, i);\n      if (item == NULL) {\n        PyErr_SetString(PyExc_RuntimeError, \"failed to retrieve args list\");\n        return NULL;\n      }\n      if (!PyUnicode_Check(item)) {\n        PyErr_SetString(PyExc_TypeError, \"args must be a list of strings\");\n        return NULL;\n      }\n      argv[i] = const_cast<char*>(PyUnicode_AsUTF8(item));\n    }\n    if (symsan_set_args(argc, argv) != 0) {\n      PyErr_SetString(PyExc_ValueError, \"invalid args\");\n      return NULL;\n    }\n  }\n\n  if (symsan_set_debug(debug) != 0) {\n    PyErr_SetString(PyExc_ValueError, \"invalid debug\");\n    return NULL;\n  }\n\n  if (symsan_set_bounds_check(bounds) != 0) {\n    PyErr_SetString(PyExc_ValueError, \"invalid bounds\");\n    return NULL;\n  }\n\n  if (symsan_set_solve_ub(solve_ub) != 0) {\n    PyErr_SetString(PyExc_ValueError, \"invalid solve_ub\");\n    return NULL;\n  }\n\n  Py_RETURN_NONE;\n}\n\nstatic PyObject* SymSanRun(PyObject *self, PyObject *args, PyObject *keywds) {\n  static const char *kwlist[] = {\"stdin\", NULL};\n  const char *file = NULL;\n  int fd = 0;\n\n  if (!PyArg_ParseTupleAndKeywords(args, keywds, \"|s\", const_cast<char**>(kwlist), &file)) {\n    return NULL;\n  }\n\n  if (file) {\n    fd = open(file, O_RDONLY);\n    if (fd < 0) {\n      PyErr_SetFromErrno(PyExc_OSError);\n      return NULL;\n    }\n  }\n\n  int ret = symsan_run(fd);\n\n  if (file) {\n    close(fd);\n  }\n  \n  if (ret < 0) {\n    PyErr_SetString(PyExc_ValueError, \"failed to launch target\");\n    return NULL;\n  }\n\n  Py_RETURN_NONE;\n}\n\nstatic PyObject* SymSanReadEvent(PyObject *self, PyObject *args) {\n  PyObject *ret;\n  char *buf;\n  Py_ssize_t size;\n  unsigned timeout = 0;\n\n  if (!PyArg_ParseTuple(args, \"n|I\", &size, &timeout)) {\n    return NULL;\n  }\n\n  if (size <= 0) {\n    
PyErr_SetString(PyExc_ValueError, \"invalid buffer size\");\n    return NULL;\n  }\n\n  buf = (char *)malloc(size);\n\n  ssize_t read = symsan_read_event(buf, size, timeout);\n  if (read < 0) {\n    PyErr_SetFromErrno(PyExc_OSError);\n    free(buf);\n    return NULL;\n  }\n\n  ret = PyBytes_FromStringAndSize(buf, read);\n  free(buf);\n\n  return ret;\n}\n\nstatic PyObject* SymSanTerminate(PyObject *self) {\n  if (symsan_terminate() != 0) {\n    PyErr_SetString(PyExc_RuntimeError, \"failed to terminate target\");\n    return NULL;\n  }\n\n  int status, is_killed;\n  is_killed = symsan_get_exit_status(&status);\n\n  PyObject *ret = PyTuple_New(2);\n  PyTuple_SetItem(ret, 0, PyLong_FromLong(status));\n  PyTuple_SetItem(ret, 1, PyLong_FromLong(is_killed));\n\n  return ret;\n}\n\nstatic PyObject* SymSanDestroy(PyObject *self) {\n  if (__z3_parser != nullptr) {\n    delete __z3_parser;\n    symsan_destroy();\n    __z3_parser = nullptr;\n  }\n  Py_RETURN_NONE;\n}\n\nstatic PyObject* InitParser(PyObject *self, PyObject *args) {\n  if (__z3_parser == nullptr) {\n    PyErr_SetString(PyExc_RuntimeError, \"parser not initialized\");\n    return NULL;\n  }\n\n  std::vector<symsan::input_t> inputs;\n  PyObject *iargs = NULL;\n\n  if (!PyArg_ParseTuple(args, \"O!\", &PyList_Type, &iargs)) {\n    return NULL;\n  }\n\n  Py_ssize_t argc = PyList_Size(iargs);\n  for (Py_ssize_t i = 0; i < argc; i++) {\n    PyObject *item = PyList_GetItem(iargs, i);\n    if (item == NULL) {\n      PyErr_SetString(PyExc_RuntimeError, \"failed to retrieve args list\");\n      return NULL;\n    }\n    if (!PyBytes_Check(item)) {\n      PyErr_SetString(PyExc_TypeError, \"args must be a list of bytes\");\n      return NULL;\n    }\n    Py_ssize_t size;\n    char *data;\n    if (PyBytes_AsStringAndSize(item, &data, &size) != 0) {\n      // exception should have been set?\n      return NULL;\n    }\n    inputs.push_back({(uint8_t*)data, size});\n  }\n\n  if (__z3_parser->restart(inputs) != 0) {\n    
PyErr_SetString(PyExc_RuntimeError, \"failed to restart parser\");\n    return NULL;\n  }\n\n  Py_RETURN_NONE;\n}\n\nstatic PyObject* ParseCond(PyObject *self, PyObject *args) {\n  if (__z3_parser == nullptr) {\n    PyErr_SetString(PyExc_RuntimeError, \"parser not initialized\");\n    return NULL;\n  }\n\n  PyObject *ret;\n  dfsan_label label = 0;\n  uint64_t result = 0;\n  uint16_t flags = 0;\n\n  if (!PyArg_ParseTuple(args, \"IKH\", &label, &result, &flags)) {\n    return NULL;\n  }\n\n  std::vector<uint64_t> tasks;\n  if (__z3_parser->parse_cond(label, result, flags & F_ADD_CONS, tasks) != 0) {\n    PyErr_SetString(PyExc_RuntimeError, \"failed to parse condition\");\n    return NULL;\n  }\n\n  ret = PyList_New(tasks.size());\n  for (size_t i = 0; i < tasks.size(); i++) {\n    PyObject *task = PyLong_FromUnsignedLongLong(tasks[i]);\n    PyList_SetItem(ret, i, task);\n  }\n\n  return ret;\n}\n\nstatic PyObject* ParseGEP(PyObject *self, PyObject *args) {\n  if (__z3_parser == nullptr) {\n    PyErr_SetString(PyExc_RuntimeError, \"parser not initialized\");\n    return NULL;\n  }\n\n  PyObject *ret;\n  dfsan_label ptr_label = 0;\n  uptr ptr = 0;\n  dfsan_label index_label = 0;\n  int64_t index = 0;\n  uint64_t num_elems = 0;\n  uint64_t elem_size = 0;\n  int64_t current_offset = 0;\n  bool enum_index = false; // XXX: default to false?\n\n  if (!PyArg_ParseTuple(args, \"IKILKKLp\", &ptr_label, &ptr, &index_label, &index,\n      &num_elems, &elem_size, &current_offset, &enum_index)) {\n    return NULL;\n  }\n\n  std::vector<uint64_t> tasks;\n  if (__z3_parser->parse_gep(ptr_label, ptr, index_label, index, num_elems,\n                             elem_size, current_offset, enum_index, tasks) != 0) {\n    PyErr_SetString(PyExc_RuntimeError, \"failed to parse GEP\");\n    return NULL;\n  }\n\n  ret = PyList_New(tasks.size());\n  for (size_t i = 0; i < tasks.size(); i++) {\n    PyObject *task = PyLong_FromUnsignedLongLong(tasks[i]);\n    PyList_SetItem(ret, i, task);\n  
}\n\n  return ret;\n}\n\nstatic PyObject* AddConstraint(PyObject *self, PyObject *args) {\n  if (__z3_parser == nullptr) {\n    PyErr_SetString(PyExc_RuntimeError, \"parser not initialized\");\n    return NULL;\n  }\n\n  dfsan_label label = 0;\n  uint64_t val = 0;\n\n  if (!PyArg_ParseTuple(args, \"IL\", &label, &val)) {\n    return NULL;\n  }\n\n  if (__z3_parser->add_constraints(label, val) != 0) {\n    PyErr_SetString(PyExc_RuntimeError, \"failed to add constraint\");\n    return NULL;\n  }\n\n  Py_RETURN_NONE;\n}\n\nstatic PyObject* RecordMemcmp(PyObject *self, PyObject *args) {\n  if (__z3_parser == nullptr) {\n    PyErr_SetString(PyExc_RuntimeError, \"parser not initialized\");\n    return NULL;\n  }\n\n  dfsan_label label = 0;\n  PyObject *buf = NULL;\n\n  if (!PyArg_ParseTuple(args, \"IS\", &label, &buf)) {\n    return NULL;\n  }\n\n  Py_ssize_t size;\n  char *data;\n  if (PyBytes_AsStringAndSize(buf, &data, &size) != 0) {\n    // exception should have been set?\n    return NULL;\n  }\n\n  if (__z3_parser->record_memcmp(label, (uint8_t*)data, size) != 0) {\n    PyErr_SetString(PyExc_RuntimeError, \"failed to record memcmp\");\n    return NULL;\n  }\n\n  Py_RETURN_NONE;\n}\n\nstatic PyObject* SolveTask(PyObject *self, PyObject *args) {\n  if (__z3_parser == nullptr) {\n    PyErr_SetString(PyExc_RuntimeError, \"parser not initialized\");\n    return NULL;\n  }\n\n  uint64_t id = 0;\n  unsigned timeout = 5000;\n  if (!PyArg_ParseTuple(args, \"K|I\", &id, &timeout)) {\n    return NULL;\n  }\n\n  symsan::Z3ParserSolver::solution_t solutions;\n  int status = __z3_parser->solve_task(id, timeout, solutions);\n\n  PyObject *sols = PyList_New(solutions.size());\n  for (size_t i = 0; i < solutions.size(); i++) {\n    PyObject *sol = PyTuple_New(3);\n    auto val = solutions[i];\n    PyTuple_SetItem(sol, 0, PyLong_FromUnsignedLong(val.id));\n    PyTuple_SetItem(sol, 1, PyLong_FromUnsignedLong(val.offset));\n    PyTuple_SetItem(sol, 2, 
PyLong_FromUnsignedLong(val.val));\n    PyList_SetItem(sols, i, sol);\n  }\n\n  PyObject *ret = PyTuple_New(2);\n  PyTuple_SetItem(ret, 0, PyLong_FromLong(status));\n  PyTuple_SetItem(ret, 1, sols);\n\n  return ret;\n}\n\nstatic PyMethodDef SymSanMethods[] = {\n  {\"init\", SymSanInit, METH_VARARGS, \"initialize symsan target\"},\n  {\"config\", (PyCFunction)SymSanConfig, METH_VARARGS | METH_KEYWORDS, \"config symsan\"},\n  {\"run\", (PyCFunction)SymSanRun, METH_VARARGS | METH_KEYWORDS, \"run symsan target, optional stdin=file\"},\n  {\"read_event\", SymSanReadEvent, METH_VARARGS, \"read a symsan event\"},\n  {\"terminate\", (PyCFunction)SymSanTerminate, METH_NOARGS, \"terminate current symsan instance\"},\n  {\"destroy\", (PyCFunction)SymSanDestroy, METH_NOARGS, \"destroy symsan target\"},\n  {\"reset_input\", InitParser, METH_VARARGS, \"reset the symbolic expression parser with a new input\"},\n  {\"parse_cond\", ParseCond, METH_VARARGS, \"parse trace_cond event into solving tasks\"},\n  {\"parse_gep\", ParseGEP, METH_VARARGS, \"parse trace_gep event into solving tasks\"},\n  {\"add_constraint\", AddConstraint, METH_VARARGS, \"add a constraint\"},\n  {\"record_memcmp\", RecordMemcmp, METH_VARARGS, \"record a memcmp event\"},\n  {\"solve_task\", SolveTask, METH_VARARGS, \"solve a task\"},\n  {NULL, NULL, 0, NULL}  /* Sentinel */\n};\n\nstatic char SymSanDoc[] = \"Python3 wrapper over SymSan launch, parser, and solver.\";\n\nstatic PyModuleDef SymSanModule = {\n  PyModuleDef_HEAD_INIT,\n  \"symsan\",   /* name of module */\n  SymSanDoc,  /* module documentation, may be NULL */\n  -1,         /* size of per-interpreter state of the module,\n                 or -1 if the module keeps state in global variables. */\n  SymSanMethods\n};\n\nPyMODINIT_FUNC\nPyInit_symsan(void) {\n  // check if initialized before?\n  if (__z3_parser != nullptr) {\n    delete __z3_parser;\n    symsan_destroy();\n  }\n  return PyModule_Create(&SymSanModule);\n}\n"
  },
  {
    "path": "python/test.py",
    "content": "import sys\nimport ctypes\nimport symsan\n\nclass pipe_msg(ctypes.Structure):\n    _pack_ = 1\n    _fields_ = [(\"type\", ctypes.c_uint16),\n                (\"flags\", ctypes.c_uint16),\n                (\"instance_id\", ctypes.c_uint32),\n                (\"addr\", ctypes.c_ulonglong),\n                (\"context\", ctypes.c_uint32),\n                (\"id\", ctypes.c_uint32),\n                (\"label\", ctypes.c_uint32),\n                (\"result\", ctypes.c_uint64)]\n\nclass memcmp_msg(ctypes.Structure):\n    _pack_ = 1\n    _fields_ = [(\"label\", ctypes.c_uint32)]\n    content = bytes()\n\nprog = sys.argv[1]\nfile = sys.argv[2]\n\nsymsan.init(sys.argv[1])\nsymsan.config(file, args=[prog, file], debug=1, bounds=0)\nsymsan.run()\n\nf = open(file, \"rb\")\nbuf = f.read()\nsymsan.reset_input([buf])\n\nwhile True:\n    e = symsan.read_event(ctypes.sizeof(pipe_msg))\n    if len(e) < ctypes.sizeof(pipe_msg):\n        break\n    msg = pipe_msg.from_buffer_copy(e)\n    print(f\"received msg: type={msg.type}, flags={msg.flags}, \"\n          f\"addr={msg.addr:x}, context={msg.context}, cid={msg.id}, \"\n          f\"label={msg.label}, result={msg.result}\")\n\n    tasks = []\n    if msg.type == 0:\n        tasks = symsan.parse_cond(msg.label, msg.result, msg.flags)\n        print(tasks)\n    elif msg.type == 2 and msg.flags == 1:\n        label = msg.label\n        size = msg.result\n        m = symsan.read_event(ctypes.sizeof(memcmp_msg) + size)\n        if len(m) < ctypes.sizeof(memcmp_msg) + size:\n            print(\"error reading memcmp msg\")\n            break\n        buf = memcmp_msg.from_buffer_copy(m)\n        if buf.label != label:\n            print(\"error reading memcmp msg\")\n            break\n        buf.content = m[ctypes.sizeof(memcmp_msg):]\n        print(f\"memcmp content: {buf.content.hex()}\")\n        symsan.record_memcmp(label, buf.content)\n\n    for task in tasks:\n        r, sol = symsan.solve_task(task)\n        
print(sol)\n\nstatus, is_killed = symsan.terminate()\nprint(f\"exit status {status}, killed? {is_killed}\")\n\nsymsan.destroy()\n\n"
  },
  {
    "path": "runtime/CMakeLists.txt",
    "content": "# Add path for custom compiler-rt modules.\nlist(INSERT CMAKE_MODULE_PATH 0 \"${CMAKE_CURRENT_SOURCE_DIR}/cmake\")\n\nset(COMPILER_RT_OUTPUT_DIR ${CMAKE_CURRENT_BINARY_DIR} CACHE PATH\n\"Path where built compiler-rt libraries should be stored.\")\nset(COMPILER_RT_EXEC_OUTPUT_DIR ${CMAKE_CURRENT_BINARY_DIR}/bin CACHE PATH\n\"Path where built compiler-rt executables should be stored.\")\nset(COMPILER_RT_SOURCE_DIR \"${CMAKE_CURRENT_SOURCE_DIR}\")\nset(COMPILER_RT_INSTALL_PATH ${CMAKE_INSTALL_PREFIX} CACHE PATH\n\"Path where built compiler-rt libraries should be installed.\")\n\nset(COMPILER_RT_LIBRARY_OUTPUT_DIR ${COMPILER_RT_OUTPUT_DIR})\nset(COMPILER_RT_LIBRARY_INSTALL_DIR ${SYMSAN_LIB_DIR})\n\nset(ARM64 aarch64)\nset(X86_64 x86_64)\nset(MIPS64 mips64 mips64el)\n\nif(APPLE)\n  set(ARM64 arm64)\n  set(X86_64 x86_64 x86_64h)\nendif()\n\nset(CAN_TARGET_${X86_64} 1)\nset(DFSAN_SUPPORTED_ARCH ${X86_64}) #  ${MIPS64} ${ARM64}\nset(SANITIZER_COMMON_SUPPORTED_ARCH ${X86_64}) #  ${MIPS64} ${ARM64}\n\nlist(APPEND SANITIZER_COMMON_CFLAGS \"-stdlib=libc++\")\nlist(APPEND SANITIZER_COMMON_CFLAGS -O3)\nlist(APPEND SANITIZER_COMMON_CFLAGS -g)\nlist(APPEND SANITIZER_COMMON_CFLAGS -fPIC)\n\ninclude(CheckIncludeFile)\ncheck_include_file(rpc/xdr.h HAVE_RPC_XDR_H)\nif (NOT HAVE_RPC_XDR_H)\n  set(HAVE_RPC_XDR_H 0)\nendif()\n\nadd_custom_target(compiler-rt ALL)\nadd_custom_target(install-compiler-rt)\nadd_custom_target(install-compiler-rt-stripped)\n\nset_property(\n  TARGET\n    compiler-rt\n    install-compiler-rt\n    install-compiler-rt-stripped\n  PROPERTY\n    FOLDER \"Compiler-RT Misc\"\n)\n\ninclude_directories(${CMAKE_CURRENT_SOURCE_DIR})\n\ninclude(AddCompilerRT)\ninclude(SanitizerUtils)\n\nset(CMAKE_CXX_STANDARD 14)\n\nadd_subdirectory(sanitizer_common)\nadd_subdirectory(interception)\nadd_subdirectory(dfsan)\n"
  },
  {
    "path": "runtime/cmake/AddCompilerRT.cmake",
    "content": "include(ExternalProject)\ninclude(CompilerRTUtils)\n\nfunction(set_target_output_directories target output_dir)\n  # For RUNTIME_OUTPUT_DIRECTORY variable, Multi-configuration generators\n  # append a per-configuration subdirectory to the specified directory.\n  # To avoid the appended folder, the configuration specific variable must be\n  # set 'RUNTIME_OUTPUT_DIRECTORY_${CONF}':\n  # RUNTIME_OUTPUT_DIRECTORY_DEBUG, RUNTIME_OUTPUT_DIRECTORY_RELEASE, ...\n  if(CMAKE_CONFIGURATION_TYPES)\n    foreach(build_mode ${CMAKE_CONFIGURATION_TYPES})\n      string(TOUPPER \"${build_mode}\" CONFIG_SUFFIX)\n      set_target_properties(\"${target}\" PROPERTIES\n          \"ARCHIVE_OUTPUT_DIRECTORY_${CONFIG_SUFFIX}\" ${output_dir}\n          \"LIBRARY_OUTPUT_DIRECTORY_${CONFIG_SUFFIX}\" ${output_dir}\n          \"RUNTIME_OUTPUT_DIRECTORY_${CONFIG_SUFFIX}\" ${output_dir})\n    endforeach()\n  else()\n    set_target_properties(\"${target}\" PROPERTIES\n        ARCHIVE_OUTPUT_DIRECTORY ${output_dir}\n        LIBRARY_OUTPUT_DIRECTORY ${output_dir}\n        RUNTIME_OUTPUT_DIRECTORY ${output_dir})\n  endif()\nendfunction()\n\n# Tries to add an \"object library\" target for a given list of OSs and/or\n# architectures with name \"<name>.<arch>\" for non-Darwin platforms if\n# architecture can be targeted, and \"<name>.<os>\" for Darwin platforms.\n# add_compiler_rt_object_libraries(<name>\n#                                  OS <os names>\n#                                  ARCHS <architectures>\n#                                  SOURCES <source files>\n#                                  CFLAGS <compile flags>\n#                                  DEFS <compile definitions>\n#                                  DEPS <dependencies>\n#                                  ADDITIONAL_HEADERS <header files>)\nfunction(add_compiler_rt_object_libraries name)\n  cmake_parse_arguments(LIB \"\" \"\" \"OS;ARCHS;SOURCES;CFLAGS;DEFS;DEPS;ADDITIONAL_HEADERS\"\n    ${ARGN})\n  set(libnames)\n  
if(APPLE)\n    foreach(os ${LIB_OS})\n      set(libname \"${name}.${os}\")\n      set(libnames ${libnames} ${libname})\n      set(extra_cflags_${libname} ${DARWIN_${os}_CFLAGS})\n      list_intersect(LIB_ARCHS_${libname} DARWIN_${os}_ARCHS LIB_ARCHS)\n    endforeach()\n  else()\n    foreach(arch ${LIB_ARCHS})\n      set(libname \"${name}.${arch}\")\n      set(libnames ${libnames} ${libname})\n      set(extra_cflags_${libname} ${TARGET_${arch}_CFLAGS})\n      if(NOT CAN_TARGET_${arch})\n        message(FATAL_ERROR \"Architecture ${arch} can't be targeted\")\n        return()\n      endif()\n    endforeach()\n  endif()\n\n  foreach(libname ${libnames})\n    add_library(${libname} OBJECT ${LIB_SOURCES})\n    if(LIB_DEPS)\n      add_dependencies(${libname} ${LIB_DEPS})\n    endif()\n\n    # Strip out -msse3 if this isn't macOS.\n    set(target_flags ${LIB_CFLAGS})\n    if(APPLE AND NOT \"${libname}\" MATCHES \".*\\.osx.*\")\n      list(REMOVE_ITEM target_flags \"-msse3\")\n    endif()\n\n    set_target_compile_flags(${libname}\n      ${CMAKE_CXX_FLAGS} ${extra_cflags_${libname}} ${target_flags})\n    set_property(TARGET ${libname} APPEND PROPERTY\n      COMPILE_DEFINITIONS ${LIB_DEFS})\n    set_target_properties(${libname} PROPERTIES FOLDER \"Compiler-RT Libraries\")\n    if(APPLE)\n      set_target_properties(${libname} PROPERTIES\n        OSX_ARCHITECTURES \"${LIB_ARCHS_${libname}}\")\n    endif()\n  endforeach()\nendfunction()\n\n# Takes a list of object library targets, and a suffix and appends the proper\n# TARGET_OBJECTS string to the output variable.\n# format_object_libs(<output> <suffix> ...)\nmacro(format_object_libs output suffix)\n  foreach(lib ${ARGN})\n    list(APPEND ${output} $<TARGET_OBJECTS:${lib}.${suffix}>)\n  endforeach()\nendmacro()\n\nfunction(add_compiler_rt_component name)\n  add_custom_target(${name})\n  set_target_properties(${name} PROPERTIES FOLDER \"Compiler-RT Misc\")\n  if(COMMAND runtime_register_component)\n    
runtime_register_component(${name})\n  endif()\n  add_dependencies(compiler-rt ${name})\nendfunction()\n\nfunction(add_asm_sources output)\n  set(${output} ${ARGN} PARENT_SCOPE)\n  # Xcode will try to compile asm files as C ('clang -x c'), and that will fail.\n  if (${CMAKE_GENERATOR} STREQUAL \"Xcode\")\n    enable_language(ASM)\n  else()\n    # Pass ASM file directly to the C++ compiler.\n    set_source_files_properties(${ARGN} PROPERTIES LANGUAGE C)\n  endif()\nendfunction()\n\nmacro(set_output_name output name arch)\n  if(LLVM_ENABLE_PER_TARGET_RUNTIME_DIR)\n    set(${output} ${name})\n  else()\n    if(ANDROID AND ${arch} STREQUAL \"i386\")\n      set(${output} \"${name}-i686${COMPILER_RT_OS_SUFFIX}\")\n    else()\n      set(${output} \"${name}-${arch}${COMPILER_RT_OS_SUFFIX}\")\n    endif()\n  endif()\nendmacro()\n\n# Adds static or shared runtime for a list of architectures and operating\n# systems and puts it in the proper directory in the build and install trees.\n# add_compiler_rt_runtime(<name>\n#                         {STATIC|SHARED}\n#                         ARCHS <architectures>\n#                         OS <os list>\n#                         SOURCES <source files>\n#                         CFLAGS <compile flags>\n#                         LINK_FLAGS <linker flags>\n#                         DEFS <compile definitions>\n#                         LINK_LIBS <linked libraries> (only for shared library)\n#                         OBJECT_LIBS <object libraries to use as sources>\n#                         PARENT_TARGET <convenience parent target>\n#                         ADDITIONAL_HEADERS <header files>)\nfunction(add_compiler_rt_runtime name type)\n  if(NOT type MATCHES \"^(STATIC|SHARED)$\")\n    message(FATAL_ERROR \"type argument must be STATIC or SHARED\")\n    return()\n  endif()\n  cmake_parse_arguments(LIB\n    \"\"\n    \"PARENT_TARGET\"\n    \"OS;ARCHS;SOURCES;CFLAGS;LINK_FLAGS;DEFS;LINK_LIBS;OBJECT_LIBS;ADDITIONAL_HEADERS\"\n    
${ARGN})\n  set(libnames)\n  # Until we support this some other way, build compiler-rt runtime without LTO\n  # to allow non-LTO projects to link with it.\n  if(COMPILER_RT_HAS_FNO_LTO_FLAG)\n    set(NO_LTO_FLAGS \"-fno-lto\")\n  else()\n    set(NO_LTO_FLAGS \"\")\n  endif()\n\n  if(APPLE)\n    foreach(os ${LIB_OS})\n      # Strip out -msse3 if this isn't macOS.\n      list(LENGTH LIB_CFLAGS HAS_EXTRA_CFLAGS)\n      if(HAS_EXTRA_CFLAGS AND NOT \"${os}\" MATCHES \"^(osx)$\")\n        list(REMOVE_ITEM LIB_CFLAGS \"-msse3\")\n      endif()\n      if(type STREQUAL \"STATIC\")\n        set(libname \"${name}_${os}\")\n      else()\n        set(libname \"${name}_${os}_dynamic\")\n        set(extra_link_flags_${libname} ${DARWIN_${os}_LINK_FLAGS} ${LIB_LINK_FLAGS})\n      endif()\n      list_intersect(LIB_ARCHS_${libname} DARWIN_${os}_ARCHS LIB_ARCHS)\n      if(LIB_ARCHS_${libname})\n        list(APPEND libnames ${libname})\n        set(extra_cflags_${libname} ${DARWIN_${os}_CFLAGS} ${NO_LTO_FLAGS} ${LIB_CFLAGS})\n        set(output_name_${libname} ${libname}${COMPILER_RT_OS_SUFFIX})\n        set(sources_${libname} ${LIB_SOURCES})\n        format_object_libs(sources_${libname} ${os} ${LIB_OBJECT_LIBS})\n        get_compiler_rt_output_dir(${COMPILER_RT_DEFAULT_TARGET_ARCH} output_dir_${libname})\n        get_compiler_rt_install_dir(${COMPILER_RT_DEFAULT_TARGET_ARCH} install_dir_${libname})\n      endif()\n    endforeach()\n  else()\n    foreach(arch ${LIB_ARCHS})\n      if(NOT CAN_TARGET_${arch})\n        message(FATAL_ERROR \"Architecture ${arch} can't be targeted\")\n        return()\n      endif()\n      if(type STREQUAL \"STATIC\")\n        set(libname \"${name}-${arch}\")\n        set_output_name(output_name_${libname} ${name} ${arch})\n      else()\n        set(libname \"${name}-dynamic-${arch}\")\n        set(extra_cflags_${libname} ${TARGET_${arch}_CFLAGS} ${LIB_CFLAGS})\n        set(extra_link_flags_${libname} ${TARGET_${arch}_LINK_FLAGS} ${LIB_LINK_FLAGS})\n       
 if(WIN32)\n          set_output_name(output_name_${libname} ${name}_dynamic ${arch})\n        else()\n          set_output_name(output_name_${libname} ${name} ${arch})\n        endif()\n      endif()\n      set(sources_${libname} ${LIB_SOURCES})\n      format_object_libs(sources_${libname} ${arch} ${LIB_OBJECT_LIBS})\n      set(libnames ${libnames} ${libname})\n      set(extra_cflags_${libname} ${TARGET_${arch}_CFLAGS} ${NO_LTO_FLAGS} ${LIB_CFLAGS})\n      get_compiler_rt_output_dir(${arch} output_dir_${libname})\n      get_compiler_rt_install_dir(${arch} install_dir_${libname})\n    endforeach()\n  endif()\n\n  if(NOT libnames)\n    return()\n  endif()\n\n  if(LIB_PARENT_TARGET)\n    # If the parent targets aren't created we should create them\n    if(NOT TARGET ${LIB_PARENT_TARGET})\n      add_custom_target(${LIB_PARENT_TARGET})\n      set_target_properties(${LIB_PARENT_TARGET} PROPERTIES\n                            FOLDER \"Compiler-RT Misc\")\n    endif()\n    if(NOT TARGET install-${LIB_PARENT_TARGET})\n      # The parent install target specifies the parent component to scrape up\n      # anything not installed by the individual install targets, and to handle\n      # installation when running the multi-configuration generators.\n      add_custom_target(install-${LIB_PARENT_TARGET}\n                        DEPENDS ${LIB_PARENT_TARGET}\n                        COMMAND \"${CMAKE_COMMAND}\"\n                                -DCMAKE_INSTALL_COMPONENT=${LIB_PARENT_TARGET}\n                                -P \"${CMAKE_BINARY_DIR}/cmake_install.cmake\")\n      add_custom_target(install-${LIB_PARENT_TARGET}-stripped\n                        DEPENDS ${LIB_PARENT_TARGET}\n                        COMMAND \"${CMAKE_COMMAND}\"\n                                -DCMAKE_INSTALL_COMPONENT=${LIB_PARENT_TARGET}\n                                -DCMAKE_INSTALL_DO_STRIP=1\n                                -P \"${CMAKE_BINARY_DIR}/cmake_install.cmake\")\n      
set_target_properties(install-${LIB_PARENT_TARGET} PROPERTIES\n                            FOLDER \"Compiler-RT Misc\")\n      set_target_properties(install-${LIB_PARENT_TARGET}-stripped PROPERTIES\n                            FOLDER \"Compiler-RT Misc\")\n      add_dependencies(install-compiler-rt install-${LIB_PARENT_TARGET})\n      add_dependencies(install-compiler-rt-stripped install-${LIB_PARENT_TARGET}-stripped)\n    endif()\n  endif()\n\n  foreach(libname ${libnames})\n    # If you are using a multi-configuration generator we don't generate\n    # per-library install rules, so we fall back to the parent target COMPONENT\n    if(CMAKE_CONFIGURATION_TYPES AND LIB_PARENT_TARGET)\n      set(COMPONENT_OPTION COMPONENT ${LIB_PARENT_TARGET})\n    else()\n      set(COMPONENT_OPTION COMPONENT ${libname})\n    endif()\n\n    add_library(${libname} ${type} ${sources_${libname}})\n    set_target_compile_flags(${libname} ${extra_cflags_${libname}})\n    set_target_link_flags(${libname} ${extra_link_flags_${libname}})\n    set_property(TARGET ${libname} APPEND PROPERTY\n                COMPILE_DEFINITIONS ${LIB_DEFS})\n    set_target_output_directories(${libname} ${output_dir_${libname}})\n    set_target_properties(${libname} PROPERTIES\n        OUTPUT_NAME ${output_name_${libname}})\n    set_target_properties(${libname} PROPERTIES FOLDER \"Compiler-RT Runtime\")\n    if(LIB_LINK_LIBS)\n      target_link_libraries(${libname} ${LIB_LINK_LIBS})\n    endif()\n    if(${type} STREQUAL \"SHARED\")\n      if(COMMAND llvm_setup_rpath)\n        llvm_setup_rpath(${libname})\n      endif()\n      if(WIN32 AND NOT CYGWIN AND NOT MINGW)\n        set_target_properties(${libname} PROPERTIES IMPORT_PREFIX \"\")\n        set_target_properties(${libname} PROPERTIES IMPORT_SUFFIX \".lib\")\n      endif()\n      if(APPLE)\n        # Ad-hoc sign the dylibs\n        add_custom_command(TARGET ${libname}\n          POST_BUILD  \n          COMMAND codesign --sign - $<TARGET_FILE:${libname}>\n     
     WORKING_DIRECTORY ${COMPILER_RT_LIBRARY_OUTPUT_DIR}\n        )\n      endif()\n    endif()\n    install(TARGETS ${libname}\n      ARCHIVE DESTINATION ${install_dir_${libname}}\n              ${COMPONENT_OPTION}\n      LIBRARY DESTINATION ${install_dir_${libname}}\n              ${COMPONENT_OPTION}\n      RUNTIME DESTINATION ${install_dir_${libname}}\n              ${COMPONENT_OPTION})\n\n    # We only want to generate per-library install targets if you aren't using\n    # an IDE because the extra targets get cluttered in IDEs.\n    if(NOT CMAKE_CONFIGURATION_TYPES)\n      add_custom_target(install-${libname}\n                        DEPENDS ${libname}\n                        COMMAND \"${CMAKE_COMMAND}\"\n                                -DCMAKE_INSTALL_COMPONENT=${libname}\n                                -P \"${CMAKE_BINARY_DIR}/cmake_install.cmake\")\n      add_custom_target(install-${libname}-stripped\n                        DEPENDS ${libname}\n                        COMMAND \"${CMAKE_COMMAND}\"\n                                -DCMAKE_INSTALL_COMPONENT=${libname}\n                                -DCMAKE_INSTALL_DO_STRIP=1\n                                -P \"${CMAKE_BINARY_DIR}/cmake_install.cmake\")\n      # If you have a parent target specified, we bind the new install target\n      # to the parent install target.\n      if(LIB_PARENT_TARGET)\n        add_dependencies(install-${LIB_PARENT_TARGET} install-${libname})\n        add_dependencies(install-${LIB_PARENT_TARGET}-stripped install-${libname}-stripped)\n      endif()\n    endif()\n    if(APPLE)\n      set_target_properties(${libname} PROPERTIES\n      OSX_ARCHITECTURES \"${LIB_ARCHS_${libname}}\")\n    endif()\n\n    if(type STREQUAL \"SHARED\")\n      rt_externalize_debuginfo(${libname})\n    endif()\n  endforeach()\n  if(LIB_PARENT_TARGET)\n    add_dependencies(${LIB_PARENT_TARGET} ${libnames})\n  endif()\nendfunction()\n\n# when cross compiling, COMPILER_RT_TEST_COMPILER_CFLAGS help\n# in 
compilation and linking of unittests.\nstring(REPLACE \" \" \";\" COMPILER_RT_UNITTEST_CFLAGS \"${COMPILER_RT_TEST_COMPILER_CFLAGS}\")\nset(COMPILER_RT_UNITTEST_LINK_FLAGS ${COMPILER_RT_UNITTEST_CFLAGS})\n\n# Unittests support.\nset(COMPILER_RT_GTEST_PATH ${LLVM_MAIN_SRC_DIR}/utils/unittest/googletest)\nset(COMPILER_RT_GTEST_SOURCE ${COMPILER_RT_GTEST_PATH}/src/gtest-all.cc)\nset(COMPILER_RT_GTEST_CFLAGS\n  -DGTEST_NO_LLVM_RAW_OSTREAM=1\n  -DGTEST_HAS_RTTI=0\n  -I${COMPILER_RT_GTEST_PATH}/include\n  -I${COMPILER_RT_GTEST_PATH}\n)\n\nappend_list_if(COMPILER_RT_DEBUG -DSANITIZER_DEBUG=1 COMPILER_RT_UNITTEST_CFLAGS)\nappend_list_if(COMPILER_RT_HAS_WCOVERED_SWITCH_DEFAULT_FLAG -Wno-covered-switch-default COMPILER_RT_UNITTEST_CFLAGS)\n\nif(MSVC)\n  # clang doesn't support exceptions on Windows yet.\n  list(APPEND COMPILER_RT_UNITTEST_CFLAGS -D_HAS_EXCEPTIONS=0)\n\n  # We should teach clang to understand \"#pragma intrinsic\", see PR19898.\n  list(APPEND COMPILER_RT_UNITTEST_CFLAGS -Wno-undefined-inline)\n\n  # Clang doesn't support SEH on Windows yet.\n  list(APPEND COMPILER_RT_GTEST_CFLAGS -DGTEST_HAS_SEH=0)\n\n  # gtest use a lot of stuff marked as deprecated on Windows.\n  list(APPEND COMPILER_RT_GTEST_CFLAGS -Wno-deprecated-declarations)\nendif()\n\n# Compile and register compiler-rt tests.\n# generate_compiler_rt_tests(<output object files> <test_suite> <test_name>\n#                           <test architecture>\n#                           KIND <custom prefix>\n#                           SUBDIR <subdirectory for testing binary>\n#                           SOURCES <sources to compile>\n#                           RUNTIME <tests runtime to link in>\n#                           CFLAGS <compile-time flags>\n#                           COMPILE_DEPS <compile-time dependencies>\n#                           DEPS <dependencies>\n#                           LINK_FLAGS <flags to use during linking>\n# )\nfunction(generate_compiler_rt_tests test_objects test_suite testname 
arch)\n  cmake_parse_arguments(TEST \"\" \"KIND;RUNTIME;SUBDIR\"\n    \"SOURCES;COMPILE_DEPS;DEPS;CFLAGS;LINK_FLAGS\" ${ARGN})\n\n  foreach(source ${TEST_SOURCES})\n    sanitizer_test_compile(\n      \"${test_objects}\" \"${source}\" \"${arch}\"\n      KIND ${TEST_KIND}\n      COMPILE_DEPS ${TEST_COMPILE_DEPS}\n      DEPS ${TEST_DEPS}\n      CFLAGS ${TEST_CFLAGS}\n      )\n  endforeach()\n\n  set(TEST_DEPS ${${test_objects}})\n\n  if(NOT \"${TEST_RUNTIME}\" STREQUAL \"\")\n    list(APPEND TEST_DEPS ${TEST_RUNTIME})\n    list(APPEND \"${test_objects}\" $<TARGET_FILE:${TEST_RUNTIME}>)\n  endif()\n\n  add_compiler_rt_test(${test_suite} \"${testname}\" \"${arch}\"\n    SUBDIR ${TEST_SUBDIR}\n    OBJECTS ${${test_objects}}\n    DEPS ${TEST_DEPS}\n    LINK_FLAGS ${TEST_LINK_FLAGS}\n    )\n  set(\"${test_objects}\" \"${${test_objects}}\" PARENT_SCOPE)\nendfunction()\n\n# Link objects into a single executable with COMPILER_RT_TEST_COMPILER,\n# using specified link flags. Make executable a part of provided\n# test_suite.\n# add_compiler_rt_test(<test_suite> <test_name> <arch>\n#                      SUBDIR <subdirectory for binary>\n#                      OBJECTS <object files>\n#                      DEPS <deps (e.g. 
runtime libs)>\n#                      LINK_FLAGS <link flags>)\nfunction(add_compiler_rt_test test_suite test_name arch)\n  cmake_parse_arguments(TEST \"\" \"SUBDIR\" \"OBJECTS;DEPS;LINK_FLAGS\" \"\" ${ARGN})\n  set(output_dir ${CMAKE_CURRENT_BINARY_DIR})\n  if(TEST_SUBDIR)\n    set(output_dir \"${output_dir}/${TEST_SUBDIR}\")\n  endif()\n  set(output_dir \"${output_dir}/${CMAKE_CFG_INTDIR}\")\n  file(MAKE_DIRECTORY \"${output_dir}\")\n  set(output_bin \"${output_dir}/${test_name}\")\n  if(MSVC)\n    set(output_bin \"${output_bin}.exe\")\n  endif()\n\n  # Use host compiler in a standalone build, and just-built Clang otherwise.\n  if(NOT COMPILER_RT_STANDALONE_BUILD)\n    list(APPEND TEST_DEPS clang)\n  endif()\n\n  get_target_flags_for_arch(${arch} TARGET_LINK_FLAGS)\n  list(APPEND TEST_LINK_FLAGS ${TARGET_LINK_FLAGS})\n\n  # If we're not on MSVC, include the linker flags from CMAKE but override them\n  # with the provided link flags. This ensures that flags which are required to\n  # link programs at all are included, but the changes needed for the test\n  # trump. With MSVC we can't do that because CMake is set up to run link.exe\n  # when linking, not the compiler. 
Here, we hack it to use the compiler\n  # because we want to use -fsanitize flags.\n  if(NOT MSVC)\n    set(TEST_LINK_FLAGS \"${CMAKE_EXE_LINKER_FLAGS} ${TEST_LINK_FLAGS}\")\n    separate_arguments(TEST_LINK_FLAGS)\n  endif()\n  add_custom_command(\n    OUTPUT \"${output_bin}\"\n    COMMAND ${COMPILER_RT_TEST_COMPILER} ${TEST_OBJECTS} -o \"${output_bin}\"\n            ${TEST_LINK_FLAGS}\n    DEPENDS ${TEST_DEPS}\n    )\n  add_custom_target(T${test_name} DEPENDS \"${output_bin}\")\n  set_target_properties(T${test_name} PROPERTIES FOLDER \"Compiler-RT Tests\")\n\n  # Make the test suite depend on the binary.\n  add_dependencies(${test_suite} T${test_name})\nendfunction()\n\nmacro(add_compiler_rt_resource_file target_name file_name component)\n  set(src_file \"${CMAKE_CURRENT_SOURCE_DIR}/${file_name}\")\n  set(dst_file \"${COMPILER_RT_OUTPUT_DIR}/share/${file_name}\")\n  add_custom_command(OUTPUT ${dst_file}\n    DEPENDS ${src_file}\n    COMMAND ${CMAKE_COMMAND} -E copy_if_different ${src_file} ${dst_file}\n    COMMENT \"Copying ${file_name}...\")\n  add_custom_target(${target_name} DEPENDS ${dst_file})\n  # Install in Clang resource directory.\n  install(FILES ${file_name}\n    DESTINATION ${COMPILER_RT_INSTALL_PATH}/share\n    COMPONENT ${component})\n  add_dependencies(${component} ${target_name})\n\n  set_target_properties(${target_name} PROPERTIES FOLDER \"Compiler-RT Misc\")\nendmacro()\n\nmacro(add_compiler_rt_script name)\n  set(dst ${COMPILER_RT_EXEC_OUTPUT_DIR}/${name})\n  set(src ${CMAKE_CURRENT_SOURCE_DIR}/${name})\n  add_custom_command(OUTPUT ${dst}\n    DEPENDS ${src}\n    COMMAND ${CMAKE_COMMAND} -E copy_if_different ${src} ${dst}\n    COMMENT \"Copying ${name}...\")\n  add_custom_target(${name} DEPENDS ${dst})\n  install(FILES ${dst}\n    PERMISSIONS OWNER_READ OWNER_WRITE OWNER_EXECUTE GROUP_READ GROUP_EXECUTE WORLD_READ WORLD_EXECUTE\n    DESTINATION ${COMPILER_RT_INSTALL_PATH}/bin)\nendmacro(add_compiler_rt_script src name)\n\n# Builds custom 
version of libc++ and installs it in <prefix>.\n# Can be used to build sanitized versions of libc++ for running unit tests.\n# add_custom_libcxx(<name> <prefix>\n#                   DEPS <list of build deps>\n#                   CFLAGS <list of compile flags>\n#                   USE_TOOLCHAIN)\nmacro(add_custom_libcxx name prefix)\n  if(NOT COMPILER_RT_LIBCXX_PATH)\n    message(FATAL_ERROR \"libcxx not found!\")\n  endif()\n  if(NOT COMPILER_RT_LIBCXXABI_PATH)\n    message(FATAL_ERROR \"libcxxabi not found!\")\n  endif()\n\n  cmake_parse_arguments(LIBCXX \"USE_TOOLCHAIN\" \"\" \"DEPS;CFLAGS;CMAKE_ARGS\" ${ARGN})\n\n  if(LIBCXX_USE_TOOLCHAIN)\n    set(compiler_args -DCMAKE_C_COMPILER=${COMPILER_RT_TEST_COMPILER}\n                      -DCMAKE_CXX_COMPILER=${COMPILER_RT_TEST_CXX_COMPILER})\n    if(NOT COMPILER_RT_STANDALONE_BUILD)\n      set(toolchain_deps $<TARGET_FILE:clang>)\n      set(force_deps DEPENDS $<TARGET_FILE:clang>)\n    endif()\n  else()\n    set(compiler_args -DCMAKE_C_COMPILER=${CMAKE_C_COMPILER}\n                      -DCMAKE_CXX_COMPILER=${CMAKE_CXX_COMPILER})\n  endif()\n\n  set(STAMP_DIR ${prefix}-stamps/)\n  set(BINARY_DIR ${prefix}-bins/)\n\n  add_custom_target(${name}-clear\n    COMMAND ${CMAKE_COMMAND} -E remove_directory ${BINARY_DIR}\n    COMMAND ${CMAKE_COMMAND} -E remove_directory ${STAMP_DIR}\n    COMMENT \"Clobbering ${name} build and stamp directories\"\n    USES_TERMINAL\n    )\n  set_target_properties(${name}-clear PROPERTIES FOLDER \"Compiler-RT Misc\")\n\n  add_custom_command(\n    OUTPUT ${CMAKE_CURRENT_BINARY_DIR}/${name}-clobber-stamp\n    DEPENDS ${LIBCXX_DEPS} ${toolchain_deps}\n    COMMAND ${CMAKE_COMMAND} -E touch ${BINARY_DIR}/CMakeCache.txt\n    COMMAND ${CMAKE_COMMAND} -E touch ${STAMP_DIR}/${name}-mkdir\n    COMMAND ${CMAKE_COMMAND} -E touch ${CMAKE_CURRENT_BINARY_DIR}/${name}-clobber-stamp\n    COMMENT \"Clobbering bootstrap build and stamp directories\"\n    )\n\n  add_custom_target(${name}-clobber\n    DEPENDS 
${CMAKE_CURRENT_BINARY_DIR}/${name}-clobber-stamp)\n  set_target_properties(${name}-clobber PROPERTIES FOLDER \"Compiler-RT Misc\")\n\n  set(PASSTHROUGH_VARIABLES\n    CMAKE_C_COMPILER_TARGET\n    CMAKE_CXX_COMPILER_TARGET\n    CMAKE_INSTALL_PREFIX\n    CMAKE_MAKE_PROGRAM\n    CMAKE_LINKER\n    CMAKE_AR\n    CMAKE_RANLIB\n    CMAKE_NM\n    CMAKE_OBJCOPY\n    CMAKE_OBJDUMP\n    CMAKE_STRIP\n    CMAKE_SYSROOT\n    CMAKE_SYSTEM_NAME)\n  foreach(variable ${PASSTHROUGH_VARIABLES})\n    if(${variable})\n      list(APPEND CMAKE_PASSTHROUGH_VARIABLES -D${variable}=${${variable}})\n    endif()\n  endforeach()\n\n  string(REPLACE \";\" \" \" FLAGS_STRING \"${LIBCXX_CFLAGS}\")\n  set(LIBCXX_C_FLAGS \"${FLAGS_STRING}\")\n  set(LIBCXX_CXX_FLAGS \"${FLAGS_STRING}\")\n\n  ExternalProject_Add(${name}\n    DEPENDS ${name}-clobber ${LIBCXX_DEPS}\n    PREFIX ${prefix}\n    SOURCE_DIR ${COMPILER_RT_SOURCE_DIR}/cmake/CustomLibcxx\n    STAMP_DIR ${STAMP_DIR}\n    BINARY_DIR ${BINARY_DIR}\n    CMAKE_ARGS ${CMAKE_PASSTHROUGH_VARIABLES}\n               ${compiler_args}\n               -DCMAKE_C_FLAGS=${LIBCXX_C_FLAGS}\n               -DCMAKE_CXX_FLAGS=${LIBCXX_CXX_FLAGS}\n               -DCMAKE_BUILD_TYPE=Release\n               -DCMAKE_TRY_COMPILE_TARGET_TYPE=STATIC_LIBRARY\n               -DLLVM_PATH=${LLVM_MAIN_SRC_DIR}\n               -DLLVM_BINARY_DIR=${prefix}\n               -DLLVM_LIBRARY_OUTPUT_INTDIR=${prefix}/lib\n               -DCOMPILER_RT_LIBCXX_PATH=${COMPILER_RT_LIBCXX_PATH}\n               -DCOMPILER_RT_LIBCXXABI_PATH=${COMPILER_RT_LIBCXXABI_PATH}\n               ${LIBCXX_CMAKE_ARGS}\n    INSTALL_COMMAND \"\"\n    STEP_TARGETS configure build\n    BUILD_ALWAYS 1\n    USES_TERMINAL_CONFIGURE 1\n    USES_TERMINAL_BUILD 1\n    USES_TERMINAL_INSTALL 1\n    EXCLUDE_FROM_ALL TRUE\n    BUILD_BYPRODUCTS \"${prefix}/lib/libc++.a\" \"${prefix}/lib/libc++abi.a\"\n    )\n\n  if (CMAKE_GENERATOR MATCHES \"Make\")\n    set(run_clean \"$(MAKE)\" \"-C\" \"${BINARY_DIR}\" \"clean\")\n  
else()\n    set(run_clean ${CMAKE_COMMAND} --build ${BINARY_DIR} --target clean\n                                   --config \"$<CONFIGURATION>\")\n  endif()\n\n  ExternalProject_Add_Step(${name} clean\n    COMMAND ${run_clean}\n    COMMENT \"Cleaning ${name}...\"\n    DEPENDEES configure\n    ${force_deps}\n    WORKING_DIRECTORY ${BINARY_DIR}\n    EXCLUDE_FROM_MAIN 1\n    USES_TERMINAL 1\n    )\n  ExternalProject_Add_StepTargets(${name} clean)\n\n  if(LIBCXX_USE_TOOLCHAIN)\n    add_dependencies(${name}-clean ${name}-clobber)\n    set_target_properties(${name}-clean PROPERTIES\n      SOURCES ${CMAKE_CURRENT_BINARY_DIR}/${name}-clobber-stamp)\n  endif()\nendmacro()\n\nfunction(rt_externalize_debuginfo name)\n  if(NOT COMPILER_RT_EXTERNALIZE_DEBUGINFO)\n    return()\n  endif()\n\n  if(NOT COMPILER_RT_EXTERNALIZE_DEBUGINFO_SKIP_STRIP)\n    set(strip_command COMMAND xcrun strip -Sl $<TARGET_FILE:${name}>)\n  endif()\n\n  if(APPLE)\n    if(CMAKE_CXX_FLAGS MATCHES \"-flto\"\n      OR CMAKE_CXX_FLAGS_${uppercase_CMAKE_BUILD_TYPE} MATCHES \"-flto\")\n\n      set(lto_object ${CMAKE_CURRENT_BINARY_DIR}/${CMAKE_CFG_INTDIR}/${name}-lto.o)\n      set_property(TARGET ${name} APPEND_STRING PROPERTY\n        LINK_FLAGS \" -Wl,-object_path_lto -Wl,${lto_object}\")\n    endif()\n    add_custom_command(TARGET ${name} POST_BUILD\n      COMMAND xcrun dsymutil $<TARGET_FILE:${name}>\n      ${strip_command})\n  else()\n    message(FATAL_ERROR \"COMPILER_RT_EXTERNALIZE_DEBUGINFO isn't implemented for non-darwin platforms!\")\n  endif()\nendfunction()\n\n\n# Configure lit configuration files, including compiler-rt specific variables.\nfunction(configure_compiler_rt_lit_site_cfg input output)\n  set_llvm_build_mode()\n\n  get_compiler_rt_output_dir(${COMPILER_RT_DEFAULT_TARGET_ARCH} output_dir)\n\n  string(REPLACE ${CMAKE_CFG_INTDIR} ${LLVM_BUILD_MODE} COMPILER_RT_RESOLVED_TEST_COMPILER ${COMPILER_RT_TEST_COMPILER})\n  string(REPLACE ${CMAKE_CFG_INTDIR} ${LLVM_BUILD_MODE} 
COMPILER_RT_RESOLVED_LIBRARY_OUTPUT_DIR ${output_dir})\n\n  configure_lit_site_cfg(${input} ${output})\nendfunction()\n"
  },
  {
    "path": "runtime/cmake/BuiltinTests.cmake",
    "content": "include(CMakeCheckCompilerFlagCommonPatterns)\n\n# This function takes an OS and a list of architectures and identifies the\n# subset of the architectures list that the installed toolchain can target.\nfunction(try_compile_only output)\n  cmake_parse_arguments(ARG \"\" \"\" \"SOURCE;FLAGS\" ${ARGN})\n  if(NOT ARG_SOURCE)\n    set(ARG_SOURCE \"int foo(int x, int y) { return x + y; }\\n\")\n  endif()\n  set(SIMPLE_C ${CMAKE_BINARY_DIR}${CMAKE_FILES_DIRECTORY}/src.c)\n  file(WRITE ${SIMPLE_C} \"${ARG_SOURCE}\\n\")\n  string(REGEX MATCHALL \"<[A-Za-z0-9_]*>\" substitutions\n         ${CMAKE_C_COMPILE_OBJECT})\n\n  set(TRY_COMPILE_FLAGS \"${ARG_FLAGS}\")\n  if(CMAKE_C_COMPILER_ID MATCHES Clang AND CMAKE_C_COMPILER_TARGET)\n    list(APPEND TRY_COMPILE_FLAGS \"-target ${CMAKE_C_COMPILER_TARGET}\")\n  endif()\n\n  string(REPLACE \";\" \" \" extra_flags \"${TRY_COMPILE_FLAGS}\")\n\n  set(test_compile_command \"${CMAKE_C_COMPILE_OBJECT}\")\n  foreach(substitution ${substitutions})\n    if(substitution STREQUAL \"<CMAKE_C_COMPILER>\")\n      string(REPLACE \"<CMAKE_C_COMPILER>\"\n             \"${CMAKE_C_COMPILER}\" test_compile_command ${test_compile_command})\n    elseif(substitution STREQUAL \"<OBJECT>\")\n      string(REPLACE \"<OBJECT>\"\n             \"${CMAKE_BINARY_DIR}${CMAKE_FILES_DIRECTORY}/test.o\"\n             test_compile_command ${test_compile_command})\n    elseif(substitution STREQUAL \"<SOURCE>\")\n      string(REPLACE \"<SOURCE>\" \"${SIMPLE_C}\" test_compile_command\n             ${test_compile_command})\n    elseif(substitution STREQUAL \"<FLAGS>\")\n      string(REPLACE \"<FLAGS>\" \"${CMAKE_C_FLAGS} ${extra_flags}\"\n             test_compile_command ${test_compile_command})\n    else()\n      string(REPLACE \"${substitution}\" \"\" test_compile_command\n             ${test_compile_command})\n    endif()\n  endforeach()\n\n  string(REPLACE \" \" \";\" test_compile_command \"${test_compile_command}\")\n\n  execute_process(\n    COMMAND 
${test_compile_command}\n    RESULT_VARIABLE result\n    OUTPUT_VARIABLE TEST_OUTPUT\n    ERROR_VARIABLE TEST_ERROR\n  )\n\n  CHECK_COMPILER_FLAG_COMMON_PATTERNS(_CheckCCompilerFlag_COMMON_PATTERNS)\n  set(ERRORS_FOUND OFF)\n  foreach(var ${_CheckCCompilerFlag_COMMON_PATTERNS})\n    if(\"${var}\" STREQUAL \"FAIL_REGEX\")\n      continue()\n    endif()\n    if(\"${TEST_ERROR}\" MATCHES \"${var}\" OR \"${TEST_OUTPUT}\" MATCHES \"${var}\")\n      set(ERRORS_FOUND ON)\n    endif()\n  endforeach()\n\n  if(result EQUAL 0 AND NOT ERRORS_FOUND)\n    set(${output} True PARENT_SCOPE)\n  else()\n    file(APPEND ${CMAKE_BINARY_DIR}${CMAKE_FILES_DIRECTORY}/CMakeError.log\n        \"Testing compiler for supporting \" ${ARGN} \":\\n\"\n        \"Command: ${test_compile_command}\\n\"\n        \"${TEST_OUTPUT}\\n${TEST_ERROR}\\n${result}\\n\")\n    set(${output} False PARENT_SCOPE)\n  endif()\nendfunction()\n\nfunction(builtin_check_c_compiler_flag flag output)\n  if(NOT DEFINED ${output})\n    message(STATUS \"Performing Test ${output}\")\n    try_compile_only(result FLAGS ${flag})\n    set(${output} ${result} CACHE INTERNAL \"Compiler supports ${flag}\")\n    if(${result})\n      message(STATUS \"Performing Test ${output} - Success\")\n    else()\n      message(STATUS \"Performing Test ${output} - Failed\")\n    endif()\n  endif()\nendfunction()\n\nfunction(builtin_check_c_compiler_source output source)\n  if(NOT DEFINED ${output})\n    message(STATUS \"Performing Test ${output}\")\n    try_compile_only(result SOURCE ${source})\n    set(${output} ${result} CACHE INTERNAL \"Compiler supports ${flag}\")\n    if(${result})\n      message(STATUS \"Performing Test ${output} - Success\")\n    else()\n      message(STATUS \"Performing Test ${output} - Failed\")\n    endif()\n  endif()\nendfunction()\n"
  },
  {
    "path": "runtime/cmake/CompilerRTCompile.cmake",
    "content": "include(CMakePushCheckState)\ninclude(CheckSymbolExists)\n\n# Because compiler-rt spends a lot of time setting up custom compile flags,\n# define a handy helper function for it. The compile flags setting in CMake\n# has serious issues that make its syntax challenging at best.\nfunction(set_target_compile_flags target)\n  set(argstring \"\")\n  foreach(arg ${ARGN})\n    set(argstring \"${argstring} ${arg}\")\n  endforeach()\n  set_property(TARGET ${target} PROPERTY COMPILE_FLAGS \"${argstring}\")\nendfunction()\n\nfunction(set_target_link_flags target)\n  set(argstring \"\")\n  foreach(arg ${ARGN})\n    set(argstring \"${argstring} ${arg}\")\n  endforeach()\n  set_property(TARGET ${target} PROPERTY LINK_FLAGS \"${argstring}\")\nendfunction()\n\n# Set the variable var_PYBOOL to True if var holds a true-ish string,\n# otherwise set it to False.\nmacro(pythonize_bool var)\n  if (${var})\n    set(${var}_PYBOOL True)\n  else()\n    set(${var}_PYBOOL False)\n  endif()\nendmacro()\n\n# Appends value to all lists in ARGN, if the condition is true.\nmacro(append_list_if condition value)\n  if(${condition})\n    foreach(list ${ARGN})\n      list(APPEND ${list} ${value})\n    endforeach()\n  endif()\nendmacro()\n\n# Appends value to all strings in ARGN, if the condition is true.\nmacro(append_string_if condition value)\n  if(${condition})\n    foreach(str ${ARGN})\n      set(${str} \"${${str}} ${value}\")\n    endforeach()\n  endif()\nendmacro()\n\nmacro(append_rtti_flag polarity list)\n  if(${polarity})\n    append_list_if(COMPILER_RT_HAS_FRTTI_FLAG -frtti ${list})\n    append_list_if(COMPILER_RT_HAS_GR_FLAG /GR ${list})\n  else()\n    append_list_if(COMPILER_RT_HAS_FNO_RTTI_FLAG -fno-rtti ${list})\n    append_list_if(COMPILER_RT_HAS_GR_FLAG /GR- ${list})\n  endif()\nendmacro()\n\nmacro(list_intersect output input1 input2)\n  set(${output})\n  foreach(it ${${input1}})\n    list(FIND ${input2} ${it} index)\n    if( NOT (index EQUAL -1))\n      list(APPEND 
${output} ${it})\n    endif()\n  endforeach()\nendmacro()\n\nfunction(list_replace input_list old new)\n  set(replaced_list)\n  foreach(item ${${input_list}})\n    if(${item} STREQUAL ${old})\n      list(APPEND replaced_list ${new})\n    else()\n      list(APPEND replaced_list ${item})\n    endif()\n  endforeach()\n  set(${input_list} \"${replaced_list}\" PARENT_SCOPE)\nendfunction()\n\n# Takes ${ARGN} and puts only supported architectures in @out_var list.\nfunction(filter_available_targets out_var)\n  set(archs ${${out_var}})\n  foreach(arch ${ARGN})\n    list(FIND COMPILER_RT_SUPPORTED_ARCH ${arch} ARCH_INDEX)\n    if(NOT (ARCH_INDEX EQUAL -1) AND CAN_TARGET_${arch})\n      list(APPEND archs ${arch})\n    endif()\n  endforeach()\n  set(${out_var} ${archs} PARENT_SCOPE)\nendfunction()\n\n# Add $arch as supported with no additional flags.\nmacro(add_default_target_arch arch)\n  set(TARGET_${arch}_CFLAGS \"\")\n  set(CAN_TARGET_${arch} 1)\n  list(APPEND COMPILER_RT_SUPPORTED_ARCH ${arch})\nendmacro()\n\nfunction(check_compile_definition def argstring out_var)\n  if(\"${def}\" STREQUAL \"\")\n    set(${out_var} TRUE PARENT_SCOPE)\n    return()\n  endif()\n  cmake_push_check_state()\n  set(CMAKE_REQUIRED_FLAGS \"${CMAKE_REQUIRED_FLAGS} ${argstring}\")\n  check_symbol_exists(${def} \"\" ${out_var})\n  cmake_pop_check_state()\nendfunction()\n\n# test_target_arch(<arch> <def> <target flags...>)\n# Checks if architecture is supported: runs host compiler with provided\n# flags to verify that:\n#   1) <def> is defined (if non-empty)\n#   2) simple file can be successfully built.\n# If successful, saves target flags for this architecture.\nmacro(test_target_arch arch def)\n  set(TARGET_${arch}_CFLAGS ${ARGN})\n  set(TARGET_${arch}_LINK_FLAGS ${ARGN})\n  set(argstring \"\")\n  foreach(arg ${ARGN})\n    set(argstring \"${argstring} ${arg}\")\n  endforeach()\n  check_compile_definition(\"${def}\" \"${argstring}\" HAS_${arch}_DEF)\n  if(NOT DEFINED CAN_TARGET_${arch})\n    
if(NOT HAS_${arch}_DEF)\n      set(CAN_TARGET_${arch} FALSE)\n    elseif(TEST_COMPILE_ONLY)\n      try_compile_only(CAN_TARGET_${arch} FLAGS ${TARGET_${arch}_CFLAGS})\n    else()\n      set(FLAG_NO_EXCEPTIONS \"\")\n      if(COMPILER_RT_HAS_FNO_EXCEPTIONS_FLAG)\n        set(FLAG_NO_EXCEPTIONS \" -fno-exceptions \")\n      endif()\n      set(SAVED_CMAKE_EXE_LINKER_FLAGS ${CMAKE_EXE_LINKER_FLAGS})\n      set(CMAKE_EXE_LINKER_FLAGS \"${CMAKE_EXE_LINKER_FLAGS} ${argstring}\")\n      try_compile(CAN_TARGET_${arch} ${CMAKE_BINARY_DIR} ${SIMPLE_SOURCE}\n                  COMPILE_DEFINITIONS \"${TARGET_${arch}_CFLAGS} ${FLAG_NO_EXCEPTIONS}\"\n                  OUTPUT_VARIABLE TARGET_${arch}_OUTPUT)\n      set(CMAKE_EXE_LINKER_FLAGS ${SAVED_CMAKE_EXE_LINKER_FLAGS})\n    endif()\n  endif()\n  if(${CAN_TARGET_${arch}})\n    list(APPEND COMPILER_RT_SUPPORTED_ARCH ${arch})\n  elseif(\"${COMPILER_RT_DEFAULT_TARGET_ARCH}\" STREQUAL \"${arch}\" AND\n         COMPILER_RT_HAS_EXPLICIT_DEFAULT_TARGET_TRIPLE)\n    # Bail out if we cannot target the architecture we plan to test.\n    message(FATAL_ERROR \"Cannot compile for ${arch}:\\n${TARGET_${arch}_OUTPUT}\")\n  endif()\nendmacro()\n\nmacro(detect_target_arch)\n  check_symbol_exists(__arm__ \"\" __ARM)\n  check_symbol_exists(__aarch64__ \"\" __AARCH64)\n  check_symbol_exists(__x86_64__ \"\" __X86_64)\n  check_symbol_exists(__i386__ \"\" __I386)\n  check_symbol_exists(__mips__ \"\" __MIPS)\n  check_symbol_exists(__mips64__ \"\" __MIPS64)\n  check_symbol_exists(__powerpc64__ \"\" __PPC64)\n  check_symbol_exists(__powerpc64le__ \"\" __PPC64LE)\n  check_symbol_exists(__riscv \"\" __RISCV)\n  check_symbol_exists(__s390x__ \"\" __S390X)\n  check_symbol_exists(__sparc \"\" __SPARC)\n  check_symbol_exists(__sparcv9 \"\" __SPARCV9)\n  check_symbol_exists(__wasm32__ \"\" __WEBASSEMBLY32)\n  check_symbol_exists(__wasm64__ \"\" __WEBASSEMBLY64)\n  check_symbol_exists(__ve__ \"\" __VE)\n  if(__ARM)\n    add_default_target_arch(arm)\n  
elseif(__AARCH64)\n    add_default_target_arch(aarch64)\n  elseif(__X86_64)\n    add_default_target_arch(x86_64)\n  elseif(__I386)\n    add_default_target_arch(i386)\n  elseif(__MIPS64) # must be checked before __MIPS\n    add_default_target_arch(mips64)\n  elseif(__MIPS)\n    add_default_target_arch(mips)\n  elseif(__PPC64)\n    add_default_target_arch(powerpc64)\n  elseif(__PPC64LE)\n    add_default_target_arch(powerpc64le)\n  elseif(__RISCV)\n    if(CMAKE_SIZEOF_VOID_P EQUAL \"4\")\n      add_default_target_arch(riscv32)\n    elseif(CMAKE_SIZEOF_VOID_P EQUAL \"8\")\n      add_default_target_arch(riscv64)\n    else()\n      message(FATAL_ERROR \"Unsupport XLEN for RISC-V\")\n    endif()\n  elseif(__S390X)\n    add_default_target_arch(s390x)\n  elseif(__SPARCV9)\n    add_default_target_arch(sparcv9)\n  elseif(__SPARC)\n    add_default_target_arch(sparc)\n  elseif(__WEBASSEMBLY32)\n    add_default_target_arch(wasm32)\n  elseif(__WEBASSEMBLY64)\n    add_default_target_arch(wasm64)\n  elseif(__VE)\n    add_default_target_arch(ve)\n  endif()\nendmacro()\n\nmacro(load_llvm_config)\n  if (NOT LLVM_CONFIG_PATH)\n    find_program(LLVM_CONFIG_PATH \"llvm-config\"\n                 DOC \"Path to llvm-config binary\")\n    if (NOT LLVM_CONFIG_PATH)\n      message(WARNING \"UNSUPPORTED COMPILER-RT CONFIGURATION DETECTED: \"\n                      \"llvm-config not found.\\n\"\n                      \"Reconfigure with -DLLVM_CONFIG_PATH=path/to/llvm-config.\")\n    endif()\n  endif()\n  if (LLVM_CONFIG_PATH)\n    execute_process(\n      COMMAND ${LLVM_CONFIG_PATH} \"--obj-root\" \"--bindir\" \"--libdir\" \"--src-root\" \"--includedir\"\n      RESULT_VARIABLE HAD_ERROR\n      OUTPUT_VARIABLE CONFIG_OUTPUT)\n    if (HAD_ERROR)\n      message(FATAL_ERROR \"llvm-config failed with status ${HAD_ERROR}\")\n    endif()\n    string(REGEX REPLACE \"[ \\t]*[\\r\\n]+[ \\t]*\" \";\" CONFIG_OUTPUT ${CONFIG_OUTPUT})\n    list(GET CONFIG_OUTPUT 0 BINARY_DIR)\n    list(GET CONFIG_OUTPUT 1 
TOOLS_BINARY_DIR)\n    list(GET CONFIG_OUTPUT 2 LIBRARY_DIR)\n    list(GET CONFIG_OUTPUT 3 MAIN_SRC_DIR)\n    list(GET CONFIG_OUTPUT 4 INCLUDE_DIR)\n\n    set(LLVM_BINARY_DIR ${BINARY_DIR} CACHE PATH \"Path to LLVM build tree\")\n    set(LLVM_LIBRARY_DIR ${LIBRARY_DIR} CACHE PATH \"Path to llvm/lib\")\n    set(LLVM_MAIN_SRC_DIR ${MAIN_SRC_DIR} CACHE PATH \"Path to LLVM source tree\")\n    set(LLVM_TOOLS_BINARY_DIR ${TOOLS_BINARY_DIR} CACHE PATH \"Path to llvm/bin\")\n    set(LLVM_INCLUDE_DIR ${INCLUDE_DIR} CACHE PATH \"Paths to LLVM headers\")\n\n    # Detect if we have the LLVMXRay and TestingSupport library installed and\n    # available from llvm-config.\n    execute_process(\n      COMMAND ${LLVM_CONFIG_PATH} \"--ldflags\" \"--libs\" \"xray\"\n      RESULT_VARIABLE HAD_ERROR\n      OUTPUT_VARIABLE CONFIG_OUTPUT\n      ERROR_QUIET)\n    if (HAD_ERROR)\n      message(WARNING \"llvm-config finding xray failed with status ${HAD_ERROR}\")\n      set(COMPILER_RT_HAS_LLVMXRAY FALSE)\n    else()\n      string(REGEX REPLACE \"[ \\t]*[\\r\\n]+[ \\t]*\" \";\" CONFIG_OUTPUT ${CONFIG_OUTPUT})\n      list(GET CONFIG_OUTPUT 0 LDFLAGS)\n      list(GET CONFIG_OUTPUT 1 LIBLIST)\n      file(TO_CMAKE_PATH \"${LDFLAGS}\" LDFLAGS)\n      file(TO_CMAKE_PATH \"${LIBLIST}\" LIBLIST)\n      set(LLVM_XRAY_LDFLAGS ${LDFLAGS} CACHE STRING \"Linker flags for LLVMXRay library\")\n      set(LLVM_XRAY_LIBLIST ${LIBLIST} CACHE STRING \"Library list for LLVMXRay\")\n      set(COMPILER_RT_HAS_LLVMXRAY TRUE)\n    endif()\n\n    set(COMPILER_RT_HAS_LLVMTESTINGSUPPORT FALSE)\n    execute_process(\n      COMMAND ${LLVM_CONFIG_PATH} \"--ldflags\" \"--libs\" \"testingsupport\"\n      RESULT_VARIABLE HAD_ERROR\n      OUTPUT_VARIABLE CONFIG_OUTPUT\n      ERROR_QUIET)\n    if (HAD_ERROR)\n      message(WARNING \"llvm-config finding testingsupport failed with status ${HAD_ERROR}\")\n    elseif(COMPILER_RT_INCLUDE_TESTS)\n      string(REGEX REPLACE \"[ \\t]*[\\r\\n]+[ \\t]*\" \";\" CONFIG_OUTPUT 
${CONFIG_OUTPUT})\n      list(GET CONFIG_OUTPUT 0 LDFLAGS)\n      list(GET CONFIG_OUTPUT 1 LIBLIST)\n      if (LIBLIST STREQUAL \"\")\n        message(WARNING \"testingsupport library not installed, some tests will be skipped\")\n      else()\n        file(TO_CMAKE_PATH \"${LDFLAGS}\" LDFLAGS)\n        file(TO_CMAKE_PATH \"${LIBLIST}\" LIBLIST)\n        set(LLVM_TESTINGSUPPORT_LDFLAGS ${LDFLAGS} CACHE STRING \"Linker flags for LLVMTestingSupport library\")\n        set(LLVM_TESTINGSUPPORT_LIBLIST ${LIBLIST} CACHE STRING \"Library list for LLVMTestingSupport\")\n        set(COMPILER_RT_HAS_LLVMTESTINGSUPPORT TRUE)\n      endif()\n    endif()\n\n    # Make use of LLVM CMake modules.\n    # --cmakedir is supported since llvm r291218 (4.0 release)\n    execute_process(\n      COMMAND ${LLVM_CONFIG_PATH} --cmakedir\n      RESULT_VARIABLE HAD_ERROR\n      OUTPUT_VARIABLE CONFIG_OUTPUT)\n    if(NOT HAD_ERROR)\n      string(STRIP \"${CONFIG_OUTPUT}\" LLVM_CMAKE_PATH_FROM_LLVM_CONFIG)\n      file(TO_CMAKE_PATH ${LLVM_CMAKE_PATH_FROM_LLVM_CONFIG} LLVM_CMAKE_PATH)\n    else()\n      file(TO_CMAKE_PATH ${LLVM_BINARY_DIR} LLVM_BINARY_DIR_CMAKE_STYLE)\n      set(LLVM_CMAKE_PATH \"${LLVM_BINARY_DIR_CMAKE_STYLE}/lib${LLVM_LIBDIR_SUFFIX}/cmake/llvm\")\n    endif()\n\n    list(APPEND CMAKE_MODULE_PATH \"${LLVM_CMAKE_PATH}\")\n    # Get some LLVM variables from LLVMConfig.\n    include(\"${LLVM_CMAKE_PATH}/LLVMConfig.cmake\")\n\n    set(LLVM_LIBRARY_OUTPUT_INTDIR\n      ${LLVM_BINARY_DIR}/${CMAKE_CFG_INTDIR}/lib${LLVM_LIBDIR_SUFFIX})\n  endif()\nendmacro()\n\nmacro(construct_compiler_rt_default_triple)\n  if(COMPILER_RT_DEFAULT_TARGET_ONLY)\n    if(DEFINED COMPILER_RT_DEFAULT_TARGET_TRIPLE)\n      message(FATAL_ERROR \"COMPILER_RT_DEFAULT_TARGET_TRIPLE isn't supported when building for default target only\")\n    endif()\n    set(COMPILER_RT_DEFAULT_TARGET_TRIPLE ${CMAKE_C_COMPILER_TARGET})\n  else()\n    set(COMPILER_RT_DEFAULT_TARGET_TRIPLE ${TARGET_TRIPLE} CACHE STRING\n          
\"Default triple for which compiler-rt runtimes will be built.\")\n  endif()\n\n  if(DEFINED COMPILER_RT_TEST_TARGET_TRIPLE)\n    # Backwards compatibility: this variable used to be called\n    # COMPILER_RT_TEST_TARGET_TRIPLE.\n    set(COMPILER_RT_DEFAULT_TARGET_TRIPLE ${COMPILER_RT_TEST_TARGET_TRIPLE})\n  endif()\n\n  string(REPLACE \"-\" \";\" TARGET_TRIPLE_LIST ${COMPILER_RT_DEFAULT_TARGET_TRIPLE})\n  list(GET TARGET_TRIPLE_LIST 0 COMPILER_RT_DEFAULT_TARGET_ARCH)\n  # Determine if test target triple is specified explicitly, and doesn't match the\n  # default.\n  if(NOT COMPILER_RT_DEFAULT_TARGET_TRIPLE STREQUAL TARGET_TRIPLE)\n    set(COMPILER_RT_HAS_EXPLICIT_DEFAULT_TARGET_TRIPLE TRUE)\n  else()\n    set(COMPILER_RT_HAS_EXPLICIT_DEFAULT_TARGET_TRIPLE FALSE)\n  endif()\nendmacro()\n\n# Filter out generic versions of routines that are re-implemented in an\n# architecture specific manner. This prevents multiple definitions of the same\n# symbols, making the symbol selection non-deterministic.\n#\n# We follow the convention that a source file that exists in a sub-directory\n# (e.g. `ppc/divtc3.c`) is architecture-specific and that if a generic\n# implementation exists it will be a top-level source file with the same name\n# modulo the file extension (e.g. `divtc3.c`).\nfunction(filter_builtin_sources inout_var name)\n  set(intermediate ${${inout_var}})\n  foreach(_file ${intermediate})\n    get_filename_component(_file_dir ${_file} DIRECTORY)\n    if (NOT \"${_file_dir}\" STREQUAL \"\")\n      # Architecture specific file. 
If a generic version exists, print a notice\n      # and ensure that it is removed from the file list.\n      get_filename_component(_name ${_file} NAME)\n      string(REGEX REPLACE \"\\\\.S$\" \".c\" _cname \"${_name}\")\n      if(EXISTS \"${CMAKE_CURRENT_SOURCE_DIR}/${_cname}\")\n        message(STATUS \"For ${name} builtins preferring ${_file} to ${_cname}\")\n        list(REMOVE_ITEM intermediate ${_cname})\n      endif()\n    endif()\n  endforeach()\n  set(${inout_var} ${intermediate} PARENT_SCOPE)\nendfunction()\n\nfunction(get_compiler_rt_target arch variable)\n  string(FIND ${COMPILER_RT_DEFAULT_TARGET_TRIPLE} \"-\" dash_index)\n  string(SUBSTRING ${COMPILER_RT_DEFAULT_TARGET_TRIPLE} ${dash_index} -1 triple_suffix)\n  if(COMPILER_RT_DEFAULT_TARGET_ONLY)\n    # Use exact spelling when building only for the target specified to CMake.\n    set(target \"${COMPILER_RT_DEFAULT_TARGET_TRIPLE}\")\n  elseif(ANDROID AND ${arch} STREQUAL \"i386\")\n    set(target \"i686${triple_suffix}\")\n  else()\n    set(target \"${arch}${triple_suffix}\")\n  endif()\n  set(${variable} ${target} PARENT_SCOPE)\nendfunction()\n\nfunction(get_compiler_rt_install_dir arch install_dir)\n  if(LLVM_ENABLE_PER_TARGET_RUNTIME_DIR AND NOT APPLE)\n    get_compiler_rt_target(${arch} target)\n    set(${install_dir} ${COMPILER_RT_INSTALL_PATH}/lib/${target} PARENT_SCOPE)\n  else()\n    set(${install_dir} ${COMPILER_RT_LIBRARY_INSTALL_DIR} PARENT_SCOPE)\n  endif()\nendfunction()\n\nfunction(get_compiler_rt_output_dir arch output_dir)\n  if(LLVM_ENABLE_PER_TARGET_RUNTIME_DIR AND NOT APPLE)\n    get_compiler_rt_target(${arch} target)\n    set(${output_dir} ${COMPILER_RT_OUTPUT_DIR}/lib/${target} PARENT_SCOPE)\n  else()\n    set(${output_dir} ${COMPILER_RT_LIBRARY_OUTPUT_DIR} PARENT_SCOPE)\n  endif()\nendfunction()\n\n# compiler_rt_process_sources(\n#   <OUTPUT_VAR>\n#   <SOURCE_FILE> ...\n#  [ADDITIONAL_HEADERS <header> ...]\n# )\n#\n# Process the provided sources and write the list of new 
sources\n# into `<OUTPUT_VAR>`.\n#\n# ADDITIONAL_HEADERS     - Adds the supplied header to list of sources for IDEs.\n#\n# This function is very similar to `llvm_process_sources()` but exists here\n# because we need to support standalone builds of compiler-rt.\nfunction(compiler_rt_process_sources OUTPUT_VAR)\n  cmake_parse_arguments(\n    ARG\n    \"\"\n    \"\"\n    \"ADDITIONAL_HEADERS\"\n    ${ARGN}\n  )\n  set(sources ${ARG_UNPARSED_ARGUMENTS})\n  set(headers \"\")\n  if (XCODE OR MSVC_IDE OR CMAKE_EXTRA_GENERATOR)\n    # For IDEs we need to tell CMake about header files.\n    # Otherwise they won't show up in UI.\n    set(headers ${ARG_ADDITIONAL_HEADERS})\n    list(LENGTH headers headers_length)\n    if (${headers_length} GREATER 0)\n      set_source_files_properties(${headers}\n        PROPERTIES HEADER_FILE_ONLY ON)\n    endif()\n  endif()\n  set(\"${OUTPUT_VAR}\" ${sources} ${headers} PARENT_SCOPE)\nendfunction()\n\n# Create install targets for a library and its parent component (if specified).\nfunction(add_compiler_rt_install_targets name)\n  cmake_parse_arguments(ARG \"\" \"PARENT_TARGET\" \"\" ${ARGN})\n\n  if(ARG_PARENT_TARGET AND NOT TARGET install-${ARG_PARENT_TARGET})\n    # The parent install target specifies the parent component to scrape up\n    # anything not installed by the individual install targets, and to handle\n    # installation when running the multi-configuration generators.\n    add_custom_target(install-${ARG_PARENT_TARGET}\n                      DEPENDS ${ARG_PARENT_TARGET}\n                      COMMAND \"${CMAKE_COMMAND}\"\n                              -DCMAKE_INSTALL_COMPONENT=${ARG_PARENT_TARGET}\n                              -P \"${CMAKE_BINARY_DIR}/cmake_install.cmake\")\n    add_custom_target(install-${ARG_PARENT_TARGET}-stripped\n                      DEPENDS ${ARG_PARENT_TARGET}\n                      COMMAND \"${CMAKE_COMMAND}\"\n                              -DCMAKE_INSTALL_COMPONENT=${ARG_PARENT_TARGET}\n             
                 -DCMAKE_INSTALL_DO_STRIP=1\n                              -P \"${CMAKE_BINARY_DIR}/cmake_install.cmake\")\n    set_target_properties(install-${ARG_PARENT_TARGET} PROPERTIES\n                          FOLDER \"Compiler-RT Misc\")\n    set_target_properties(install-${ARG_PARENT_TARGET}-stripped PROPERTIES\n                          FOLDER \"Compiler-RT Misc\")\n    add_dependencies(install-compiler-rt install-${ARG_PARENT_TARGET})\n    add_dependencies(install-compiler-rt-stripped install-${ARG_PARENT_TARGET}-stripped)\n  endif()\n\n  # We only want to generate per-library install targets if you aren't using\n  # an IDE because the extra targets get cluttered in IDEs.\n  if(NOT CMAKE_CONFIGURATION_TYPES)\n    add_custom_target(install-${name}\n                      DEPENDS ${name}\n                      COMMAND \"${CMAKE_COMMAND}\"\n                              -DCMAKE_INSTALL_COMPONENT=${name}\n                              -P \"${CMAKE_BINARY_DIR}/cmake_install.cmake\")\n    add_custom_target(install-${name}-stripped\n                      DEPENDS ${name}\n                      COMMAND \"${CMAKE_COMMAND}\"\n                              -DCMAKE_INSTALL_COMPONENT=${name}\n                              -DCMAKE_INSTALL_DO_STRIP=1\n                              -P \"${CMAKE_BINARY_DIR}/cmake_install.cmake\")\n    # If you have a parent target specified, we bind the new install target\n    # to the parent install target.\n    if(LIB_PARENT_TARGET)\n      add_dependencies(install-${LIB_PARENT_TARGET} install-${name})\n      add_dependencies(install-${LIB_PARENT_TARGET}-stripped install-${name}-stripped)\n    endif()\n  endif()\nendfunction()\n"
  },
  {
    "path": "runtime/cmake/CompilerRTDarwinUtils.cmake",
    "content": "include(CMakeParseArguments)\n\n# On OS X SDKs can be installed anywhere on the base system and xcode-select can\n# set the default Xcode to use. This function finds the SDKs that are present in\n# the current Xcode.\nfunction(find_darwin_sdk_dir var sdk_name)\n  set(DARWIN_${sdk_name}_CACHED_SYSROOT \"\" CACHE STRING \"Darwin SDK path for SDK ${sdk_name}.\")\n  set(DARWIN_PREFER_PUBLIC_SDK OFF CACHE BOOL \"Prefer Darwin public SDK, even when an internal SDK is present.\")\n\n  if(DARWIN_${sdk_name}_CACHED_SYSROOT)\n    set(${var} ${DARWIN_${sdk_name}_CACHED_SYSROOT} PARENT_SCOPE)\n    return()\n  endif()\n  if(NOT DARWIN_PREFER_PUBLIC_SDK)\n    # Let's first try the internal SDK, otherwise use the public SDK.\n    execute_process(\n      COMMAND xcodebuild -version -sdk ${sdk_name}.internal Path\n      RESULT_VARIABLE result_process\n      OUTPUT_VARIABLE var_internal\n      OUTPUT_STRIP_TRAILING_WHITESPACE\n      ERROR_FILE /dev/null\n    )\n  endif()\n  if((NOT result_process EQUAL 0) OR \"\" STREQUAL \"${var_internal}\")\n    execute_process(\n      COMMAND xcodebuild -version -sdk ${sdk_name} Path\n      RESULT_VARIABLE result_process\n      OUTPUT_VARIABLE var_internal\n      OUTPUT_STRIP_TRAILING_WHITESPACE\n      ERROR_FILE /dev/null\n    )\n  else()\n    set(${var}_INTERNAL ${var_internal} PARENT_SCOPE)\n  endif()\n  if(result_process EQUAL 0)\n    set(${var} ${var_internal} PARENT_SCOPE)\n  endif()\n  set(DARWIN_${sdk_name}_CACHED_SYSROOT ${var_internal} CACHE STRING \"Darwin SDK path for SDK ${sdk_name}.\" FORCE)\nendfunction()\n\n# There isn't a clear mapping of what architectures are supported with a given\n# target platform, but ld's version output does list the architectures it can\n# link for.\nfunction(darwin_get_toolchain_supported_archs output_var)\n  execute_process(\n    COMMAND ld -v\n    ERROR_VARIABLE LINKER_VERSION)\n\n  string(REGEX MATCH \"configured to support archs: ([^\\n]+)\"\n         ARCHES_MATCHED 
\"${LINKER_VERSION}\")\n  if(ARCHES_MATCHED)\n    set(ARCHES \"${CMAKE_MATCH_1}\")\n    message(STATUS \"Got ld supported ARCHES: ${ARCHES}\")\n    string(REPLACE \" \" \";\" ARCHES ${ARCHES})\n  else()\n    # If auto-detecting fails, fall back to a default set\n    message(WARNING \"Detecting supported architectures from 'ld -v' failed. Returning default set.\")\n    set(ARCHES \"i386;x86_64;armv7;armv7s;arm64\")\n  endif()\n  \n  set(${output_var} ${ARCHES} PARENT_SCOPE)\nendfunction()\n\n# This function takes an OS and a list of architectures and identifies the\n# subset of the architectures list that the installed toolchain can target.\nfunction(darwin_test_archs os valid_archs)\n  if(${valid_archs})\n    message(STATUS \"Using cached valid architectures for ${os}.\")\n    return()\n  endif()\n\n  set(archs ${ARGN})\n  if(NOT TEST_COMPILE_ONLY)\n    message(STATUS \"Finding valid architectures for ${os}...\")\n    set(SIMPLE_C ${CMAKE_BINARY_DIR}${CMAKE_FILES_DIRECTORY}/src.c)\n    file(WRITE ${SIMPLE_C} \"#include <stdio.h>\\nint main() { printf(__FILE__); return 0; }\\n\")\n  \n    set(os_linker_flags)\n    foreach(flag ${DARWIN_${os}_LINK_FLAGS})\n      set(os_linker_flags \"${os_linker_flags} ${flag}\")\n    endforeach()\n  endif()\n\n  # The simple program will build for x86_64h on the simulator because it is \n  # compatible with x86_64 libraries (mostly), but since x86_64h isn't actually\n  # a valid or useful architecture for the iOS simulator we should drop it.\n  if(${os} MATCHES \"^(iossim|tvossim|watchossim)$\")\n    list(REMOVE_ITEM archs \"x86_64h\")\n  endif()\n\n  set(working_archs)\n  foreach(arch ${archs})\n   \n    set(arch_linker_flags \"-arch ${arch} ${os_linker_flags}\")\n    if(TEST_COMPILE_ONLY)\n      try_compile_only(CAN_TARGET_${os}_${arch} -v -arch ${arch} ${DARWIN_${os}_CFLAGS})\n    else()\n      set(SAVED_CMAKE_EXE_LINKER_FLAGS ${CMAKE_EXE_LINKER_FLAGS})\n      set(CMAKE_EXE_LINKER_FLAGS \"${CMAKE_EXE_LINKER_FLAGS} 
${arch_linker_flags}\")\n      try_compile(CAN_TARGET_${os}_${arch} ${CMAKE_BINARY_DIR} ${SIMPLE_C}\n                  COMPILE_DEFINITIONS \"-v -arch ${arch}\" ${DARWIN_${os}_CFLAGS}\n                  OUTPUT_VARIABLE TEST_OUTPUT)\n      set(CMAKE_EXE_LINKER_FLAGS ${SAVED_CMAKE_EXE_LINKER_FLAGS})\n    endif()\n    if(${CAN_TARGET_${os}_${arch}})\n      list(APPEND working_archs ${arch})\n    else()\n      file(APPEND ${CMAKE_BINARY_DIR}${CMAKE_FILES_DIRECTORY}/CMakeError.log\n        \"Testing compiler for supporting ${os}-${arch}:\\n\"\n        \"${TEST_OUTPUT}\\n\")\n    endif()\n  endforeach()\n  set(${valid_archs} ${working_archs}\n    CACHE STRING \"List of valid architectures for platform ${os}.\")\nendfunction()\n\n# This function checks the host cpusubtype to see if it is post-haswell. Haswell\n# and later machines can run x86_64h binaries. Haswell is cpusubtype 8.\nfunction(darwin_filter_host_archs input output)\n  list_intersect(tmp_var DARWIN_osx_ARCHS ${input})\n  execute_process(\n    COMMAND sysctl hw.cpusubtype\n    OUTPUT_VARIABLE SUBTYPE)\n\n  string(REGEX MATCH \"hw.cpusubtype: ([0-9]*)\"\n         SUBTYPE_MATCHED \"${SUBTYPE}\")\n  set(HASWELL_SUPPORTED Off)\n  if(SUBTYPE_MATCHED)\n    if(${CMAKE_MATCH_1} GREATER 7)\n      set(HASWELL_SUPPORTED On)\n    endif()\n  endif()\n  if(NOT HASWELL_SUPPORTED)\n    list(REMOVE_ITEM tmp_var x86_64h)\n  endif()\n  set(${output} ${tmp_var} PARENT_SCOPE)\nendfunction()\n\n# Read and process the exclude file into a list of symbols\nfunction(darwin_read_list_from_file output_var file)\n  if(EXISTS ${file})\n    file(READ ${file} EXCLUDES)\n    string(REPLACE \"\\n\" \";\" EXCLUDES ${EXCLUDES})\n    set(${output_var} ${EXCLUDES} PARENT_SCOPE)\n  endif()\nendfunction()\n\n# this function takes an OS, architecture and minimum version and provides a\n# list of builtin functions to exclude\nfunction(darwin_find_excluded_builtins_list output_var)\n  cmake_parse_arguments(LIB\n    \"\"\n    \"OS;ARCH;MIN_VERSION\"\n    
\"\"\n    ${ARGN})\n\n  if(NOT LIB_OS OR NOT LIB_ARCH)\n    message(FATAL_ERROR \"Must specify OS and ARCH to darwin_find_excluded_builtins_list!\")\n  endif()\n\n  darwin_read_list_from_file(${LIB_OS}_BUILTINS\n    ${DARWIN_EXCLUDE_DIR}/${LIB_OS}.txt)\n  darwin_read_list_from_file(${LIB_OS}_${LIB_ARCH}_BASE_BUILTINS\n    ${DARWIN_EXCLUDE_DIR}/${LIB_OS}-${LIB_ARCH}.txt)\n\n  if(LIB_MIN_VERSION)\n    file(GLOB builtin_lists ${DARWIN_EXCLUDE_DIR}/${LIB_OS}*-${LIB_ARCH}.txt)\n    foreach(builtin_list ${builtin_lists})\n      string(REGEX MATCH \"${LIB_OS}([0-9\\\\.]*)-${LIB_ARCH}.txt\" VERSION_MATCHED \"${builtin_list}\")\n      if (VERSION_MATCHED AND NOT CMAKE_MATCH_1 VERSION_LESS LIB_MIN_VERSION)\n        if(NOT smallest_version)\n          set(smallest_version ${CMAKE_MATCH_1})\n        elseif(CMAKE_MATCH_1 VERSION_LESS smallest_version)\n          set(smallest_version ${CMAKE_MATCH_1})\n        endif()\n      endif()\n    endforeach()\n\n    if(smallest_version)\n      darwin_read_list_from_file(${LIB_ARCH}_${LIB_OS}_BUILTINS\n        ${DARWIN_EXCLUDE_DIR}/${LIB_OS}${smallest_version}-${LIB_ARCH}.txt)\n    endif()\n  endif()\n  \n  set(${output_var}\n      ${${LIB_ARCH}_${LIB_OS}_BUILTINS}\n      ${${LIB_OS}_${LIB_ARCH}_BASE_BUILTINS}\n      ${${LIB_OS}_BUILTINS} PARENT_SCOPE)\nendfunction()\n\n# adds a single builtin library for a single OS & ARCH\nmacro(darwin_add_builtin_library name suffix)\n  cmake_parse_arguments(LIB\n    \"\"\n    \"PARENT_TARGET;OS;ARCH\"\n    \"SOURCES;CFLAGS;DEFS\"\n    ${ARGN})\n  set(libname \"${name}.${suffix}_${LIB_ARCH}_${LIB_OS}\")\n  add_library(${libname} STATIC ${LIB_SOURCES})\n  if(DARWIN_${LIB_OS}_SYSROOT)\n    set(sysroot_flag -isysroot ${DARWIN_${LIB_OS}_SYSROOT})\n  endif()\n\n  # Make a copy of the compilation flags.\n  set(builtin_cflags ${LIB_CFLAGS})\n\n  # Strip out any inappropriate flags for the target.\n  if(\"${LIB_ARCH}\" MATCHES \"^(armv7|armv7k|armv7s)$\")\n    set(builtin_cflags \"\")\n    foreach(cflag 
\"${LIB_CFLAGS}\")\n      string(REPLACE \"-fomit-frame-pointer\" \"\" cflag \"${cflag}\")\n      list(APPEND builtin_cflags ${cflag})\n    endforeach(cflag)\n  endif()\n\n  set_target_compile_flags(${libname}\n    ${sysroot_flag}\n    ${DARWIN_${LIB_OS}_BUILTIN_MIN_VER_FLAG}\n    ${builtin_cflags})\n  set_property(TARGET ${libname} APPEND PROPERTY\n      COMPILE_DEFINITIONS ${LIB_DEFS})\n  set_target_properties(${libname} PROPERTIES\n      OUTPUT_NAME ${libname}${COMPILER_RT_OS_SUFFIX})\n  set_target_properties(${libname} PROPERTIES\n    OSX_ARCHITECTURES ${LIB_ARCH})\n\n  if(LIB_PARENT_TARGET)\n    add_dependencies(${LIB_PARENT_TARGET} ${libname})\n  endif()\n\n  list(APPEND ${LIB_OS}_${suffix}_libs ${libname})\n  list(APPEND ${LIB_OS}_${suffix}_lipo_flags -arch ${arch} $<TARGET_FILE:${libname}>)\nendmacro()\n\nfunction(darwin_lipo_libs name)\n  cmake_parse_arguments(LIB\n    \"\"\n    \"PARENT_TARGET;OUTPUT_DIR;INSTALL_DIR\"\n    \"LIPO_FLAGS;DEPENDS\"\n    ${ARGN})\n  if(LIB_DEPENDS AND LIB_LIPO_FLAGS)\n    add_custom_command(OUTPUT ${LIB_OUTPUT_DIR}/lib${name}.a\n      COMMAND ${CMAKE_COMMAND} -E make_directory ${LIB_OUTPUT_DIR}\n      COMMAND lipo -output\n              ${LIB_OUTPUT_DIR}/lib${name}.a\n              -create ${LIB_LIPO_FLAGS}\n      DEPENDS ${LIB_DEPENDS}\n      )\n    add_custom_target(${name}\n      DEPENDS ${LIB_OUTPUT_DIR}/lib${name}.a)\n    add_dependencies(${LIB_PARENT_TARGET} ${name})\n    install(FILES ${LIB_OUTPUT_DIR}/lib${name}.a\n      DESTINATION ${LIB_INSTALL_DIR})\n  else()\n    message(WARNING \"Not generating lipo target for ${name} because no input libraries exist.\")\n  endif()\nendfunction()\n\n# Generates builtin libraries for all operating systems specified in ARGN. 
Each\n# OS library is constructed by lipo-ing together single-architecture libraries.\nmacro(darwin_add_builtin_libraries)\n  set(DARWIN_EXCLUDE_DIR ${CMAKE_CURRENT_SOURCE_DIR}/Darwin-excludes)\n\n  set(CFLAGS \"-fPIC -O3 -fvisibility=hidden -DVISIBILITY_HIDDEN -Wall -fomit-frame-pointer\")\n  set(CMAKE_C_FLAGS \"\")\n  set(CMAKE_CXX_FLAGS \"\")\n  set(CMAKE_ASM_FLAGS \"\")\n\n  set(PROFILE_SOURCES ../profile/InstrProfiling \n                      ../profile/InstrProfilingBuffer\n                      ../profile/InstrProfilingPlatformDarwin\n                      ../profile/InstrProfilingWriter)\n  foreach (os ${ARGN})\n    list_intersect(DARWIN_BUILTIN_ARCHS DARWIN_${os}_ARCHS BUILTIN_SUPPORTED_ARCH)\n    foreach (arch ${DARWIN_BUILTIN_ARCHS})\n      darwin_find_excluded_builtins_list(${arch}_${os}_EXCLUDED_BUILTINS\n                              OS ${os}\n                              ARCH ${arch}\n                              MIN_VERSION ${DARWIN_${os}_BUILTIN_MIN_VER})\n\n      filter_builtin_sources(filtered_sources\n        EXCLUDE ${arch}_${os}_EXCLUDED_BUILTINS\n        ${${arch}_SOURCES})\n\n      darwin_add_builtin_library(clang_rt builtins\n                              OS ${os}\n                              ARCH ${arch}\n                              SOURCES ${filtered_sources}\n                              CFLAGS ${CFLAGS} -arch ${arch}\n                              PARENT_TARGET builtins)\n    endforeach()\n\n    # Don't build cc_kext libraries for simulator platforms\n    if(NOT DARWIN_${os}_SKIP_CC_KEXT)\n      foreach (arch ${DARWIN_BUILTIN_ARCHS})\n        # By not specifying MIN_VERSION this only reads the OS and OS-arch lists.\n        # We don't want to filter out the builtins that are present in libSystem\n        # because kexts can't link libSystem.\n        darwin_find_excluded_builtins_list(${arch}_${os}_EXCLUDED_BUILTINS\n                              OS ${os}\n                              ARCH ${arch})\n\n        
filter_builtin_sources(filtered_sources\n          EXCLUDE ${arch}_${os}_EXCLUDED_BUILTINS\n          ${${arch}_SOURCES})\n\n        # In addition to the builtins cc_kext includes some profile sources\n        darwin_add_builtin_library(clang_rt cc_kext\n                                OS ${os}\n                                ARCH ${arch}\n                                SOURCES ${filtered_sources} ${PROFILE_SOURCES}\n                                CFLAGS ${CFLAGS} -arch ${arch} -mkernel\n                                DEFS KERNEL_USE\n                                PARENT_TARGET builtins)\n      endforeach()\n      set(archive_name clang_rt.cc_kext_${os})\n      if(${os} STREQUAL \"osx\")\n        set(archive_name clang_rt.cc_kext)\n      endif()\n      darwin_lipo_libs(${archive_name}\n                      PARENT_TARGET builtins\n                      LIPO_FLAGS ${${os}_cc_kext_lipo_flags}\n                      DEPENDS ${${os}_cc_kext_libs}\n                      OUTPUT_DIR ${COMPILER_RT_LIBRARY_OUTPUT_DIR}\n                      INSTALL_DIR ${COMPILER_RT_LIBRARY_INSTALL_DIR})\n    endif()\n  endforeach()\n\n  # We put the x86 sim slices into the archives for their base OS\n  foreach (os ${ARGN})\n    if(NOT ${os} MATCHES \".*sim$\")\n      darwin_lipo_libs(clang_rt.${os}\n                        PARENT_TARGET builtins\n                        LIPO_FLAGS ${${os}_builtins_lipo_flags} ${${os}sim_builtins_lipo_flags}\n                        DEPENDS ${${os}_builtins_libs} ${${os}sim_builtins_libs}\n                        OUTPUT_DIR ${COMPILER_RT_LIBRARY_OUTPUT_DIR}\n                        INSTALL_DIR ${COMPILER_RT_LIBRARY_INSTALL_DIR})\n    endif()\n  endforeach()\n  darwin_add_embedded_builtin_libraries()\nendmacro()\n\nmacro(darwin_add_embedded_builtin_libraries)\n  # this is a hacky opt-out. 
If you can't target both intel and arm\n  # architectures we bail here.\n  set(DARWIN_SOFT_FLOAT_ARCHS armv6m armv7m armv7em armv7)\n  set(DARWIN_HARD_FLOAT_ARCHS armv7em armv7)\n  if(COMPILER_RT_SUPPORTED_ARCH MATCHES \".*armv.*\")\n    list(FIND COMPILER_RT_SUPPORTED_ARCH i386 i386_idx)\n    if(i386_idx GREATER -1)\n      list(APPEND DARWIN_HARD_FLOAT_ARCHS i386)\n    endif()\n\n    list(FIND COMPILER_RT_SUPPORTED_ARCH x86_64 x86_64_idx)\n    if(x86_64_idx GREATER -1)\n      list(APPEND DARWIN_HARD_FLOAT_ARCHS x86_64)\n    endif()\n\n    set(MACHO_SYM_DIR ${CMAKE_CURRENT_SOURCE_DIR}/macho_embedded)\n\n    set(CFLAGS \"-Oz -Wall -fomit-frame-pointer -ffreestanding\")\n    set(CMAKE_C_FLAGS \"\")\n    set(CMAKE_CXX_FLAGS \"\")\n    set(CMAKE_ASM_FLAGS \"\")\n\n    set(SOFT_FLOAT_FLAG -mfloat-abi=soft)\n    set(HARD_FLOAT_FLAG -mfloat-abi=hard)\n\n    set(ENABLE_PIC Off)\n    set(PIC_FLAG -fPIC)\n    set(STATIC_FLAG -static)\n\n    set(DARWIN_macho_embedded_ARCHS armv6m armv7m armv7em armv7 i386 x86_64)\n\n    set(DARWIN_macho_embedded_LIBRARY_OUTPUT_DIR\n      ${COMPILER_RT_OUTPUT_DIR}/lib/macho_embedded)\n    set(DARWIN_macho_embedded_LIBRARY_INSTALL_DIR\n      ${COMPILER_RT_INSTALL_PATH}/lib/macho_embedded)\n      \n    set(CFLAGS_armv7 \"-target thumbv7-apple-darwin-eabi\")\n    set(CFLAGS_i386 \"-march=pentium\")\n\n    darwin_read_list_from_file(common_FUNCTIONS ${MACHO_SYM_DIR}/common.txt)\n    darwin_read_list_from_file(thumb2_FUNCTIONS ${MACHO_SYM_DIR}/thumb2.txt)\n    darwin_read_list_from_file(thumb2_64_FUNCTIONS ${MACHO_SYM_DIR}/thumb2-64.txt)\n    darwin_read_list_from_file(arm_FUNCTIONS ${MACHO_SYM_DIR}/arm.txt)\n    darwin_read_list_from_file(i386_FUNCTIONS ${MACHO_SYM_DIR}/i386.txt)\n\n\n    set(armv6m_FUNCTIONS ${common_FUNCTIONS} ${arm_FUNCTIONS})\n    set(armv7m_FUNCTIONS ${common_FUNCTIONS} ${arm_FUNCTIONS} ${thumb2_FUNCTIONS})\n    set(armv7em_FUNCTIONS ${common_FUNCTIONS} ${arm_FUNCTIONS} ${thumb2_FUNCTIONS})\n    set(armv7_FUNCTIONS 
${common_FUNCTIONS} ${arm_FUNCTIONS} ${thumb2_FUNCTIONS} ${thumb2_64_FUNCTIONS})\n    set(i386_FUNCTIONS ${common_FUNCTIONS} ${i386_FUNCTIONS})\n    set(x86_64_FUNCTIONS ${common_FUNCTIONS})\n\n    foreach(arch ${DARWIN_macho_embedded_ARCHS})\n      filter_builtin_sources(${arch}_filtered_sources\n        INCLUDE ${arch}_FUNCTIONS\n        ${${arch}_SOURCES})\n      if(NOT ${arch}_filtered_sources)\n        message(\"${arch}_SOURCES: ${${arch}_SOURCES}\")\n        message(\"${arch}_FUNCTIONS: ${${arch}_FUNCTIONS}\")\n        message(FATAL_ERROR \"Empty filtered sources!\")\n      endif()\n    endforeach()\n\n    foreach(float_type SOFT HARD)\n      foreach(type PIC STATIC)\n        string(TOLOWER \"${float_type}_${type}\" lib_suffix)\n        foreach(arch ${DARWIN_${float_type}_FLOAT_ARCHS})\n          set(DARWIN_macho_embedded_SYSROOT ${DARWIN_osx_SYSROOT})\n          set(float_flag)\n          if(${arch} MATCHES \"^arm\")\n            # x86 targets are hard float by default, but the complain about the\n            # float ABI flag, so don't pass it unless we're targeting arm.\n            set(float_flag ${${float_type}_FLOAT_FLAG})\n          endif()\n          darwin_add_builtin_library(clang_rt ${lib_suffix}\n                                OS macho_embedded\n                                ARCH ${arch}\n                                SOURCES ${${arch}_filtered_sources}\n                                CFLAGS ${CFLAGS} -arch ${arch} ${${type}_FLAG} ${float_flag} ${CFLAGS_${arch}}\n                                PARENT_TARGET builtins)\n        endforeach()\n        foreach(lib ${macho_embedded_${lib_suffix}_libs})\n          set_target_properties(${lib} PROPERTIES LINKER_LANGUAGE C)\n        endforeach()\n        darwin_lipo_libs(clang_rt.${lib_suffix}\n                      PARENT_TARGET builtins\n                      LIPO_FLAGS ${macho_embedded_${lib_suffix}_lipo_flags}\n                      DEPENDS ${macho_embedded_${lib_suffix}_libs}\n                   
   OUTPUT_DIR ${DARWIN_macho_embedded_LIBRARY_OUTPUT_DIR}\n                      INSTALL_DIR ${DARWIN_macho_embedded_LIBRARY_INSTALL_DIR})\n      endforeach()\n    endforeach()\n  endif()\nendmacro()\n"
  },
  {
    "path": "runtime/cmake/CompilerRTLink.cmake",
    "content": "# Link a shared library with COMPILER_RT_TEST_COMPILER.\n# clang_link_shared(<output.so>\n#                   OBJECTS <list of input objects>\n#                   LINK_FLAGS <list of link flags>\n#                   DEPS <list of dependencies>)\nmacro(clang_link_shared so_file)\n  cmake_parse_arguments(SOURCE \"\" \"\" \"OBJECTS;LINK_FLAGS;DEPS\" ${ARGN})\n  if(NOT COMPILER_RT_STANDALONE_BUILD)\n    list(APPEND SOURCE_DEPS clang)\n  endif()\n  add_custom_command(\n    OUTPUT ${so_file}\n    COMMAND ${COMPILER_RT_TEST_COMPILER} -o \"${so_file}\" -shared\n            ${SOURCE_LINK_FLAGS} ${SOURCE_OBJECTS}\n    DEPENDS ${SOURCE_DEPS})\nendmacro()\n"
  },
  {
    "path": "runtime/cmake/CompilerRTUtils.cmake",
    "content": "include(CMakePushCheckState)\ninclude(CheckSymbolExists)\n\n# Because compiler-rt spends a lot of time setting up custom compile flags,\n# define a handy helper function for it. The compile flags setting in CMake\n# has serious issues that make its syntax challenging at best.\nfunction(set_target_compile_flags target)\n  set(argstring \"\")\n  foreach(arg ${ARGN})\n    set(argstring \"${argstring} ${arg}\")\n  endforeach()\n  set_property(TARGET ${target} PROPERTY COMPILE_FLAGS \"${argstring}\")\nendfunction()\n\nfunction(set_target_link_flags target)\n  set(argstring \"\")\n  foreach(arg ${ARGN})\n    set(argstring \"${argstring} ${arg}\")\n  endforeach()\n  set_property(TARGET ${target} PROPERTY LINK_FLAGS \"${argstring}\")\nendfunction()\n\n# Set the variable var_PYBOOL to True if var holds a true-ish string,\n# otherwise set it to False.\nmacro(pythonize_bool var)\n  if (${var})\n    set(${var}_PYBOOL True)\n  else()\n    set(${var}_PYBOOL False)\n  endif()\nendmacro()\n\n# Appends value to all lists in ARGN, if the condition is true.\nmacro(append_list_if condition value)\n  if(${condition})\n    foreach(list ${ARGN})\n      list(APPEND ${list} ${value})\n    endforeach()\n  endif()\nendmacro()\n\n# Appends value to all strings in ARGN, if the condition is true.\nmacro(append_string_if condition value)\n  if(${condition})\n    foreach(str ${ARGN})\n      set(${str} \"${${str}} ${value}\")\n    endforeach()\n  endif()\nendmacro()\n\nmacro(append_rtti_flag polarity list)\n  if(${polarity})\n    append_list_if(COMPILER_RT_HAS_FRTTI_FLAG -frtti ${list})\n    append_list_if(COMPILER_RT_HAS_GR_FLAG /GR ${list})\n  else()\n    append_list_if(COMPILER_RT_HAS_FNO_RTTI_FLAG -fno-rtti ${list})\n    append_list_if(COMPILER_RT_HAS_GR_FLAG /GR- ${list})\n  endif()\nendmacro()\n\nmacro(append_have_file_definition filename varname list)\n  check_include_file(\"${filename}\" \"${varname}\")\n  if (NOT ${varname})\n    set(\"${varname}\" 0)\n  endif()\n  
list(APPEND ${list} \"${varname}=${${varname}}\")\nendmacro()\n\nmacro(list_intersect output input1 input2)\n  set(${output})\n  foreach(it ${${input1}})\n    list(FIND ${input2} ${it} index)\n    if( NOT (index EQUAL -1))\n      list(APPEND ${output} ${it})\n    endif()\n  endforeach()\nendmacro()\n\nfunction(list_replace input_list old new)\n  set(replaced_list)\n  foreach(item ${${input_list}})\n    if(${item} STREQUAL ${old})\n      list(APPEND replaced_list ${new})\n    else()\n      list(APPEND replaced_list ${item})\n    endif()\n  endforeach()\n  set(${input_list} \"${replaced_list}\" PARENT_SCOPE)\nendfunction()\n\n# Takes ${ARGN} and puts only supported architectures in @out_var list.\nfunction(filter_available_targets out_var)\n  set(archs ${${out_var}})\n  foreach(arch ${ARGN})\n    list(FIND COMPILER_RT_SUPPORTED_ARCH ${arch} ARCH_INDEX)\n    if(NOT (ARCH_INDEX EQUAL -1) AND CAN_TARGET_${arch})\n      list(APPEND archs ${arch})\n    endif()\n  endforeach()\n  set(${out_var} ${archs} PARENT_SCOPE)\nendfunction()\n\n# Add $arch as supported with no additional flags.\nmacro(add_default_target_arch arch)\n  set(TARGET_${arch}_CFLAGS \"\")\n  set(CAN_TARGET_${arch} 1)\n  list(APPEND COMPILER_RT_SUPPORTED_ARCH ${arch})\nendmacro()\n\nfunction(check_compile_definition def argstring out_var)\n  if(\"${def}\" STREQUAL \"\")\n    set(${out_var} TRUE PARENT_SCOPE)\n    return()\n  endif()\n  cmake_push_check_state()\n  set(CMAKE_REQUIRED_FLAGS \"${CMAKE_REQUIRED_FLAGS} ${argstring}\")\n  check_symbol_exists(${def} \"\" ${out_var})\n  cmake_pop_check_state()\nendfunction()\n\n# test_target_arch(<arch> <def> <target flags...>)\n# Checks if architecture is supported: runs host compiler with provided\n# flags to verify that:\n#   1) <def> is defined (if non-empty)\n#   2) simple file can be successfully built.\n# If successful, saves target flags for this architecture.\nmacro(test_target_arch arch def)\n  set(TARGET_${arch}_CFLAGS ${ARGN})\n  
set(TARGET_${arch}_LINK_FLAGS ${ARGN})\n  set(argstring \"\")\n  foreach(arg ${ARGN})\n    set(argstring \"${argstring} ${arg}\")\n  endforeach()\n  check_compile_definition(\"${def}\" \"${argstring}\" HAS_${arch}_DEF)\n  if(NOT DEFINED CAN_TARGET_${arch})\n    if(NOT HAS_${arch}_DEF)\n      set(CAN_TARGET_${arch} FALSE)\n    elseif(TEST_COMPILE_ONLY)\n      try_compile_only(CAN_TARGET_${arch} ${TARGET_${arch}_CFLAGS})\n    else()\n      set(FLAG_NO_EXCEPTIONS \"\")\n      if(COMPILER_RT_HAS_FNO_EXCEPTIONS_FLAG)\n        set(FLAG_NO_EXCEPTIONS \" -fno-exceptions \")\n      endif()\n      set(SAVED_CMAKE_EXE_LINKER_FLAGS ${CMAKE_EXE_LINKER_FLAGS})\n      set(CMAKE_EXE_LINKER_FLAGS \"${CMAKE_EXE_LINKER_FLAGS} ${argstring}\")\n      try_compile(CAN_TARGET_${arch} ${CMAKE_BINARY_DIR} ${SIMPLE_SOURCE}\n                  COMPILE_DEFINITIONS \"${TARGET_${arch}_CFLAGS} ${FLAG_NO_EXCEPTIONS}\"\n                  OUTPUT_VARIABLE TARGET_${arch}_OUTPUT)\n      set(CMAKE_EXE_LINKER_FLAGS ${SAVED_CMAKE_EXE_LINKER_FLAGS})\n    endif()\n  endif()\n  if(${CAN_TARGET_${arch}})\n    list(APPEND COMPILER_RT_SUPPORTED_ARCH ${arch})\n  elseif(\"${COMPILER_RT_DEFAULT_TARGET_ARCH}\" STREQUAL \"${arch}\" AND\n         COMPILER_RT_HAS_EXPLICIT_DEFAULT_TARGET_TRIPLE)\n    # Bail out if we cannot target the architecture we plan to test.\n    message(FATAL_ERROR \"Cannot compile for ${arch}:\\n${TARGET_${arch}_OUTPUT}\")\n  endif()\nendmacro()\n\nmacro(detect_target_arch)\n  check_symbol_exists(__arm__ \"\" __ARM)\n  check_symbol_exists(__aarch64__ \"\" __AARCH64)\n  check_symbol_exists(__x86_64__ \"\" __X86_64)\n  check_symbol_exists(__i386__ \"\" __I386)\n  check_symbol_exists(__mips__ \"\" __MIPS)\n  check_symbol_exists(__mips64__ \"\" __MIPS64)\n  check_symbol_exists(__powerpc64__ \"\" __PPC64)\n  check_symbol_exists(__powerpc64le__ \"\" __PPC64LE)\n  check_symbol_exists(__riscv \"\" __RISCV)\n  check_symbol_exists(__s390x__ \"\" __S390X)\n  check_symbol_exists(__wasm32__ \"\" 
__WEBASSEMBLY32)\n  check_symbol_exists(__wasm64__ \"\" __WEBASSEMBLY64)\n  if(__ARM)\n    add_default_target_arch(arm)\n  elseif(__AARCH64)\n    add_default_target_arch(aarch64)\n  elseif(__X86_64)\n    add_default_target_arch(x86_64)\n  elseif(__I386)\n    add_default_target_arch(i386)\n  elseif(__MIPS64) # must be checked before __MIPS\n    add_default_target_arch(mips64)\n  elseif(__MIPS)\n    add_default_target_arch(mips)\n  elseif(__PPC64)\n    add_default_target_arch(powerpc64)\n  elseif(__PPC64LE)\n    add_default_target_arch(powerpc64le)\n  elseif(__RISCV)\n    if(CMAKE_SIZEOF_VOID_P EQUAL \"4\")\n      add_default_target_arch(riscv32)\n    elseif(CMAKE_SIZEOF_VOID_P EQUAL \"8\")\n      add_default_target_arch(riscv64)\n    else()\n      message(FATAL_ERROR \"Unsupport XLEN for RISC-V\")\n    endif()\n  elseif(__S390X)\n    add_default_target_arch(s390x)\n  elseif(__WEBASSEMBLY32)\n    add_default_target_arch(wasm32)\n  elseif(__WEBASSEMBLY64)\n    add_default_target_arch(wasm64)\n  endif()\nendmacro()\n\nmacro(load_llvm_config)\n  if (NOT LLVM_CONFIG_PATH)\n    find_program(LLVM_CONFIG_PATH \"llvm-config\"\n                 DOC \"Path to llvm-config binary\")\n    if (NOT LLVM_CONFIG_PATH)\n      message(FATAL_ERROR \"llvm-config not found: specify LLVM_CONFIG_PATH\")\n    endif()\n  endif()\n  execute_process(\n    COMMAND ${LLVM_CONFIG_PATH} \"--obj-root\" \"--bindir\" \"--libdir\" \"--src-root\"\n    RESULT_VARIABLE HAD_ERROR\n    OUTPUT_VARIABLE CONFIG_OUTPUT)\n  if (HAD_ERROR)\n    message(FATAL_ERROR \"llvm-config failed with status ${HAD_ERROR}\")\n  endif()\n  string(REGEX REPLACE \"[ \\t]*[\\r\\n]+[ \\t]*\" \";\" CONFIG_OUTPUT ${CONFIG_OUTPUT})\n  list(GET CONFIG_OUTPUT 0 BINARY_DIR)\n  list(GET CONFIG_OUTPUT 1 TOOLS_BINARY_DIR)\n  list(GET CONFIG_OUTPUT 2 LIBRARY_DIR)\n  list(GET CONFIG_OUTPUT 3 MAIN_SRC_DIR)\n\n  set(LLVM_BINARY_DIR ${BINARY_DIR} CACHE PATH \"Path to LLVM build tree\")\n  set(LLVM_TOOLS_BINARY_DIR ${TOOLS_BINARY_DIR} CACHE PATH 
\"Path to llvm/bin\")\n  set(LLVM_LIBRARY_DIR ${LIBRARY_DIR} CACHE PATH \"Path to llvm/lib\")\n  set(LLVM_MAIN_SRC_DIR ${MAIN_SRC_DIR} CACHE PATH \"Path to LLVM source tree\")\n\n  # Make use of LLVM CMake modules.\n  # --cmakedir is supported since llvm r291218 (4.0 release)\n  execute_process(\n    COMMAND ${LLVM_CONFIG_PATH} --cmakedir\n    RESULT_VARIABLE HAD_ERROR\n    OUTPUT_VARIABLE CONFIG_OUTPUT)\n  if(NOT HAD_ERROR)\n    string(STRIP \"${CONFIG_OUTPUT}\" LLVM_CMAKE_PATH_FROM_LLVM_CONFIG)\n    file(TO_CMAKE_PATH ${LLVM_CMAKE_PATH_FROM_LLVM_CONFIG} LLVM_CMAKE_PATH)\n  else()\n    file(TO_CMAKE_PATH ${LLVM_BINARY_DIR} LLVM_BINARY_DIR_CMAKE_STYLE)\n    set(LLVM_CMAKE_PATH \"${LLVM_BINARY_DIR_CMAKE_STYLE}/lib${LLVM_LIBDIR_SUFFIX}/cmake/llvm\")\n  endif()\n\n  list(APPEND CMAKE_MODULE_PATH \"${LLVM_CMAKE_PATH}\")\n  # Get some LLVM variables from LLVMConfig.\n  include(\"${LLVM_CMAKE_PATH}/LLVMConfig.cmake\")\n\n  set(LLVM_LIBRARY_OUTPUT_INTDIR\n    ${LLVM_BINARY_DIR}/${CMAKE_CFG_INTDIR}/lib${LLVM_LIBDIR_SUFFIX})\nendmacro()\n\nmacro(construct_compiler_rt_default_triple)\n  if(COMPILER_RT_DEFAULT_TARGET_ONLY)\n    if(DEFINED COMPILER_RT_DEFAULT_TARGET_TRIPLE)\n      message(FATAL_ERROR \"COMPILER_RT_DEFAULT_TARGET_TRIPLE isn't supported when building for default target only\")\n    endif()\n    set(COMPILER_RT_DEFAULT_TARGET_TRIPLE ${CMAKE_C_COMPILER_TARGET})\n  else()\n    set(COMPILER_RT_DEFAULT_TARGET_TRIPLE ${TARGET_TRIPLE} CACHE STRING\n          \"Default triple for which compiler-rt runtimes will be built.\")\n  endif()\n\n  if(DEFINED COMPILER_RT_TEST_TARGET_TRIPLE)\n    # Backwards compatibility: this variable used to be called\n    # COMPILER_RT_TEST_TARGET_TRIPLE.\n    set(COMPILER_RT_DEFAULT_TARGET_TRIPLE ${COMPILER_RT_TEST_TARGET_TRIPLE})\n  endif()\n\n  string(REPLACE \"-\" \";\" TARGET_TRIPLE_LIST ${COMPILER_RT_DEFAULT_TARGET_TRIPLE})\n  list(GET TARGET_TRIPLE_LIST 0 COMPILER_RT_DEFAULT_TARGET_ARCH)\n  list(GET TARGET_TRIPLE_LIST 1 
COMPILER_RT_DEFAULT_TARGET_OS)\n  list(LENGTH TARGET_TRIPLE_LIST TARGET_TRIPLE_LIST_LENGTH)\n  if(TARGET_TRIPLE_LIST_LENGTH GREATER 2)\n    list(GET TARGET_TRIPLE_LIST 2 COMPILER_RT_DEFAULT_TARGET_ABI)\n  endif()\n  # Determine if test target triple is specified explicitly, and doesn't match the\n  # default.\n  if(NOT COMPILER_RT_DEFAULT_TARGET_TRIPLE STREQUAL TARGET_TRIPLE)\n    set(COMPILER_RT_HAS_EXPLICIT_DEFAULT_TARGET_TRIPLE TRUE)\n  else()\n    set(COMPILER_RT_HAS_EXPLICIT_DEFAULT_TARGET_TRIPLE FALSE)\n  endif()\nendmacro()\n\n# Filter out generic versions of routines that are re-implemented in\n# architecture specific manner.  This prevents multiple definitions of the\n# same symbols, making the symbol selection non-deterministic.\nfunction(filter_builtin_sources output_var exclude_or_include excluded_list)\n  if(exclude_or_include STREQUAL \"EXCLUDE\")\n    set(filter_action GREATER)\n    set(filter_value -1)\n  elseif(exclude_or_include STREQUAL \"INCLUDE\")\n    set(filter_action LESS)\n    set(filter_value 0)\n  else()\n    message(FATAL_ERROR \"filter_builtin_sources called without EXCLUDE|INCLUDE\")\n  endif()\n\n  set(intermediate ${ARGN})\n  foreach (_file ${intermediate})\n    get_filename_component(_name_we ${_file} NAME_WE)\n    list(FIND ${excluded_list} ${_name_we} _found)\n    if(_found ${filter_action} ${filter_value})\n      list(REMOVE_ITEM intermediate ${_file})\n    elseif(${_file} MATCHES \".*/.*\\\\.S\" OR ${_file} MATCHES \".*/.*\\\\.c\")\n      get_filename_component(_name ${_file} NAME)\n      string(REPLACE \".S\" \".c\" _cname \"${_name}\")\n      list(REMOVE_ITEM intermediate ${_cname})\n    endif ()\n  endforeach ()\n  set(${output_var} ${intermediate} PARENT_SCOPE)\nendfunction()\n\nfunction(get_compiler_rt_target arch variable)\n  if(ANDROID AND ${arch} STREQUAL \"i386\")\n    set(target \"i686${COMPILER_RT_OS_SUFFIX}-${COMPILER_RT_DEFAULT_TARGET_OS}\")\n  else()\n    set(target \"${arch}-${COMPILER_RT_DEFAULT_TARGET_OS}\")\n  
endif()\n  if(COMPILER_RT_DEFAULT_TARGET_ABI)\n    set(target \"${target}-${COMPILER_RT_DEFAULT_TARGET_ABI}\")\n  endif()\n  set(${variable} ${target} PARENT_SCOPE)\nendfunction()\n\nfunction(get_compiler_rt_install_dir arch install_dir)\n  if(LLVM_ENABLE_PER_TARGET_RUNTIME_DIR AND NOT APPLE)\n    get_compiler_rt_target(${arch} target)\n    set(${install_dir} ${COMPILER_RT_INSTALL_PATH}/${target}/lib PARENT_SCOPE)\n  else()\n    set(${install_dir} ${COMPILER_RT_LIBRARY_INSTALL_DIR} PARENT_SCOPE)\n  endif()\nendfunction()\n\nfunction(get_compiler_rt_output_dir arch output_dir)\n  if(LLVM_ENABLE_PER_TARGET_RUNTIME_DIR AND NOT APPLE)\n    get_compiler_rt_target(${arch} target)\n    set(${output_dir} ${COMPILER_RT_OUTPUT_DIR}/${target}/lib PARENT_SCOPE)\n  else()\n    set(${output_dir} ${COMPILER_RT_LIBRARY_OUTPUT_DIR} PARENT_SCOPE)\n  endif()\nendfunction()\n\n# compiler_rt_process_sources(\n#   <OUTPUT_VAR>\n#   <SOURCE_FILE> ...\n#  [ADDITIONAL_HEADERS <header> ...]\n# )\n#\n# Process the provided sources and write the list of new sources\n# into `<OUTPUT_VAR>`.\n#\n# ADDITIONAL_HEADERS     - Adds the supplied header to list of sources for IDEs.\n#\n# This function is very similar to `llvm_process_sources()` but exists here\n# because we need to support standalone builds of compiler-rt.\nfunction(compiler_rt_process_sources OUTPUT_VAR)\n  cmake_parse_arguments(\n    ARG\n    \"\"\n    \"\"\n    \"ADDITIONAL_HEADERS\"\n    ${ARGN}\n  )\n  set(sources ${ARG_UNPARSED_ARGUMENTS})\n  set(headers \"\")\n  if (XCODE OR MSVC_IDE OR CMAKE_EXTRA_GENERATOR)\n    # For IDEs we need to tell CMake about header files.\n    # Otherwise they won't show up in UI.\n    set(headers ${ARG_ADDITIONAL_HEADERS})\n    list(LENGTH headers headers_length)\n    if (${headers_length} GREATER 0)\n      set_source_files_properties(${headers}\n        PROPERTIES HEADER_FILE_ONLY ON)\n    endif()\n  endif()\n  set(\"${OUTPUT_VAR}\" ${sources} ${headers} PARENT_SCOPE)\nendfunction()\n"
  },
  {
    "path": "runtime/cmake/CustomLibcxx/CMakeLists.txt",
    "content": "cmake_minimum_required(VERSION 3.4.3)\nproject(custom-libcxx C CXX)\n\nmessage(STATUS \"COMPILER_RT_LIBCXX_PATH = ${COMPILER_RT_LIBCXX_PATH}\")\n\n# Build static libcxxabi.\nset(LIBCXXABI_STANDALONE_BUILD 1)\nset(LIBCXXABI_ENABLE_SHARED OFF CACHE BOOL \"\")\nset(LIBCXXABI_ENABLE_EXCEPTIONS ON CACHE BOOL \"\")\nset(LIBCXXABI_HERMETIC_STATIC_LIBRARY ON CACHE STRING \"\")\nset(LIBCXXABI_LIBCXX_PATH ${COMPILER_RT_LIBCXX_PATH} CACHE PATH \"\")\nset(LIBCXXABI_INCLUDE_TESTS OFF CACHE BOOL \"\")\nadd_subdirectory(${COMPILER_RT_LIBCXXABI_PATH} ${CMAKE_CURRENT_BINARY_DIR}/cxxabi)\n\n# Build static libcxx without exceptions.\nset(LIBCXX_STANDALONE_BUILD 1)\nset(LIBCXX_ENABLE_EXPERIMENTAL_LIBRARY OFF CACHE BOOL \"\")\nset(LIBCXX_ENABLE_SHARED OFF CACHE BOOL \"\")\nset(LIBCXX_ENABLE_EXCEPTIONS ON CACHE BOOL \"\")\nset(LIBCXX_HERMETIC_STATIC_LIBRARY ON CACHE BOOL \"\")\n\n# Use above libcxxabi.\nset(LIBCXX_CXX_ABI \"libcxxabi\" CACHE STRING \"\")\nset(LIBCXX_CXX_ABI_INTREE 1)\nset(LIBCXX_ENABLE_STATIC_ABI_LIBRARY ON CACHE BOOL \"\")\nset(LIBCXX_CXX_ABI_INCLUDE_PATHS ${COMPILER_RT_LIBCXXABI_PATH}/include CACHE PATH \"\")\n\nadd_subdirectory(${COMPILER_RT_LIBCXX_PATH} ${CMAKE_CURRENT_BINARY_DIR}/cxx)\n"
  },
  {
    "path": "runtime/cmake/HandleCompilerRT.cmake",
    "content": "function(find_compiler_rt_library name dest)\n  set(dest \"\" PARENT_SCOPE)\n  set(CLANG_COMMAND ${CMAKE_CXX_COMPILER} ${SANITIZER_COMMON_CFLAGS}\n      \"--rtlib=compiler-rt\" \"--print-libgcc-file-name\")\n  if (CMAKE_CXX_COMPILER_ID MATCHES Clang AND CMAKE_CXX_COMPILER_TARGET)\n    list(APPEND CLANG_COMMAND \"--target=${CMAKE_CXX_COMPILER_TARGET}\")\n  endif()\n  execute_process(\n      COMMAND ${CLANG_COMMAND}\n      RESULT_VARIABLE HAD_ERROR\n      OUTPUT_VARIABLE LIBRARY_FILE\n  )\n  string(STRIP \"${LIBRARY_FILE}\" LIBRARY_FILE)\n  string(REPLACE \"builtins\" \"${name}\" LIBRARY_FILE \"${LIBRARY_FILE}\")\n  if (NOT HAD_ERROR AND EXISTS \"${LIBRARY_FILE}\")\n    message(STATUS \"Found compiler-rt ${name} library: ${LIBRARY_FILE}\")\n    set(${dest} \"${LIBRARY_FILE}\" PARENT_SCOPE)\n  else()\n    message(STATUS \"Failed to find compiler-rt ${name} library\")\n  endif()\nendfunction()\n"
  },
  {
    "path": "runtime/cmake/SanitizerUtils.cmake",
    "content": "include(CompilerRTUtils)\n\nset(SANITIZER_GEN_DYNAMIC_LIST\n  ${COMPILER_RT_SOURCE_DIR}/sanitizer_common/scripts/gen_dynamic_list.py)\n\nset(SANITIZER_LINT_SCRIPT\n  ${COMPILER_RT_SOURCE_DIR}/sanitizer_common/scripts/check_lint.sh)\n\nif(CMAKE_NM)\n  set(SANITIZER_NM \"${CMAKE_NM}\")\nelse()\n  set(SANITIZER_NM nm)\nendif()\n\n# Create a target \"<name>-<arch>-symbols\" that would generate the list of\n# symbols that need to be exported from sanitizer runtime \"<name>\". Function\n# interceptors are exported automatically, user can also provide files with\n# symbol names that should be exported as well.\n#   add_sanitizer_rt_symbols(<name>\n#                            ARCHS <architectures>\n#                            PARENT_TARGET <convenience parent target>\n#                            EXTRA <files with extra symbols to export>)\nmacro(add_sanitizer_rt_symbols name)\n  cmake_parse_arguments(ARG\n    \"\"\n    \"PARENT_TARGET\"\n    \"ARCHS;EXTRA\"\n    ${ARGN})\n  foreach(arch ${ARG_ARCHS})\n    set(target_name ${name}-${arch})\n    set(stamp ${CMAKE_CURRENT_BINARY_DIR}/${target_name}.syms-stamp)\n    set(extra_args)\n    foreach(arg ${ARG_EXTRA})\n      list(APPEND extra_args \"--extra\" ${arg})\n    endforeach()\n    add_custom_command(OUTPUT ${stamp}\n      COMMAND ${Python3_EXECUTABLE}\n        ${SANITIZER_GEN_DYNAMIC_LIST} ${extra_args} $<TARGET_FILE:${target_name}>\n        --nm-executable \"${SANITIZER_NM}\" -o $<TARGET_FILE:${target_name}>.syms\n      COMMAND ${CMAKE_COMMAND} -E touch ${stamp}\n      DEPENDS ${target_name} ${SANITIZER_GEN_DYNAMIC_LIST} ${ARG_EXTRA}\n      WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}\n      COMMENT \"Generating exported symbols for ${target_name}\"\n      VERBATIM)\n    add_custom_target(${target_name}-symbols ALL\n      DEPENDS ${stamp}\n      SOURCES ${SANITIZER_GEN_DYNAMIC_LIST} ${ARG_EXTRA})\n    get_compiler_rt_install_dir(${arch} install_dir)\n    install(FILES $<TARGET_FILE:${target_name}>.syms\n 
           DESTINATION ${install_dir})\n    if(ARG_PARENT_TARGET)\n      add_dependencies(${ARG_PARENT_TARGET} ${target_name}-symbols)\n    endif()\n  endforeach()\nendmacro()\n\n# This function is only used on Darwin, where undefined symbols must be specified\n# in the linker invocation.\nfunction(add_weak_symbols libname link_flags)\n  set(weak_symbols_file \"${COMPILER_RT_SOURCE_DIR}/${libname}/weak_symbols.txt\")\n  file(STRINGS  \"${weak_symbols_file}\" WEAK_SYMBOLS)\n  # Add this file as a configure-time dependency so that changes to this\n  # file trigger a re-configure. This is necessary so that `${link_flags}`\n  # is changed when appropriate.\n  set_property(\n    DIRECTORY \"${CMAKE_CURRENT_SOURCE_DIR}\"\n    APPEND\n    PROPERTY CMAKE_CONFIGURE_DEPENDS \"${weak_symbols_file}\")\n  set(local_link_flags ${${link_flags}})\n  foreach(SYMBOL ${WEAK_SYMBOLS})\n    set(local_link_flags ${local_link_flags} -Wl,-U,${SYMBOL})\n  endforeach()\n  set(${link_flags} ${local_link_flags} PARENT_SCOPE)\nendfunction()\n\nmacro(add_sanitizer_rt_version_list name)\n  set(vers ${CMAKE_CURRENT_BINARY_DIR}/${name}.vers)\n  cmake_parse_arguments(ARG \"\" \"\" \"LIBS;EXTRA\" ${ARGN})\n  set(args)\n  foreach(arg ${ARG_EXTRA})\n    list(APPEND args \"--extra\" ${arg})\n  endforeach()\n  foreach(arg ${ARG_LIBS})\n    list(APPEND args \"$<TARGET_FILE:${arg}>\")\n  endforeach()\n  add_custom_command(OUTPUT ${vers}\n    COMMAND ${Python3_EXECUTABLE}\n      ${SANITIZER_GEN_DYNAMIC_LIST} --version-list ${args}\n      --nm-executable \"${SANITIZER_NM}\" -o ${vers}\n    DEPENDS ${SANITIZER_GEN_DYNAMIC_LIST} ${ARG_EXTRA} ${ARG_LIBS}\n    WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}\n    COMMENT \"Generating version list for ${name}\"\n    VERBATIM)\n\n  add_custom_target(${name}-version-list ALL\n    DEPENDS ${vers})\nendmacro()\n\n# Add target to check code style for sanitizer runtimes.\nif(CMAKE_HOST_UNIX AND NOT OS_NAME MATCHES \"OpenBSD\")\n  add_custom_target(SanitizerLintCheck\n    
COMMAND env LLVM_CHECKOUT=${LLVM_MAIN_SRC_DIR} SILENT=1 TMPDIR=\n      PYTHON_EXECUTABLE=${Python3_EXECUTABLE}\n      COMPILER_RT=${COMPILER_RT_SOURCE_DIR}\n      ${SANITIZER_LINT_SCRIPT}\n    DEPENDS ${SANITIZER_LINT_SCRIPT}\n    COMMENT \"Running lint check for sanitizer sources...\"\n    VERBATIM)\nelse()\n  add_custom_target(SanitizerLintCheck\n    COMMAND echo \"No lint check\")\nendif()\nset_target_properties(SanitizerLintCheck\n  PROPERTIES FOLDER \"Compiler-RT Misc\")\n"
  },
  {
    "path": "runtime/common_interface_defs.h",
    "content": "//===-- sanitizer/common_interface_defs.h -----------------------*- C++ -*-===//\n//\n//                     The LLVM Compiler Infrastructure\n//\n// This file is distributed under the University of Illinois Open Source\n// License. See LICENSE.TXT for details.\n//\n//===----------------------------------------------------------------------===//\n//\n// Common part of the public sanitizer interface.\n//===----------------------------------------------------------------------===//\n\n#ifndef SANITIZER_COMMON_INTERFACE_DEFS_H\n#define SANITIZER_COMMON_INTERFACE_DEFS_H\n\n#include <stddef.h>\n#include <stdint.h>\n\n// GCC does not understand __has_feature.\n#if !defined(__has_feature)\n# define __has_feature(x) 0\n#endif\n\n#ifdef __cplusplus\nextern \"C\" {\n#endif\n  // Arguments for __sanitizer_sandbox_on_notify() below.\n  typedef struct {\n    // Enable sandbox support in sanitizer coverage.\n    int coverage_sandboxed;\n    // File descriptor to write coverage data to. If -1 is passed, a file will\n    // be pre-opened by __sanitizer_sandobx_on_notify(). This field has no\n    // effect if coverage_sandboxed == 0.\n    intptr_t coverage_fd;\n    // If non-zero, split the coverage data into well-formed blocks. This is\n    // useful when coverage_fd is a socket descriptor. Each block will contain\n    // a header, allowing data from multiple processes to be sent over the same\n    // socket.\n    unsigned int coverage_max_block_size;\n  } __sanitizer_sandbox_arguments;\n\n  // Tell the tools to write their reports to \"path.<pid>\" instead of stderr.\n  void __sanitizer_set_report_path(const char *path);\n  // Tell the tools to write their reports to the provided file descriptor\n  // (casted to void *).\n  void __sanitizer_set_report_fd(void *fd);\n\n  // Notify the tools that the sandbox is going to be turned on. 
The reserved\n  // parameter will be used in the future to hold a structure with functions\n  // that the tools may call to bypass the sandbox.\n  void __sanitizer_sandbox_on_notify(__sanitizer_sandbox_arguments *args);\n\n  // This function is called by the tool when it has just finished reporting\n  // an error. 'error_summary' is a one-line string that summarizes\n  // the error message. This function can be overridden by the client.\n  void __sanitizer_report_error_summary(const char *error_summary);\n\n  // Some of the sanitizers (e.g. asan/tsan) may miss bugs that happen\n  // in unaligned loads/stores. In order to find such bugs reliably one needs\n  // to replace plain unaligned loads/stores with these calls.\n  uint16_t __sanitizer_unaligned_load16(const void *p);\n  uint32_t __sanitizer_unaligned_load32(const void *p);\n  uint64_t __sanitizer_unaligned_load64(const void *p);\n  void __sanitizer_unaligned_store16(void *p, uint16_t x);\n  void __sanitizer_unaligned_store32(void *p, uint32_t x);\n  void __sanitizer_unaligned_store64(void *p, uint64_t x);\n\n  // Annotate the current state of a contiguous container, such as\n  // std::vector, std::string or similar.\n  // A contiguous container is a container that keeps all of its elements\n  // in a contiguous region of memory. The container owns the region of memory\n  // [beg, end); the memory [beg, mid) is used to store the current elements\n  // and the memory [mid, end) is reserved for future elements;\n  // beg <= mid <= end. For example, in \"std::vector<> v\"\n  //   beg = &v[0];\n  //   end = beg + v.capacity() * sizeof(v[0]);\n  //   mid = beg + v.size()     * sizeof(v[0]);\n  //\n  // This annotation tells the Sanitizer tool about the current state of the\n  // container so that the tool can report errors when memory from [mid, end)\n  // is accessed. 
Insert this annotation into methods like push_back/pop_back.\n  // Supply the old and the new values of mid (old_mid/new_mid).\n  // In the initial state mid == end and so should be the final\n  // state when the container is destroyed or when it reallocates the storage.\n  //\n  // Use with caution and don't use for anything other than vector-like classes.\n  //\n  // For AddressSanitizer, 'beg' should be 8-aligned and 'end' should\n  // be either 8-aligned or it should point to the end of a separate heap-,\n  // stack-, or global- allocated buffer. I.e. the following will not work:\n  //   int64_t x[2];  // 16 bytes, 8-aligned.\n  //   char *beg = (char *)&x[0];\n  //   char *end = beg + 12;  // Not 8 aligned, not the end of the buffer.\n  // This however will work fine:\n  //   int32_t x[3];  // 12 bytes, but 8-aligned under AddressSanitizer.\n  //   char *beg = (char*)&x[0];\n  //   char *end = beg + 12;  // Not 8-aligned, but is the end of the buffer.\n  void __sanitizer_annotate_contiguous_container(const void *beg,\n                                                 const void *end,\n                                                 const void *old_mid,\n                                                 const void *new_mid);\n  // Returns true if the contiguous container [beg, end) is properly poisoned\n  // (e.g. with __sanitizer_annotate_contiguous_container), i.e. if\n  //  - [beg, mid) is addressable,\n  //  - [mid, end) is unaddressable.\n  // Full verification requires O(end-beg) time; this function tries to avoid\n  // such complexity by touching only parts of the container around beg/mid/end.\n  int __sanitizer_verify_contiguous_container(const void *beg, const void *mid,\n                                              const void *end);\n\n  // Similar to __sanitizer_verify_contiguous_container but returns the address\n  // of the first improperly poisoned byte otherwise. 
Returns null if the area\n  // is poisoned properly.\n  const void *__sanitizer_contiguous_container_find_bad_address(\n      const void *beg, const void *mid, const void *end);\n\n  // Print the stack trace leading to this call. Useful for debugging user code.\n  void __sanitizer_print_stack_trace();\n\n  // Symbolizes the supplied 'pc' using the format string 'fmt'.\n  // Outputs at most 'out_buf_size' bytes into 'out_buf'.\n  // The format syntax is described in\n  // lib/sanitizer_common/sanitizer_stacktrace_printer.h.\n  void __sanitizer_symbolize_pc(void *pc, const char *fmt, char *out_buf,\n                                size_t out_buf_size);\n  // Same as __sanitizer_symbolize_pc, but for data section (i.e. globals).\n  void __sanitizer_symbolize_global(void *data_ptr, const char *fmt,\n                                    char *out_buf, size_t out_buf_size);\n\n  // Sets the callback to be called right before death on error.\n  // Passing 0 will unset the callback.\n  void __sanitizer_set_death_callback(void (*callback)(void));\n\n  // Interceptor hooks.\n  // Whenever a libc function interceptor is called it checks if the\n  // corresponding weak hook is defined, and if so -- calls it.\n  // The primary use case is data-flow-guided fuzzing, where the fuzzer needs\n  // to know what is being passed to libc functions, e.g. 
memcmp.\n  // FIXME: implement more hooks.\n  void __sanitizer_weak_hook_memcmp(void *called_pc, const void *s1,\n                                    const void *s2, size_t n, int result);\n  void __sanitizer_weak_hook_strncmp(void *called_pc, const char *s1,\n                                    const char *s2, size_t n, int result);\n  void __sanitizer_weak_hook_strncasecmp(void *called_pc, const char *s1,\n                                         const char *s2, size_t n, int result);\n  void __sanitizer_weak_hook_strcmp(void *called_pc, const char *s1,\n                                    const char *s2, int result);\n  void __sanitizer_weak_hook_strcasecmp(void *called_pc, const char *s1,\n                                        const char *s2, int result);\n  void __sanitizer_weak_hook_strstr(void *called_pc, const char *s1,\n                                    const char *s2, char *result);\n  void __sanitizer_weak_hook_strcasestr(void *called_pc, const char *s1,\n                                        const char *s2, char *result);\n  void __sanitizer_weak_hook_memmem(void *called_pc,\n                                    const void *s1, size_t len1,\n                                    const void *s2, size_t len2, void *result);\n\n  // Prints stack traces for all live heap allocations ordered by total\n  // allocation size until `top_percent` of total live heap is shown.\n  // `top_percent` should be between 1 and 100.\n  // Experimental feature currently available only with asan on Linux/x86_64.\n  void __sanitizer_print_memory_profile(size_t top_percent);\n\n  // Fiber annotation interface.\n  // Before switching to a different stack, one must call\n  // __sanitizer_start_switch_fiber with a pointer to the bottom of the\n  // destination stack and its size. 
When code starts running on the new stack,\n  // it must call __sanitizer_finish_switch_fiber to finalize the switch.\n  // The start_switch function takes a void** to store the current fake stack if\n  // there is one (it is needed when detect_stack_use_after_return is enabled).\n  // When restoring a stack, this pointer must be given to the finish_switch\n  // function. In most cases, this void* can be stored on the stack just before\n  // switching.  When leaving a fiber definitely, null must be passed as first\n  // argument to the start_switch function so that the fake stack is destroyed.\n  // If you do not want support for stack use-after-return detection, you can\n  // always pass null to these two functions.\n  // Note that the fake stack mechanism is disabled during fiber switch, so if a\n  // signal callback runs during the switch, it will not benefit from the stack\n  // use-after-return detection.\n  void __sanitizer_start_switch_fiber(void **fake_stack_save,\n                                      const void *bottom, size_t size);\n  void __sanitizer_finish_switch_fiber(void *fake_stack_save,\n                                       const void **bottom_old,\n                                       size_t *size_old);\n#ifdef __cplusplus\n}  // extern \"C\"\n#endif\n\n#endif  // SANITIZER_COMMON_INTERFACE_DEFS_H\n"
  },
  {
    "path": "runtime/dfsan/.clang-format",
    "content": "BasedOnStyle: Google\nAllowShortIfStatementsOnASingleLine: false\nIndentPPDirectives: AfterHash\n"
  },
  {
    "path": "runtime/dfsan/CMakeLists.txt",
    "content": "# include_directories(..)\n\n# Runtime library sources and build flags.\nset(DFSAN_RTL_SOURCES\n  dfsan.cpp\n  dfsan_custom.cpp\n  dfsan_interceptors.cpp\n  taint_allocator.cpp\n  union_util.cpp\n  union_hashtable.cpp)\n\nset(DFSAN_RTL_HEADERS\n  dfsan.h\n  dfsan_flags.inc\n  dfsan_platform.h\n  taint_allocator.h\n  union_util.h\n  union_hashtable.h)\n\nset(DFSAN_COMMON_CFLAGS ${SANITIZER_COMMON_CFLAGS})\n\nif(OS_NAME MATCHES \"Linux\" AND\n   COMPILER_RT_LIBCXX_PATH AND\n   COMPILER_RT_LIBCXXABI_PATH)\n  list(APPEND DFSAN_COMMON_CFLAGS -nostdinc++ -D_LIBCPP_ABI_VERSION=Taint)\n  # Remove -stdlib= which is unused when passing -nostdinc++.\n  string(REGEX REPLACE \"-stdlib=[a-zA-Z+]*\" \"\" CMAKE_CXX_FLAGS ${CMAKE_CXX_FLAGS})\nendif()\n\nappend_rtti_flag(OFF DFSAN_COMMON_CFLAGS)\n# Prevent clang from generating libc calls.\nappend_list_if(COMPILER_RT_HAS_FFREESTANDING_FLAG -ffreestanding DFSAN_COMMON_CFLAGS)\n\n# Static runtime library.\nadd_compiler_rt_component(dfsan)\n\nforeach(arch ${DFSAN_SUPPORTED_ARCH})\n  set(DFSAN_CFLAGS ${DFSAN_COMMON_CFLAGS})\n  append_list_if(COMPILER_RT_HAS_FPIE_FLAG -fPIE DFSAN_CFLAGS)\n  add_compiler_rt_runtime(dfsan_rt\n    STATIC\n    ARCHS ${arch}\n    SOURCES ${DFSAN_RTL_SOURCES}\n            $<TARGET_OBJECTS:RTInterception.${arch}>\n            $<TARGET_OBJECTS:RTSanitizerCommon.${arch}>\n            $<TARGET_OBJECTS:RTSanitizerCommonLibc.${arch}>\n            $<TARGET_OBJECTS:RTSanitizerCommonSymbolizer.${arch}>\n    ADDITIONAL_HEADERS ${DFSAN_RTL_HEADERS}\n    CFLAGS ${DFSAN_CFLAGS}\n    PARENT_TARGET dfsan)\n  add_sanitizer_rt_symbols(dfsan_rt\n    ARCHS ${arch}\n    EXTRA dfsan.syms.extra)\n  add_dependencies(dfsan\n    dfsan_rt-${arch}-symbols)\n\n  if(OS_NAME MATCHES \"Linux\" AND\n    COMPILER_RT_LIBCXX_PATH AND\n    COMPILER_RT_LIBCXXABI_PATH)\n    macro(partially_link_libcxx name dir arch)\n      set(cxx_${arch}_merge_dir \"${CMAKE_CURRENT_BINARY_DIR}/cxx_${arch}_merge.dir\")\n      file(MAKE_DIRECTORY 
${cxx_${arch}_merge_dir})\n      add_custom_command(TARGET dfsan_rt-${arch} POST_BUILD\n        COMMAND ${CMAKE_LINKER} --whole-archive \"$<TARGET_LINKER_FILE:dfsan_rt-${arch}>\" --no-whole-archive ${dir}/lib/libc++.a --no-whole-archive ${dir}/lib/libc++abi.a -r -o ${name}.o\n        COMMAND ${CMAKE_OBJCOPY} --localize-hidden ${name}.o\n        COMMAND ${CMAKE_COMMAND} -E remove \"$<TARGET_LINKER_FILE:dfsan_rt-${arch}>\"\n        COMMAND ${CMAKE_AR} qcs \"$<TARGET_LINKER_FILE:dfsan_rt-${arch}>\" ${name}.o\n        WORKING_DIRECTORY ${cxx_${arch}_merge_dir}\n      )\n    endmacro()\n\n    set(TARGET_CFLAGS ${TARGET_${arch}_CFLAGS} PARENT_SCOPE)\n    set(LIBCXX_${arch}_PREFIX ${CMAKE_CURRENT_BINARY_DIR}/libcxx_taint_${arch})\n    add_custom_libcxx(libcxx_taint_${arch} ${LIBCXX_${arch}_PREFIX}\n      CFLAGS ${TARGET_CFLAGS}\n             -D_LIBCPP_ABI_VERSION=Taint\n             -D_LIBCPP_DISABLE_VISIBILITY_ANNOTATIONS=1\n             -fvisibility=hidden\n      CMAKE_ARGS -DCMAKE_CXX_COMPILER_WORKS=ON\n                -DCMAKE_POSITION_INDEPENDENT_CODE=ON\n                -DLIBCXX_ENABLE_EXCEPTIONS=ON\n                -DLIBCXX_ABI_NAMESPACE=__Taint)\n    target_compile_options(dfsan_rt-${arch} PRIVATE -isystem ${LIBCXX_${arch}_PREFIX}/include/c++/v1)\n    add_dependencies(dfsan_rt-${arch} libcxx_taint_${arch}-build)\n    partially_link_libcxx(taint ${LIBCXX_${arch}_PREFIX} ${arch})\n  endif()\n\n  install(FILES ${COMPILER_RT_OUTPUT_DIR}/libdfsan_rt-${arch}.a.syms\n    DESTINATION ${COMPILER_RT_LIBRARY_INSTALL_DIR})\nendforeach()\n\nset(dfsan_abilist_dir ${COMPILER_RT_OUTPUT_DIR}/share)\nset(dfsan_abilist_filename ${dfsan_abilist_dir}/dfsan_abilist.txt)\nadd_custom_target(dfsan_abilist ALL\n  DEPENDS ${dfsan_abilist_filename})\nadd_custom_command(OUTPUT ${dfsan_abilist_filename}\n                   VERBATIM\n                   COMMAND\n                    ${CMAKE_COMMAND} -E make_directory ${dfsan_abilist_dir}\n                   COMMAND\n                     cat 
${CMAKE_CURRENT_SOURCE_DIR}/done_abilist.txt\n                         ${CMAKE_CURRENT_SOURCE_DIR}/libc_ubuntu2404_abilist.txt\n                         > ${dfsan_abilist_filename}\n                   DEPENDS done_abilist.txt libc_ubuntu2404_abilist.txt)\nadd_dependencies(dfsan dfsan_abilist)\ninstall(FILES ${dfsan_abilist_filename}\n  DESTINATION ${COMPILER_RT_LIBRARY_INSTALL_DIR})\ninstall(FILES \"libc++_abilist.txt\"\n  DESTINATION ${COMPILER_RT_LIBRARY_INSTALL_DIR})\n\nset(taint_linker_script_filename ${COMPILER_RT_OUTPUT_DIR}/taint.ld)\nadd_custom_target(taint_linker_script ALL\n  DEPENDS ${taint_linker_script_filename})\nadd_custom_command(OUTPUT ${taint_linker_script_filename}\n                   VERBATIM\n                   COMMAND\n                     cat ${CMAKE_CURRENT_SOURCE_DIR}/taint.ld\n                         > ${taint_linker_script_filename}\n                   DEPENDS taint.ld)\nadd_dependencies(dfsan taint_linker_script)\ninstall(FILES ${taint_linker_script_filename}\n  DESTINATION ${COMPILER_RT_LIBRARY_INSTALL_DIR})\n"
  },
  {
    "path": "runtime/dfsan/dfsan.cpp",
    "content": "//===-- dfsan.cc ----------------------------------------------------------===//\n//\n// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n// See https://llvm.org/LICENSE.txt for license information.\n// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n//\n//===----------------------------------------------------------------------===//\n//\n// This file is a part of DataFlowSanitizer.\n//\n// DataFlowSanitizer runtime.  This file defines the public interface to\n// DataFlowSanitizer as well as the definition of certain runtime functions\n// called automatically by the compiler (specifically the instrumentation pass\n// in llvm/lib/Transforms/Instrumentation/DataFlowSanitizer.cpp).\n//\n// The public interface is defined in include/sanitizer/dfsan_interface.h whose\n// functions are prefixed dfsan_ while the compiler interface functions are\n// prefixed __dfsan_.\n//===----------------------------------------------------------------------===//\n\n#include \"sanitizer_common/sanitizer_atomic.h\"\n#include \"sanitizer_common/sanitizer_allocator_internal.h\"\n#include \"sanitizer_common/sanitizer_common.h\"\n#include \"sanitizer_common/sanitizer_file.h\"\n#include \"sanitizer_common/sanitizer_flags.h\"\n#include \"sanitizer_common/sanitizer_flag_parser.h\"\n#include \"sanitizer_common/sanitizer_libc.h\"\n#include \"sanitizer_common/sanitizer_mutex.h\"\n#include \"sanitizer_common/sanitizer_posix.h\"\n\n#include \"dfsan.h\"\n#include \"taint_allocator.h\"\n#include \"union_util.h\"\n#include \"union_hashtable.h\"\n\n#include <assert.h>\n#include <arpa/inet.h>\n#include <fcntl.h>\n#include <stdio.h>\n#include <stdlib.h>\n#include <string.h>\n#include <sys/mman.h>\n#include <sys/shm.h>\n#include <sys/socket.h>\n#include <sys/stat.h>\n#include <sys/types.h>\n#include <sys/un.h>\n#include <unistd.h>\n\nusing namespace __dfsan;\n\ntypedef atomic_uint32_t atomic_dfsan_label;\n\nstatic atomic_dfsan_label 
__dfsan_last_label;\nstatic dfsan_label_info *__dfsan_label_info;\n\n// FIXME: single thread\n// stack bottom\nstatic dfsan_label __alloca_stack_bottom;\nstatic dfsan_label __alloca_stack_top;\nstatic const int MAX_SAVED_STACK_ENTRIES = 1024;\nstatic dfsan_label __saved_alloca_stack_top[MAX_SAVED_STACK_ENTRIES];\nstatic int __current_saved_stack_index = 0;\n\n// taint source\nstruct taint_file __dfsan::tainted;\nstruct taint_socket __dfsan::tainted_socket;\n\n// Hash table\nstatic const uptr hashtable_size = (1ULL << 32);\nstatic const size_t hashtable_buckets = (1ULL << 20);\nstatic __taint::union_hashtable __union_table(hashtable_buckets);\n\nFlags __dfsan::flags_data;\nbool print_debug;\n\n// The size of TLS variables. These constants must be kept in sync with the ones\n// in Taint.cc\nstatic const int kArgTlsSize = 800;\nstatic const int kRetvalTlsSize = 800;\n\nSANITIZER_INTERFACE_ATTRIBUTE THREADLOCAL uint64_t\n    __dfsan_retval_tls[kRetvalTlsSize / sizeof(uint64_t)];\nSANITIZER_INTERFACE_ATTRIBUTE THREADLOCAL uint64_t\n    __dfsan_arg_tls[kArgTlsSize / sizeof(uint64_t)];\n\nSANITIZER_INTERFACE_ATTRIBUTE uptr __dfsan_shadow_ptr_mask;\n\n// On Linux/x86_64, memory is laid out as follows:\n//\n// +--------------------+ 0x800000000000 (top of memory)\n// | application memory |\n// +--------------------+ 0x700000040000 (kAppAddr)\n// |--------------------| UnusedAddr()\n// |                    |\n// |    union table     |\n// |                    |\n// +--------------------+ 0x400100000000 (kUnionTableAddr)\n// |    hash table      |\n// +--------------------+ 0x400000000000 (kHashTableAddr)\n// |   shadow memory    |\n// +--------------------+ 0x000000100000 (kShadowAddr)\n// |       unused       |\n// +--------------------+ 0x000000010000 (kKernelAddr)\n// | reserved by kernel |\n// +--------------------+ 0x000000000000\n//\n// To derive a shadow memory address from an application memory address,\n// bits 44-46 are cleared to bring the address into the 
range\n// [0x000000040000,0x100000000000).  Then the address is shifted left by 2 to\n// account for the double byte representation of shadow labels and move the\n// address into the shadow memory range.  See the function shadow_for below.\n\n#ifdef DFSAN_RUNTIME_VMA\n// Runtime detected VMA size.\nint __dfsan::vmaSize;\n#endif\n\nstatic uptr UnusedAddr() {\n  return MappingArchImpl<MAPPING_UNION_TABLE_ADDR>() + uniontable_size;\n}\n\n// Checks we do not run out of labels.\nstatic void dfsan_check_label(dfsan_label label) {\n  if (label == kInitializingLabel) {\n    Report(\"FATAL: Taint: out of labels\\n\");\n    Die();\n  }\n  // Alloca labels are in range [__alloca_stack_top, __alloca_stack_bottom]\n  if (label >= __alloca_stack_top && label <= __alloca_stack_bottom) {\n    return; // Valid Alloca label\n  }\n  // For regular labels, check against __dfsan_last_label\n  dfsan_label last = atomic_load(&__dfsan_last_label, memory_order_relaxed);\n  if (label > last) {\n    Report(\"FATAL: Invalid label %u > last %u\\n\", label, last);\n    Die();\n  }\n}\n\n// based on https://github.com/Cyan4973/xxHash\n// simplified since we only have 12 bytes info\nstatic inline uint32_t xxhash(uint32_t h1, uint32_t h2, uint32_t h3) {\n  const uint32_t PRIME32_1 = 2654435761U;\n  const uint32_t PRIME32_2 = 2246822519U;\n  const uint32_t PRIME32_3 = 3266489917U;\n  const uint32_t PRIME32_4 =  668265263U;\n  const uint32_t PRIME32_5 =  374761393U;\n\n  #define XXH_rotl32(x,r) ((x << r) | (x >> (32 - r)))\n  uint32_t h32 = PRIME32_5;\n  h32 += h1 * PRIME32_3;\n  h32  = XXH_rotl32(h32, 17) * PRIME32_4;\n  h32 += h2 * PRIME32_3;\n  h32  = XXH_rotl32(h32, 17) * PRIME32_4;\n  h32 += h3 * PRIME32_3;\n  h32  = XXH_rotl32(h32, 17) * PRIME32_4;\n  #undef XXH_rotl32\n\n  h32 ^= h32 >> 15;\n  h32 *= PRIME32_2;\n  h32 ^= h32 >> 13;\n  h32 *= PRIME32_3;\n  h32 ^= h32 >> 16;\n\n  return h32;\n}\n\ndfsan_label_info* __dfsan::get_label_info(dfsan_label label) {\n  return 
&__dfsan_label_info[label];\n}\n\nstatic inline bool is_constant_label(dfsan_label label) {\n  return label == CONST_LABEL;\n}\n\nstatic inline bool is_kind_of_label(dfsan_label label, uint16_t kind) {\n  return get_label_info(label)->op == kind;\n}\n\nstatic bool isZeroOrPowerOfTwo(uint16_t x) { return (x & (x - 1)) == 0; }\n\nstatic inline bool is_valid_op(uint16_t op) {\n  op &= 0xff;\n  return op >= __dfsan::Add && op < __dfsan::LastOp || op == __dfsan::Not;\n}\n\nextern \"C\" SANITIZER_INTERFACE_ATTRIBUTE\nvoid __taint_trace_cond(dfsan_label label, bool r, uint8_t flag, uint32_t cid);\n\nextern \"C\" SANITIZER_INTERFACE_ATTRIBUTE\ndfsan_label __taint_union(dfsan_label l1, dfsan_label l2, uint16_t op,\n                          uint16_t size, uint64_t op1, uint64_t op2) {\n  if (!is_valid_op(op)) {\n    AOUT(\"WARNING: invalid op %d\\n\", op);\n    return 0;\n  }\n  if (l1 > l2 && is_commutative(op)) {\n    // needs to swap both labels and concretes\n    Swap(l1, l2);\n    Swap(op1, op2);\n  }\n  if (l1 == 0 && l2 < CONST_OFFSET &&\n      op != fsize && op != __dfsan::Alloca)\n    return 0;\n  if (l1 == kInitializingLabel || l2 == kInitializingLabel)\n    return kInitializingLabel;\n\n  // special handling for bounds\n  if (get_label_info(l1)->op == __dfsan::Alloca ||\n      (op != __dfsan::Load && get_label_info(l2)->op == __dfsan::Alloca)) {\n    // propagate if it's casting op\n    if (op == __dfsan::BitCast) return l1;\n    if (op == __dfsan::PtrToInt) {AOUT(\"WARNING: ptrtoint %d\\n\", l1); return 0;}\n    if ((op & 0xff) == __dfsan::ICmp) { return 0;} // ptr1 op ptr2\n    if (op != __dfsan::Extract) {\n      AOUT(\"WARNING: unsupported op %d over ptr1 %d ptr2 %d\\n\", op, l1, l2);\n      return 0;\n    }\n  }\n\n  // backup old op-values\n  uint64_t orig_op1 = op1, orig_op2 = op2;\n\n  // Preserve op1/op2 for certain operations:\n  // - Alloca: uses op1/op2 for bounds tracking\n  // - ICmp: records both operands for comparison\n  // - Higher-order ops (>= 
fmemcmp): use op1/op2 for various purposes\n  if (op == __dfsan::fmemcmp) {\n    // fmemcmp special: copy up to 8 bytes of the data for i2s inference\n    uint16_t len = size > 8 ? 8 : size; // for fmemcmp, size is in bytes, not bits\n    if (l1 >= CONST_OFFSET) internal_memcpy(&op1, (void*)op1, len);\n    if (l2 >= CONST_OFFSET) internal_memcpy(&op2, (void*)op2, len);\n  } else if (op < __dfsan::fmemcmp &&\n             op != __dfsan::Alloca &&\n             op != __dfsan::PtrToInt &&\n             (op & 0xff) != __dfsan::ICmp) {\n    // Not a higher-order op and not Alloca/ICmp/PtrToInt - zero out for symbolic operands\n    // PtrToInt needs op1 preserved to compute base pointer for string ops\n    if (l1 >= CONST_OFFSET) op1 = 0;\n    if (l2 >= CONST_OFFSET) op2 = 0;\n  }\n\n  // try simple simplifications, from qsym\n  bool op1_is_zero = (l1 == 0 && op1 == 0);\n  bool op1_is_all_one = (l1 == 0 && op1 == ((uint64_t)1 << size) - 1);\n  bool op2_is_zero = (l2 == 0 && op2 == 0);\n  if (op1_is_zero) {\n    switch (op) {\n      case __dfsan::And: // 0 & x = 0\n      case __dfsan::Mul: // 0 * x = 0\n      case __dfsan::Shl: // 0 << x = 0\n        return 0;\n      case __dfsan::Or: // 0 | x = x\n      case __dfsan::Xor: // 0 ^ x = x\n      case __dfsan::Add: // 0 + x = x\n        return l2;\n    }\n  } else if (op1_is_all_one) {\n    if (op == __dfsan::And) return l2; // 0b11..1 & x = x\n    else if (op == __dfsan::Or) return 0; // 0b11..1 | x = 11..1b\n    else if (op == __dfsan::Xor && size == 1) op = __dfsan::Not; // 0b1 ^ x = !x\n  }\n  if (op2_is_zero) {\n    if (op == __dfsan::Sub) return l1; // x - 0 = x\n    else if (op == __dfsan::Shl) return l1; // x << 0 = x\n    else if (op == __dfsan::LShr) return l1; // x >> 0 = x\n    else if (op == __dfsan::AShr) return l1; // x >> 0 = x\n  }\n  if (op == __dfsan::Trunc) {\n    if (__dfsan_label_info[l1].op == __dfsan::ZExt ||\n        __dfsan_label_info[l1].op == __dfsan::SExt) {\n      dfsan_label base = 
__dfsan_label_info[l1].l1;\n      if (size == __dfsan_label_info[base].size) return base;\n    }\n  } else if (op == __dfsan::Xor && l1 == l2) {\n    // x ^ x = 0\n    return 0;\n  }\n\n  // setup a hash tree for dedup\n  uint32_t h1 = l1 ? __dfsan_label_info[l1].hash : 0;\n  uint32_t h2 = l2 ? __dfsan_label_info[l2].hash : 0;\n  uint32_t h3 = op;\n  h3 = (h3 << 16) | size;\n  uint32_t hash = xxhash(h1, h2, h3);\n\n  struct dfsan_label_info label_info = {\n    .l1 = l1, .l2 = l2, .op1 = {op1}, .op2 = {op2}, .op = op, .size = size,\n    .hash = hash};\n\n  __taint::option res = __union_table.lookup(label_info);\n  if (res != __taint::none()) {\n    dfsan_label label = *res;\n    AOUT(\"%u found\\n\", label);\n    return label;\n  }\n\n  // ubsan checks, after dedup, so we don't do redundant checks\n  if (l2 && flags().solve_ub) {\n    dfsan_label cond = 0;\n    uint16_t op_size = get_label_info(l2)->size;\n    switch(op & 0xff) {\n      case __dfsan::Add:\n      case __dfsan::Sub:\n      case __dfsan::Mul:\n        // check for integer overflow\n        break;\n      case __dfsan::UDiv:\n      case __dfsan::SDiv:\n      case __dfsan::URem:\n      case __dfsan::SRem:\n        // check for division by zero\n        // -fsanitize=integer-divide-by-zero\n        if (orig_op2 != 0) {\n          cond = __taint_union(l2, 0, (bveq << 8) | __dfsan::ICmp, size,\n                               orig_op2, 0);\n          __taint_trace_cond(cond, 0, UndefinedCheck, ub_division_by_zero);\n        }\n        break;\n      case __dfsan::Shl:\n      case __dfsan::LShr:\n      case __dfsan::AShr:\n        // -fsanitize=shift-exponent\n        // check for too large value: exponent > size\n        if (orig_op2 < size) {\n          cond = __taint_union(l2, 0, (bvuge << 8) | __dfsan::ICmp,\n                              op_size, orig_op2, size),\n          __taint_trace_cond(cond, 0, UndefinedCheck, ub_shift_exponent);\n        }\n        if ((int64_t)orig_op2 >= 0) {\n          // check 
for negative value\n          cond = __taint_union(l2, 0, (bvslt << 8) | __dfsan::ICmp,\n                               op_size, orig_op2, 0),\n          __taint_trace_cond(cond, 0, UndefinedCheck, ub_shift_exponent);\n        }\n        if (op == __dfsan::Shl && orig_op1 != 0 &&\n            orig_op2 <= __builtin_clzl(orig_op1) - (64 - size)) {\n          // check for shift overflow\n          // op2 > leading zero bits in op1\n          cond = __taint_union(l2, 0, (bvugt << 8) | __dfsan::ICmp, op_size,\n                               orig_op2, __builtin_clzl(orig_op1) - (64 - size));\n          __taint_trace_cond(cond, 0, UndefinedCheck, ub_shift_overflow);\n        }\n        if (l1 && (int64_t)orig_op1 >= 0) {\n          // check for negative base\n          // -fsanitize=shift-base\n          // op1 < 0\n          cond = __taint_union(l1, 0, (bvslt << 8) | __dfsan::ICmp,\n                               get_label_info(l1)->size, orig_op1, 0);\n          __taint_trace_cond(cond, 0, UndefinedCheck, ub_shift_base);\n        }\n        break;\n      default:\n        break;\n    }\n  }\n\n  dfsan_label label =\n    atomic_fetch_add(&__dfsan_last_label, 1, memory_order_relaxed) + 1;\n  dfsan_check_label(label);\n  assert(label > l1 && label > l2);\n\n  AOUT(\"%u = (%u, %u, %u, %u, %lu, %lu)\\n\", label, l1, l2, op, size, op1, op2);\n\n  internal_memcpy(&__dfsan_label_info[label], &label_info, sizeof(dfsan_label_info));\n  __union_table.insert(&__dfsan_label_info[label], label);\n\n  if (flags().solve_ub) {\n    if (op == __dfsan::Trunc && l1) {\n      // check for data loss, after the new label is created\n      // -fsanitize=implicit-unsigned-integer-truncation\n      // old_value >= (1 << new_size)\n      if (orig_op1 < (1UL << size)) {\n        // if current value does not have loss\n        dfsan_label loss = __taint_union(l1, 0, (bvuge << 8) | __dfsan::ICmp,\n                                        get_label_info(l1)->size, orig_op1,\n                            
            1UL << size);\n        __taint_trace_cond(loss, 0, UndefinedCheck, ub_unsigned_integer_truncation);\n      }\n      // -fsanitize=implicit-signed-integer-truncation\n      // old_value < signed(1 << (size - 1))\n      int64_t target = (int64_t)((0xFFFFFFFFFFFFFFFFUL >> (size-1)) << (size-1));\n      if ((int64_t)orig_op1 >= target) {\n        uint16_t old_size = get_label_info(l1)->size;\n        if (old_size < 64) target &= ~(1UL << old_size);\n        dfsan_label loss = __taint_union(l1, 0, (bvslt << 8) | __dfsan::ICmp,\n                                        old_size, orig_op1, target);\n        __taint_trace_cond(loss, 0, UndefinedCheck, ub_signed_integer_truncation);\n      }\n\n      // -fsanitize=implicit-integer-sign-change\n      // Check if sign bit changed during truncation\n      {\n        uint16_t src_size = get_label_info(l1)->size;\n        const uint64_t new_mask = size == 64 ? 0xFFFFFFFFFFFFFFFFUL : (1UL << size) - 1;\n        uint64_t src_sign_bit = 1ULL << (src_size - 1);\n        uint64_t dst_sign_bit = 1ULL << (size - 1);\n        bool src_sign = (orig_op1 & src_sign_bit) != 0;\n        bool dst_sign = ((orig_op1 & new_mask) & dst_sign_bit) != 0;\n        if (src_sign == dst_sign) {\n          // Currently no sign change, check if it can happen\n          // Sign changes when: sign_bit(l1) != sign_bit(label)\n          // We check: (l1 < 0) XOR (label < 0)\n          dfsan_label src_neg = __taint_union(l1, 0, (bvslt << 8) | __dfsan::ICmp,\n                                              src_size, orig_op1, 0);\n          dfsan_label dst_neg = __taint_union(label, 0, (bvslt << 8) | __dfsan::ICmp,\n                                              size, orig_op1 & new_mask, 0);\n          dfsan_label sign_diff = __taint_union(src_neg, dst_neg, __dfsan::Xor, 1,\n                                                src_sign ? 1 : 0, dst_sign ? 
1 : 0);\n          __taint_trace_cond(sign_diff, 0, UndefinedCheck, ub_integer_sign_change);\n        }\n      }\n    } else if (op == __dfsan::Add) {\n      // check for integer overflow\n      // -fsanitize=signed-integer-overflow, unsigned-integer-overflow\n      //\n      // we only care about l2, which is always symbolic\n      const uint64_t mask = size == 64 ? 0xFFFFFFFFFFFFFFFFUL : (1UL << size) - 1;\n      uint64_t result = (orig_op1 + orig_op2) & mask;\n\n      // Signed overflow detection:\n      // Overflow occurs when ((op1 ^ result) & (op2 ^ result)) has sign bit set\n      // This means both operands had same sign, but result has different sign\n      uint64_t xor1 = (orig_op1 ^ result) & mask;\n      uint64_t xor2 = (orig_op2 ^ result) & mask;\n      uint64_t overflow_check = xor1 & xor2;\n      uint64_t sign_bit = 1ULL << (size - 1);\n      bool has_signed_overflow = (overflow_check & sign_bit) != 0;\n\n      if (!has_signed_overflow) {\n        // Build symbolic expression: ((l1 ^ label) & (l2 ^ label)) < 0\n        dfsan_label xor_l1 = __taint_union(l1, label, __dfsan::Xor, size, orig_op1, result);\n        dfsan_label xor_l2 = __taint_union(l2, label, __dfsan::Xor, size, orig_op2, result);\n        dfsan_label and_xors = __taint_union(xor_l1, xor_l2, __dfsan::And, size, xor1, xor2);\n        dfsan_label cond = __taint_union(and_xors, 0, (bvslt << 8) | __dfsan::ICmp,\n                                         size, overflow_check, 0);\n        __taint_trace_cond(cond, 0, UndefinedCheck, ub_integer_overflow);\n      }\n\n      // Unsigned overflow: result < op1 (for any non-zero op2)\n      // When adding two unsigned numbers, overflow means result wrapped around\n      if (result >= orig_op1 && orig_op2 != 0) {\n        dfsan_label cond = __taint_union(label, l1, (bvult << 8) | __dfsan::ICmp,\n                                         size, result, orig_op1);\n        __taint_trace_cond(cond, 0, UndefinedCheck, ub_integer_overflow);\n      }\n    } 
else if (op == __dfsan::Mul) {\n      // check for integer overflow\n      // we only care about l2, which is always symbolic\n      const uint64_t mask = size == 64 ? 0xFFFFFFFFFFFFFFFFUL : (1UL << size) - 1;\n      uint64_t result = (orig_op1 * orig_op2) & mask;\n\n      // For multiplication, overflow is harder to detect symbolically\n      // Use the approach: if a != 0, then overflow iff result / a != b\n      // But we approximate with sign-based check similar to addition\n      uint64_t xor1 = (orig_op1 ^ result) & mask;\n      uint64_t xor2 = (orig_op2 ^ result) & mask;\n      uint64_t overflow_check = xor1 & xor2;\n      uint64_t sign_bit = 1ULL << (size - 1);\n\n      // For signed multiplication: check if signs are inconsistent\n      // Product of same signs should be positive, different signs should be negative\n      // This is an approximation - full check would need wider multiplication\n      bool has_signed_overflow = (overflow_check & sign_bit) != 0;\n\n      if (!has_signed_overflow && orig_op1 != 0 && orig_op2 != 0) {\n        dfsan_label xor_l1 = __taint_union(l1, label, __dfsan::Xor, size, orig_op1, result);\n        dfsan_label xor_l2 = __taint_union(l2, label, __dfsan::Xor, size, orig_op2, result);\n        dfsan_label and_xors = __taint_union(xor_l1, xor_l2, __dfsan::And, size, xor1, xor2);\n        dfsan_label cond = __taint_union(and_xors, 0, (bvslt << 8) | __dfsan::ICmp,\n                                         size, overflow_check, 0);\n        __taint_trace_cond(cond, 0, UndefinedCheck, ub_integer_overflow);\n      }\n\n      // Unsigned overflow: for multiplication, check if result / op1 != op2 (when op1 != 0)\n      if (orig_op1 != 0 && result / orig_op1 == orig_op2) {\n        // No overflow currently, check if overflow can happen\n        // Approximate: result < op1 || result < op2 when both > 1\n        if (orig_op1 > 1 && orig_op2 > 1) {\n          dfsan_label cond = __taint_union(label, l1, (bvult << 8) | __dfsan::ICmp,\n     
                                      size, result, orig_op1);\n          __taint_trace_cond(cond, 0, UndefinedCheck, ub_integer_overflow);\n        }\n      }\n    } else if (op == __dfsan::Sub) {\n      // check for integer overflow (underflow for subtraction)\n      // -fsanitize=signed-integer-overflow, unsigned-integer-overflow\n      const uint64_t mask = size == 64 ? 0xFFFFFFFFFFFFFFFFUL : (1UL << size) - 1;\n      uint64_t result = (orig_op1 - orig_op2) & mask;\n\n      // Signed overflow detection for subtraction:\n      // Overflow occurs when sign(a) != sign(b) and sign(result) != sign(a)\n      // Formula: (a ^ b) & (a ^ result) has sign bit set\n      // Examples:\n      //   INT_MAX - (-1) = overflow (positive - negative, result should be more positive but wraps)\n      //   INT_MIN - 1 = overflow (negative - positive, result should be more negative but wraps)\n      uint64_t xor_ab = (orig_op1 ^ orig_op2) & mask;\n      uint64_t xor_ar = (orig_op1 ^ result) & mask;\n      uint64_t overflow_check = xor_ab & xor_ar;\n      uint64_t sign_bit = 1ULL << (size - 1);\n      bool has_signed_overflow = (overflow_check & sign_bit) != 0;\n\n      if (!has_signed_overflow) {\n        // Build symbolic expression: ((l1 ^ l2) & (l1 ^ label)) < 0\n        dfsan_label xor_l1l2 = __taint_union(l1, l2, __dfsan::Xor, size, orig_op1, orig_op2);\n        dfsan_label xor_l1r = __taint_union(l1, label, __dfsan::Xor, size, orig_op1, result);\n        dfsan_label and_xors = __taint_union(xor_l1l2, xor_l1r, __dfsan::And, size, xor_ab, xor_ar);\n        dfsan_label cond = __taint_union(and_xors, 0, (bvslt << 8) | __dfsan::ICmp,\n                                         size, overflow_check, 0);\n        __taint_trace_cond(cond, 0, UndefinedCheck, ub_integer_overflow);\n      }\n\n      // Unsigned underflow: result > op1 when op2 > 0\n      // When subtracting, if a < b, result wraps around to large value (result > a)\n      if (result <= orig_op1 && orig_op2 != 0) {\n        
dfsan_label cond = __taint_union(label, l1, (bvugt << 8) | __dfsan::ICmp,\n                                         size, result, orig_op1);\n        __taint_trace_cond(cond, 0, UndefinedCheck, ub_integer_overflow);\n      }\n    }\n  }\n  return label;\n}\n\nextern \"C\" SANITIZER_INTERFACE_ATTRIBUTE\ndfsan_label __taint_union_load(const dfsan_label *ls, uptr n, uint64_t align) {\n  if ((uptr)ls < 4096) {\n    AOUT(\"WARNING: nullptr deref\\n\");\n    return 0;\n  } else if (((uptr)ls & (align - 1)) != 0) {\n    AOUT(\"WARNING: unaligned load %p\\n\", ls);\n  }\n  dfsan_label label0 = ls[0];\n  if (label0 == kInitializingLabel) return kInitializingLabel;\n\n  // for debugging\n  // dfsan_label l = atomic_load(&__dfsan_last_label, memory_order_relaxed);\n  // assert(label0 <= l);\n  if (label0 >= CONST_OFFSET) assert(get_label_info(label0)->size != 0);\n\n  // fast path 1: constant and bounds\n  if (is_constant_label(label0) || is_kind_of_label(label0, Alloca)) {\n    bool same = true;\n    for (uptr i = 1; i < n; i++) {\n      if (ls[i] == kInitializingLabel) return kInitializingLabel;\n      else if (ls[i] != label0) {\n        same = false;\n        break;\n      }\n    }\n    if (same) return label0;\n  }\n  AOUT(\"label0 = %d, n = %lu, ls = %p\\n\", label0, n, ls);\n\n  // shape\n  bool shape = true;\n  if (__dfsan_label_info[label0].op != 0) {\n    // not raw input bytes\n    shape = false;\n  } else {\n    off_t offset = get_label_info(label0)->op1.i;\n    for (uptr i = 1; i != n; ++i) {\n      dfsan_label next_label = ls[i];\n      if (next_label == kInitializingLabel) return kInitializingLabel;\n      else if (get_label_info(next_label)->op1.i != offset + i) {\n        shape = false;\n        break;\n      }\n    }\n  }\n  if (shape) {\n    if (n == 1) return label0;\n\n    AOUT(\"shape: label0: %d %lu\\n\", label0, n);\n    return __taint_union(label0, (dfsan_label)n, Load, n * 8, 0, 0);\n  }\n\n  // fast path 2: all labels are extracted from a n-size 
label,\n  // then return that label\n  if (is_kind_of_label(label0, Extract)) {\n    dfsan_label parent = get_label_info(label0)->l1;\n    uptr offset = 0;\n    for (uptr i = 0; i < n; i++) {\n      dfsan_label next_label = ls[i];\n      if (next_label == kInitializingLabel) return kInitializingLabel;\n      dfsan_label_info *info = get_label_info(next_label);\n      if (info->op != Extract || offset != info->op2.i || parent != info->l1) {\n        break;\n      }\n      offset += info->size;\n    }\n    if (get_label_info(parent)->size == offset && offset == n * 8) {\n      AOUT(\"Fast path (2): all labels are extracts: %u\\n\", parent);\n      return parent;\n    }\n  }\n\n  // slowpath\n  AOUT(\"union load slowpath at %p\\n\", __builtin_return_address(0));\n  dfsan_label label = label0;\n  for (uptr i = get_label_info(label0)->size / 8; i < n;) {\n    dfsan_label next_label = ls[i];\n    if (next_label == kInitializingLabel) return kInitializingLabel;\n    uint16_t next_size = get_label_info(next_label)->size;\n    AOUT(\"next label=%u, size=%u\\n\", next_label, next_size);\n    if (!is_constant_label(next_label)) {\n      if (next_size <= (n - i) * 8) {\n        i += next_size / 8;\n        label = __taint_union(label, next_label, Concat, i * 8, 0, 0);\n      } else {\n        Report(\"WARNING: partial loading expected=%lu has=%d\\n\", n-i, next_size);\n        uptr size = n - i;\n        dfsan_label trunc = __taint_union(next_label, CONST_LABEL, Trunc, size * 8, 0, 0);\n        return __taint_union(label, trunc, Concat, n * 8, 0, 0);\n      }\n    } else {\n      Report(\"WARNING: taint mixed with concrete %lu\\n\", i);\n      char *c = (char *)app_for(&ls[i]);\n      ++i;\n      label = __taint_union(label, 0, Concat, i * 8, 0, *c);\n    }\n  }\n  AOUT(\"\\n\");\n  return label;\n}\n\nextern \"C\" SANITIZER_INTERFACE_ATTRIBUTE\nvoid __taint_union_store(dfsan_label l, dfsan_label *ls, uptr n, uint64_t align) {\n  //AOUT(\"label = %d, n = %lu, ls = %p\\n\", l, 
n, ls);\n  if ((uptr)ls < 4096) {\n    AOUT(\"WARNING: nullptr deref\\n\");\n    return;\n  } else if (((uptr)ls & (align - 1)) != 0) {\n    AOUT(\"WARNING: unaligned store %p\\n\", ls);\n  }\n  if (l != kInitializingLabel) {\n    // for debugging\n    dfsan_label h = atomic_load(&__dfsan_last_label, memory_order_relaxed);\n    assert(l <= __alloca_stack_bottom);\n    if (l > h && l < __alloca_stack_top) {\n      AOUT(\"WARNING: unallocated label %d > %d, and < %d\\n\",\n           l, h, __alloca_stack_top);\n    }\n  } else {\n    for (uptr i = 0; i < n; ++i)\n      ls[i] = l;\n    return;\n  }\n\n  // fast path 1: constant and bounds\n  if (l == 0 || is_kind_of_label(l, Alloca)) {\n    for (uptr i = 0; i < n; ++i)\n      ls[i] = l;\n    return;\n  }\n\n  dfsan_label_info *info = get_label_info(l);\n  // fast path 2: single byte\n  if (n == 1 && info->size == 8) {\n    ls[0] = l;\n    return;\n  }\n\n  // fast path 3: load\n  if (is_kind_of_label(l, Load)) {\n    // if source label is union load, just break it up\n    dfsan_label label0 = info->l1;\n    if (n > info->l2) {\n      Report(\"WARNING: store size=%lu larger than load size=%d\\n\", n, info->l2);\n    }\n    for (uptr i = 0; i < n; ++i)\n      ls[i] = label0 + i;\n    return;\n  }\n\n  // default fall through\n  for (uptr i = 0; i < n; ++i) {\n    ls[i] = __taint_union(l, CONST_LABEL, Extract, 8, 0, i * 8);\n  }\n}\n\nextern \"C\" SANITIZER_INTERFACE_ATTRIBUTE\nvoid __taint_push_stack_frame() {\n  if (flags().trace_bounds) {\n    if (__current_saved_stack_index < MAX_SAVED_STACK_ENTRIES)\n      __saved_alloca_stack_top[++__current_saved_stack_index] = __alloca_stack_top;\n  }\n}\n\nextern \"C\" SANITIZER_INTERFACE_ATTRIBUTE\nvoid __taint_pop_stack_frame() {\n  if (flags().trace_bounds) {\n    __alloca_stack_top = __saved_alloca_stack_top[__current_saved_stack_index--];\n  }\n}\n\nextern \"C\" SANITIZER_INTERFACE_ATTRIBUTE\ndfsan_label __taint_trace_alloca(dfsan_label l, uint64_t size,\n                   
              uint64_t elem_size, uint64_t base) {\n  if (flags().trace_bounds) {\n    __alloca_stack_top -= 1;\n    AOUT(\"label = %d, base = %p, size = %lu, elem_size = %lu\\n\",\n        __alloca_stack_top, (void*)base, size, elem_size);\n    dfsan_label_info *info = get_label_info(__alloca_stack_top);\n    internal_memset(info, 0, sizeof(dfsan_label_info));\n    info->l2    = l;\n    info->op    = Alloca;\n    info->size  = sizeof(void*) * 8;\n    info->op1.i = base;\n    info->op2.i = base + size * elem_size;\n\n    // set uninit label\n    dfsan_set_label(kInitializingLabel, (void*)base, size * elem_size);\n\n    return __alloca_stack_top;\n  } else {\n    return 0;\n  }\n}\n\nextern \"C\" SANITIZER_INTERFACE_ATTRIBUTE\ndfsan_label __taint_trace_global(uint64_t addr, uint64_t size) {\n  if (flags().trace_bounds) {\n    // setup a hash tree for dedup\n    uint32_t h1 = (uint32_t)addr; // lower 32 bits\n    uint32_t h2 = (uint32_t)(addr >> 32); // upper 32 bits\n    uint32_t hash = xxhash(h1, h2, Alloca);\n\n    struct dfsan_label_info label_info = {\n      .l1 = 0, .l2 = 0, .op1 = {addr}, .op2 = {addr + size},\n      .op = __dfsan::Alloca, .size = sizeof(void*) * 8, .hash = hash};\n\n    __taint::option res = __union_table.lookup(label_info);\n    if (res != __taint::none()) {\n      dfsan_label label = *res;\n      AOUT(\"global %u found\\n\", label);\n      return label;\n    }\n\n    dfsan_label label =\n      atomic_fetch_add(&__dfsan_last_label, 1, memory_order_relaxed) + 1;\n    dfsan_check_label(label);\n    internal_memcpy(&__dfsan_label_info[label], &label_info, sizeof(dfsan_label_info));\n    __union_table.insert(&__dfsan_label_info[label], label);\n\n    AOUT(\"adding global bounds %d=(%lx, %lu)\\n\", label, addr, size);\n\n    return label;\n  }\n\n  return 0;\n}\n\nSANITIZER_INTERFACE_WEAK_DEF(void, __taint_trace_memerr, dfsan_label, uptr,\n                             dfsan_label, uint64_t, uint16_t, void*) {}\n\n// NOTES: for Alloca, or buffer 
bounds info\n// .l1 = num of elements label, for calloc style allocators\n// .l2 = (element) size label\n// .op1 = lower bounds\n// .op2 = upper bounds\nextern \"C\" SANITIZER_INTERFACE_ATTRIBUTE\nvoid __taint_check_bounds(dfsan_label addr_label, uptr addr,\n                          dfsan_label size_label, uint64_t size) {\n  if (flags().trace_bounds) {\n    void *retaddr = __builtin_return_address(0);\n    if (addr == 0) {\n      AOUT(\"WARNING: null ptr deref %p = %d @%p\\n\", (void*)addr, addr_label, retaddr);\n      __taint_trace_memerr(addr_label, addr, size_label, size, F_MEMERR_NULL, retaddr);\n      if (flags().exit_on_memerror) Die();\n      else return;\n    }\n    if (addr_label == kInitializingLabel) {\n      AOUT(\"WARNING: uninitialized memory %p = %d @%p\\n\", (void*)addr, addr_label, retaddr);\n      __taint_trace_memerr(addr_label, addr, size_label, size, F_MEMERR_UBI, retaddr);\n      if (flags().exit_on_memerror) Die();\n      else return;\n    }\n    dfsan_label_info *info = get_label_info(addr_label);\n    if (info->op == __dfsan::Free) {\n      // UAF\n      AOUT(\"ERROR: UAF detected %p = %d @%p\\n\", (void*)addr, addr_label, retaddr);\n      __taint_trace_memerr(addr_label, addr, size_label, size, F_MEMERR_UAF, retaddr);\n      if (flags().exit_on_memerror) Die();\n    } else if (info->op == __dfsan::Alloca) {\n      AOUT(\"addr = %p, lower = %p, upper = %p\\n\",\n           (void*)addr, (void*)info->op1.i, (void*)info->op2.i);\n      if (addr < info->op1.i) {\n        AOUT(\"ERROR: OOB underflow detected %p = %d, %lu = %d @%p\\n\",\n             (void*)addr, addr_label, size, size_label, retaddr);\n        __taint_trace_memerr(addr_label, addr, size_label, size, F_MEMERR_OLB, retaddr);\n        if (flags().exit_on_memerror) Die();\n      } else if ((addr + size) > info->op2.i || (addr + size) < info->op1.i) {\n        AOUT(\"ERROR: OOB overflow detected %p = %d, %lu = %d @%p\\n\",\n             (void*)addr, addr_label, size, size_label, 
__builtin_return_address(0));\n        __taint_trace_memerr(addr_label, addr, size_label, size, F_MEMERR_OUB, retaddr);\n        if (flags().exit_on_memerror) Die();\n      }\n    } else if (addr_label != 0) {\n      AOUT(\"WARNING: incorrect label %p = %d @%p\\n\",\n           (void*)addr, addr_label, __builtin_return_address(0));\n    }\n  }\n}\n\nextern \"C\" SANITIZER_INTERFACE_ATTRIBUTE\nvoid __taint_solve_bounds(dfsan_label ptr_label, uint64_t ptr,\n                          dfsan_label index_label, int64_t index,\n                          uint64_t num_elems, uint64_t elem_size,\n                          int64_t current_offset, uint32_t cid) {\n  if (index_label == 0 || !flags().solve_ub)\n    return;\n\n  void *addr = __builtin_return_address(0);\n\n  if (index_label == kInitializingLabel) {\n    // uninitialized label\n    AOUT(\"WARNING: uninitialized label %u @%p\\n\", index_label, addr);\n    __taint_trace_memerr(ptr_label, ptr, index_label, index, F_MEMERR_UBI, addr);\n    if (flags().exit_on_memerror) Die();\n    else return;\n  }\n  if (ptr_label == kInitializingLabel) {\n    // uninitialized label\n    AOUT(\"WARNING: uninitialized label %u @%p\\n\", ptr_label, addr);\n    __taint_trace_memerr(ptr_label, ptr, index_label, index, F_MEMERR_UBI, addr);\n    if (flags().exit_on_memerror) Die();\n    else return;\n  }\n\n  AOUT(\"solve bounds: %ld = %d, ne: %ld, es: %ld, offset: %ld\\n\",\n      index, index_label, num_elems, elem_size, current_offset);\n\n  // construct bounds solving tasks here\n  uint16_t index_bits = get_label_info(index_label)->size;\n  if (num_elems > 0) {\n    // array with known size\n    //\n    // check underflow, index < 0\n    dfsan_label lb = __taint_union(index_label, 0, (bvslt << 8) | ICmp,\n                                   index_bits, index, 0);\n    // assume the result is false, as bounds check should happen before solving\n    // no flag, no nested\n    __taint_trace_cond(lb, 0, UndefinedCheck, 
ub_index_underflow);\n\n    // check overflow, index >= num_elems\n    dfsan_label ub = __taint_union(index_label, 0, (bvsge << 8) | ICmp,\n                                   index_bits, index, num_elems);\n    __taint_trace_cond(ub, 0, UndefinedCheck, ub_index_overflow);\n  } else {\n    // array with unknown size\n    dfsan_label_info *bounds_info = get_label_info(ptr_label);\n    if (bounds_info->op == __dfsan::Alloca) {\n      // bounds information is available, check if allocation size is symbolic\n      if (index_bits < 64) // extends index to 64 bits\n        index_label = __taint_union(index_label, 0, ZExt, 64, index, 0);\n      if (bounds_info->l2 == 0) {\n        // concrete allocation size, check bounds\n        // check underflow, index * elem_size + current_offset + ptr < lower_bound\n        // => index < (lower_bound - current_offset - ptr) / elem_size\n        uint64_t lower_bound =\n            (bounds_info->op1.i - current_offset - ptr) / elem_size;\n        dfsan_label lb = __taint_union(index_label, 0, (bvult << 8) | ICmp,\n                                       64, index, lower_bound);\n        __taint_trace_cond(lb, 0, UndefinedCheck, ub_index_underflow);\n\n        // check overflow, (index + 1) * elem_size + current_offset + ptr > upper_bound\n        // => index > (upper_bound - current_offset - ptr) / elem_size - 1\n        uint64_t upper_bound =\n            (bounds_info->op2.i - current_offset - ptr) / elem_size - 1;\n        dfsan_label ub = __taint_union(index_label, 0, (bvugt << 8) | ICmp,\n                                       64, index, upper_bound);\n        __taint_trace_cond(ub, 0, UndefinedCheck, ub_index_overflow);\n      } else {\n        // index * elem_size + current_offset + (ptr - lower_bound) > array_size * alloc_elem_size\n        dfsan_label size_label = elem_size == 1 ? 
index_label :\n            __taint_union(index_label, 0, Mul, 64, index, elem_size);\n        uint64_t size = index * elem_size;\n        uint64_t offset = current_offset + ptr - bounds_info->op1.i;\n        size_label = offset == 0 ? size :\n            __taint_union(size_label, 0, Add, 64, size, offset);\n        size += offset;\n        uint64_t alloc_size = bounds_info->op2.i - bounds_info->op1.i;\n        dfsan_label overflow =\n            __taint_union(size_label, bounds_info->l2, (bvugt << 8) | ICmp,\n                          64, size, alloc_size);\n        __taint_trace_cond(overflow, 0, UndefinedCheck, ub_integer_to_buffer_overflow);\n      }\n    } else {\n      // symbolic pointer but no bounds info?\n      AOUT(\"WARNING: symbolic pointer %p = %u with no bounds info @%p\\n\",\n           (void*)ptr, ptr_label, addr);\n      // check if null is possible?\n      dfsan_label null = __taint_union(ptr_label, 0, bveq, 64, ptr, 0);\n      __taint_trace_cond(null, 0, UndefinedCheck, ub_null_pointer);\n    }\n  }\n}\n\nextern \"C\" SANITIZER_INTERFACE_ATTRIBUTE\nvoid __taint_solve_size(dfsan_label ptr_label, uint64_t ptr,\n                        dfsan_label size_label, uint64_t size,\n                        uint32_t cid) {\n  if (size_label == 0 || !flags().solve_ub)\n    return;\n\n  void *addr = __builtin_return_address(0);\n\n  if (size_label == kInitializingLabel) {\n    // uninitialized label\n    AOUT(\"WARNING: uninitialized size label %u @%p\\n\", size_label, addr);\n    __taint_trace_memerr(ptr_label, ptr, size_label, size, F_MEMERR_UBI, addr);\n    if (flags().exit_on_memerror) Die();\n    else return;\n  }\n  if (ptr_label == kInitializingLabel) {\n    // uninitialized label\n    AOUT(\"WARNING: uninitialized pointer label %u @%p\\n\", ptr_label, addr);\n    __taint_trace_memerr(ptr_label, ptr, size_label, size, F_MEMERR_UBI, addr);\n    if (flags().exit_on_memerror) Die();\n    else return;\n  }\n\n  AOUT(\"solve size: %lu = %d, ptr: %p = 
%d\\n\",\n      size, size_label, (void*)ptr, ptr_label);\n\n  // construct size solving tasks here\n  uint16_t size_bits = get_label_info(size_label)->size;\n\n  // check overflow with buffer bounds if ptr has bounds info\n  if (ptr_label != 0) {\n    dfsan_label_info *bounds_info = get_label_info(ptr_label);\n    if (bounds_info->op == __dfsan::Alloca) {\n      // bounds information is available\n      if (size_bits < 64) // extend size to 64 bits\n        size_label = __taint_union(size_label, 0, ZExt, 64, size, 0);\n\n      if (bounds_info->l2 == 0) {\n        // concrete allocation size\n        // check underflow: ptr + size < lower_bound (wrap around)\n        // => size < lower_bound - ptr (when lower_bound > ptr, but this shouldn't happen in valid code)\n        // or equivalently, check that ptr < lower_bound (shouldn't happen)\n        uint64_t min_size = bounds_info->op1.i - ptr;\n        dfsan_label underflow = __taint_union(size_label, 0, (bvult << 8) | ICmp,\n                                              64, size, min_size);\n        __taint_trace_cond(underflow, 0, UndefinedCheck, ub_size_underflow);\n\n        // check overflow: ptr + size > upper_bound\n        // => size > upper_bound - ptr\n        uint64_t max_size = bounds_info->op2.i - ptr;\n        dfsan_label overflow = __taint_union(size_label, 0, (bvugt << 8) | ICmp,\n                                             64, size, max_size);\n        __taint_trace_cond(overflow, 0, UndefinedCheck, ub_size_overflow);\n      } else {\n        // symbolic allocation size\n        // check: size > alloc_size\n        uint64_t offset = ptr - bounds_info->op1.i;\n        uint64_t alloc_size = bounds_info->op2.i - bounds_info->op1.i;\n        dfsan_label adjusted_size = offset == 0 ? 
size_label :\n            __taint_union(size_label, 0, Add, 64, size, offset);\n        uint64_t actual_size = size + offset;\n        dfsan_label overflow = __taint_union(adjusted_size, bounds_info->l2,\n                                             (bvugt << 8) | ICmp, 64,\n                                             actual_size, alloc_size);\n        __taint_trace_cond(overflow, 0, UndefinedCheck, ub_size_to_buffer_overflow);\n      }\n    } else if (ptr_label != 0) {\n      // symbolic pointer but no bounds info\n      AOUT(\"WARNING: symbolic pointer %p = %u with no bounds info @%p\\n\",\n           (void*)ptr, ptr_label, addr);\n      // check if null is possible\n      dfsan_label null = __taint_union(ptr_label, 0, bveq, 64, ptr, 0);\n      __taint_trace_cond(null, 0, UndefinedCheck, ub_null_pointer);\n    }\n  }\n}\n\nextern \"C\" SANITIZER_INTERFACE_ATTRIBUTE\nvoid dfsan_store_label(dfsan_label l, void *addr, uptr size) {\n  if (l == 0) return;\n  __taint_union_store(l, shadow_for(addr), size, sizeof(dfsan_label));\n}\n\nextern \"C\" SANITIZER_INTERFACE_ATTRIBUTE\nvoid __dfsan_unimplemented(char *fname) {\n  if (flags().warn_unimplemented)\n    Report(\"WARNING: DataFlowSanitizer: call to uninstrumented function %s\\n\",\n           fname);\n\n}\n\n// Use '-mllvm -dfsan-debug-nonzero-labels' and break on this function\n// to try to figure out where labels are being introduced in a nominally\n// label-free program.\nextern \"C\" SANITIZER_INTERFACE_ATTRIBUTE void __dfsan_nonzero_label() {\n  if (flags().warn_nonzero_labels)\n    Report(\"WARNING: DataFlowSanitizer: saw nonzero label\\n\");\n}\n\n// Indirect call to an uninstrumented vararg function. 
We don't have a way of\n// handling these at the moment.\nextern \"C\" SANITIZER_INTERFACE_ATTRIBUTE void\n__dfsan_vararg_wrapper(const char *fname) {\n  Report(\"FATAL: DataFlowSanitizer: unsupported indirect call to vararg \"\n         \"function %s\\n\", fname);\n  Die();\n}\n\n// Like __dfsan_union, but for use from the client or custom functions.  Hence\n// the equality comparison is done here before calling __dfsan_union.\nSANITIZER_INTERFACE_ATTRIBUTE dfsan_label\ndfsan_union(dfsan_label l1, dfsan_label l2, uint16_t op, uint16_t size,\n            uint64_t op1, uint64_t op2) {\n  return __taint_union(l1, l2, op, size, op1, op2);\n}\n\nextern \"C\" SANITIZER_INTERFACE_ATTRIBUTE\ndfsan_label dfsan_create_label(off_t offset) {\n  dfsan_label label =\n    atomic_fetch_add(&__dfsan_last_label, 1, memory_order_relaxed) + 1;\n  dfsan_check_label(label);\n  internal_memset(&__dfsan_label_info[label], 0, sizeof(dfsan_label_info));\n  __dfsan_label_info[label].size = 8;\n  // label may not equal to offset when using stdin\n  __dfsan_label_info[label].op1.i = offset;\n  // init a non-zero hash\n  __dfsan_label_info[label].hash = xxhash(offset, 0, 8);\n  return label;\n}\n\nextern \"C\" SANITIZER_INTERFACE_ATTRIBUTE\nvoid __dfsan_set_label(dfsan_label label, void *addr, uptr size) {\n  if (addr == 0) return;\n  for (dfsan_label *labelp = shadow_for(addr); size != 0; --size, ++labelp) {\n    // Don't write the label if it is already the value we need it to be.\n    // In a program where most addresses are not labeled, it is common that\n    // a page of shadow memory is entirely zeroed.  The Linux copy-on-write\n    // implementation will share all of the zeroed pages, making a copy of a\n    // page when any value is written.  The un-sharing will happen even if\n    // the value written does not change the value in memory.  
Avoiding the\n    // write when both |label| and |*labelp| are zero dramatically reduces\n    // the amount of real memory used by large programs.\n    if (label == *labelp)\n      continue;\n\n    // AOUT(\"%p = %u\\n\", addr, label);\n    *labelp = label;\n  }\n}\n\nSANITIZER_INTERFACE_ATTRIBUTE\nvoid dfsan_set_label(dfsan_label label, void *addr, uptr size) {\n  __dfsan_set_label(label, addr, size);\n}\n\nSANITIZER_INTERFACE_ATTRIBUTE\nvoid dfsan_add_label(dfsan_label label, uint8_t op, void *addr, uptr size) {\n  for (dfsan_label *labelp = shadow_for(addr); size != 0; --size, ++labelp)\n    *labelp = __taint_union(*labelp, label, op, 1, 0, 0);\n}\n\n// Unlike the other dfsan interface functions the behavior of this function\n// depends on the label of one of its arguments.  Hence it is implemented as a\n// custom function.\nextern \"C\" SANITIZER_INTERFACE_ATTRIBUTE dfsan_label\n__dfsw_dfsan_get_label(long data, dfsan_label data_label,\n                       dfsan_label *ret_label) {\n  *ret_label = 0;\n  return data_label;\n}\n\nSANITIZER_INTERFACE_ATTRIBUTE dfsan_label\ndfsan_read_label(const void *addr, uptr size) {\n  if (size == 0)\n    return 0;\n  return __taint_union_load(shadow_for(addr), size, sizeof(dfsan_label));\n}\n\nSANITIZER_INTERFACE_ATTRIBUTE dfsan_label\ndfsan_get_label(const void *addr) {\n  return *shadow_for(addr);\n}\n\nextern \"C\" SANITIZER_INTERFACE_ATTRIBUTE\ndfsan_label_info *dfsan_get_label_info(dfsan_label label) {\n  dfsan_check_label(label);\n  return &__dfsan_label_info[label];\n}\n\nextern \"C\" SANITIZER_INTERFACE_ATTRIBUTE int\ndfsan_has_label(dfsan_label label, dfsan_label elem) {\n  if (label == kInitializingLabel || elem == kInitializingLabel) return false;\n  if (label == elem)\n    return true;\n  const dfsan_label_info *info = dfsan_get_label_info(label);\n  if (info->l1 != 0) {\n    return dfsan_has_label(info->l1, elem);\n  }\n  if (info->l2 != 0) {\n    return dfsan_has_label(info->l2, elem);\n  } \n  return 
false;\n}\n\nextern \"C\" SANITIZER_INTERFACE_ATTRIBUTE uptr\ndfsan_get_label_count(void) {\n  dfsan_label max_label_allocated =\n      atomic_load(&__dfsan_last_label, memory_order_relaxed);\n\n  return static_cast<uptr>(max_label_allocated);\n}\n\nextern \"C\" SANITIZER_INTERFACE_ATTRIBUTE void\ndfsan_dump_labels(int fd) {\n  dfsan_label last_label =\n      atomic_load(&__dfsan_last_label, memory_order_relaxed);\n\n  for (dfsan_label l = 1; l <= last_label; ++l) {\n    char buf[64];\n    internal_snprintf(buf, sizeof(buf), \"%u (%u %u %u %u)\", l,\n                      __dfsan_label_info[l].l1, __dfsan_label_info[l].l2,\n                      __dfsan_label_info[l].op, __dfsan_label_info[l].size);\n    WriteToFile(fd, buf, internal_strlen(buf));\n    WriteToFile(fd, \"\\n\", 1);\n  }\n}\n\nextern \"C\" SANITIZER_INTERFACE_ATTRIBUTE void\n__taint_debug(dfsan_label op1, dfsan_label op2, int predicate,\n              uint32_t size, uint32_t target) {\n  if (op1 == 0 && op2 == 0) return;\n}\n\nSANITIZER_INTERFACE_ATTRIBUTE void\ntaint_set_file(int dirfd, const char *filename, int fd) {\n  char path[PATH_MAX];\n  if (dirfd != AT_FDCWD) {\n    // only resolve dirfd if not CWD\n    ssize_t len = readlinkat(dirfd, filename, path, sizeof(path));\n    if (len < 0) {\n      AOUT(\"WARNING: readlinkat failed %s\\n\", filename);\n      return;\n    }\n    path[len] = '\\0';\n  }\n  realpath(filename, path);\n  if (internal_strcmp(tainted.filename, path) == 0) {\n    tainted.fd = fd;\n    AOUT(\"fd:%d created\\n\", fd);\n  }\n}\n\nSANITIZER_INTERFACE_ATTRIBUTE int\nis_taint_file(const char *filename) {\n  char path[PATH_MAX];\n  realpath(filename, path);\n  if (internal_strcmp(tainted.filename, path) == 0) {\n    tainted.is_utmp = 1;\n    return 1;\n  }\n  tainted.is_utmp = 0;\n  return 0;\n}\n\nSANITIZER_INTERFACE_ATTRIBUTE off_t\ntaint_get_file(int fd) {\n  AOUT(\"fd: %d\\n\", fd);\n  AOUT(\"tainted.fd: %d\\n\", tainted.fd);\n  if (tainted.fd == fd) {\n    return 
tainted.size;\n  } else if (flags().force_stdin && fd == 0) {\n    return tainted.size;\n  } else {\n    return 0;\n  }\n}\n\nSANITIZER_INTERFACE_ATTRIBUTE void\ntaint_close_file(int fd) {\n  if (fd == tainted.fd) {\n    AOUT(\"close tainted.fd: %d\\n\", tainted.fd);\n    tainted.fd = -1;\n  }\n}\n\nSANITIZER_INTERFACE_ATTRIBUTE int\nis_stdin_taint(void) {\n  return tainted.is_stdin;\n}\n\n// for utmp interface\nSANITIZER_INTERFACE_ATTRIBUTE int\nis_utmp_taint(void) {\n  return tainted.is_utmp;\n}\n\nSANITIZER_INTERFACE_ATTRIBUTE void\nset_utmp_offset(off_t offset) {\n  tainted.offset = offset;\n}\n\nSANITIZER_INTERFACE_ATTRIBUTE off_t\nget_utmp_offset() {\n  return tainted.offset;\n}\n\nSANITIZER_INTERFACE_ATTRIBUTE void\ntaint_set_offset_label(dfsan_label label) {\n  tainted.offset_label = label;\n}\n\nSANITIZER_INTERFACE_ATTRIBUTE dfsan_label\ntaint_get_offset_label() {\n  return tainted.offset_label;\n}\n\nSANITIZER_INTERFACE_ATTRIBUTE void\ntaint_set_socket(const void *addr, unsigned addrlen, int fd) {\n  const struct sockaddr *sa = (struct sockaddr *)addr;\n  AOUT(\"taint host %s:%d\\n\", tainted_socket.host, tainted_socket.port);\n  if (sa->sa_family != tainted_socket.family) return;\n\n  if (sa->sa_family == AF_INET) {\n    struct sockaddr_in *sin = (struct sockaddr_in *)sa;\n    if (tainted_socket.port != ntohs(sin->sin_port)) return;\n    struct in_addr addr;\n    inet_pton(AF_INET, tainted_socket.host, &addr);\n    if (addr.s_addr != sin->sin_addr.s_addr) return;\n    // family, port, and address match\n    AOUT(\"taint sockfd %d\\n\", fd);\n    tainted_socket.fd = fd;\n  } else if (sa->sa_family == AF_INET6) {\n    struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)sa;\n    if (tainted_socket.port != ntohs(sin6->sin6_port)) return;\n    struct in6_addr addr;\n    inet_pton(AF_INET6, tainted_socket.host, &addr);\n    if (internal_memcmp(&addr, &sin6->sin6_addr, sizeof(addr)) != 0) return;\n    // family, port, and address match\n    AOUT(\"taint sockfd 
%d\\n\", fd);\n    tainted_socket.fd = fd;\n  } else if (sa->sa_family == AF_UNIX) {\n    struct sockaddr_un *sun = (struct sockaddr_un *)sa;\n    if (internal_strncmp(tainted_socket.host, sun->sun_path, sizeof(tainted_socket.host)) == 0) {\n      AOUT(\"taint sockfd %d\\n\", fd);\n      tainted_socket.fd = fd;\n    }\n  }\n}\n\nSANITIZER_INTERFACE_ATTRIBUTE off_t\ntaint_get_socket(int fd) {\n  if (tainted_socket.fd == fd) {\n    return tainted_socket.offset;\n  } else if (flags().force_stdin) {\n    return tainted_socket.offset;\n  } else {\n    return -1;\n  }\n}\n\nSANITIZER_INTERFACE_ATTRIBUTE void\ntaint_update_socket_offset(int fd, size_t size) {\n  if (tainted_socket.fd == fd)\n    tainted_socket.offset += size;\n}\n\nSANITIZER_INTERFACE_ATTRIBUTE void\ntaint_close_socket(int fd) {\n  if (tainted_socket.fd == fd) {\n    AOUT(\"close tainted_socket.fd: %d\\n\", tainted_socket.fd);\n    tainted_socket.fd = -1;\n  }\n}\n\nvoid Flags::SetDefaults() {\n#define DFSAN_FLAG(Type, Name, DefaultValue, Description) Name = DefaultValue;\n#include \"dfsan_flags.inc\"\n#undef DFSAN_FLAG\n}\n\nstatic void RegisterDfsanFlags(FlagParser *parser, Flags *f) {\n#define DFSAN_FLAG(Type, Name, DefaultValue, Description) \\\n  RegisterFlag(parser, #Name, Description, &f->Name);\n#include \"dfsan_flags.inc\"\n#undef DFSAN_FLAG\n}\n\nstatic void InitializeTaintFile() {\n  struct stat st;\n  const char *filename = flags().taint_file;\n  int err;\n  if (internal_strcmp(filename, \"stdin\") == 0) {\n    tainted.fd = 0;\n    // try to get the size, as stdin may be a file\n    if (!fstat(0, &st) && S_ISREG(st.st_mode)) {\n      tainted.size = st.st_size;\n      tainted.is_stdin = 0;\n      // map a copy\n      tainted.buf_size = RoundUpTo(st.st_size, GetPageSizeCached());\n      uptr map = internal_mmap(nullptr, tainted.buf_size, PROT_READ, MAP_PRIVATE, 0, 0);\n      if (internal_iserror(map, &err)) {\n        Printf(\"FATAL: failed to map a copy of input file %s\\n\", strerror(err));\n  
      Die();\n      }\n      tainted.buf = reinterpret_cast<char *>(map);\n    } else {\n      tainted.size = 1;\n      tainted.is_stdin = 1; // truly stdin\n    }\n  } else if (internal_strcmp(filename, \"\") == 0) {\n    tainted.fd = -1;\n  } else {\n    if (!realpath(filename, tainted.filename)) {\n      Report(\"WARNING: failed to get to real path for taint file\\n\");\n      return;\n    }\n    stat(filename, &st);\n    tainted.size = st.st_size;\n    tainted.is_stdin = 0;\n    // map a copy\n    tainted.buf = static_cast<char *>(\n      MapFileToMemory(filename, &tainted.buf_size));\n    if (tainted.buf == nullptr) {\n      Printf(\"FATAL: failed to map a copy of input file\\n\");\n      Die();\n    }\n    AOUT(\"%s %ld size\\n\", filename, tainted.size);\n  }\n\n  if (tainted.fd != -1 && !tainted.is_stdin) {\n    for (off_t i = 0; i < tainted.size; i++) {\n      dfsan_label label = dfsan_create_label(i);\n      dfsan_check_label(label);\n    }\n  }\n}\n\nstatic void InitializeTaintSocket() {\n  const char *host = flags().taint_socket;\n  internal_memset(tainted_socket.host, 0, sizeof(tainted_socket.host));\n  tainted_socket.family = -1;\n  tainted_socket.port = -1;\n  tainted_socket.fd = -1;\n  if (internal_strstr(host, \"tcp@\") == host || internal_strstr(host, \"udp@\") == host) {\n    char *port = internal_strchr(host + 4, '@');\n    if (port) {\n      tainted_socket.family = AF_INET;\n      size_t addr_len = (uptr)port - (uptr)host - 4;\n      internal_memcpy(tainted_socket.host, host + 4, addr_len);\n      tainted_socket.host[addr_len] = '\\0';\n      tainted_socket.port = atoi(port + 1);\n    } else {\n      Report(\"FATAL: invalid inet socket %s\\n\", host);\n      Die();\n    }\n  } else if (internal_strstr(host, \"tcp6@\") == host || internal_strstr(host, \"udp6@\") == host) {\n    char *port = internal_strchr(host + 5, '@');\n    if (port) {\n      tainted_socket.family = AF_INET6;\n      size_t addr_len = (uptr)port - (uptr)host - 5;\n      
internal_memcpy(tainted_socket.host, host + 5, addr_len);\n      tainted_socket.host[addr_len] = '\\0';\n      tainted_socket.port = atoi(port + 1);\n    } else {\n      Report(\"FATAL: invalid inet6 socket %s\\n\", host);\n      Die();\n    }\n  } else if (internal_strstr(host, \"unix@\") == host) {\n    tainted_socket.family = AF_UNIX;\n    uptr len = internal_strlen(host + 5);\n    if (len < sizeof(tainted_socket.host)) {\n      internal_memcpy(tainted_socket.host, host + 5, len);\n    } else {\n      Report(\"FATAL: invalid unix socket %s\\n\", host);\n      Die();\n    }\n  } else if (internal_strcmp(host, \"\")) {\n    Report(\"FATAL: unsupported taint socket %s\\n\", host);\n    Die();\n  }\n}\n\n// Hash tables for string label tracking\nstatic uptr content_map_capacity = 0;\nstatic struct {\n  uptr addr;\n  dfsan_label label;\n} *__taint_content_map = nullptr;\nstatic uptr content_map_count = 0;\n\nstatic uptr indexof_map_capacity = 0;\nstatic struct {\n  uptr addr;\n  dfsan_label label;\n} *__taint_indexof_map = nullptr;\nstatic uptr indexof_map_count = 0;\n\n// Hash function optimized for shadow memory addresses (0x700000040000 ~ 0x800000000000)\n// Focus on middle bits where entropy is highest\nstatic inline uptr hash_addr(uptr addr, uptr capacity) {\n  addr >>= 3;  // Remove low 3 bits (8-byte alignment)\n  addr *= 2654435769UL;  // Multiplicative hash\n  return addr & (capacity - 1);  // Fast modulo for power-of-2\n}\n\n// Grow content map when load factor exceeds 0.7\nstatic void grow_content_map() {\n  uptr new_capacity = content_map_capacity * 2;\n  typeof(__taint_content_map) new_map = (typeof(__taint_content_map))InternalAlloc(\n      new_capacity * sizeof(*__taint_content_map));\n  internal_memset(new_map, 0, new_capacity * sizeof(*__taint_content_map));\n\n  // Rehash existing entries\n  for (uptr i = 0; i < content_map_capacity; i++) {\n    if (__taint_content_map[i].addr != 0) {\n      uptr hash = hash_addr(__taint_content_map[i].addr, 
new_capacity);\n      while (new_map[hash].addr != 0) {\n        hash = (hash + 1) & (new_capacity - 1);\n      }\n      new_map[hash] = __taint_content_map[i];\n    }\n  }\n\n  InternalFree(__taint_content_map);\n  __taint_content_map = new_map;\n  content_map_capacity = new_capacity;\n}\n\n// Grow indexOf map\nstatic void grow_indexof_map() {\n  uptr new_capacity = indexof_map_capacity * 2;\n  typeof(__taint_indexof_map) new_map = (typeof(__taint_indexof_map))InternalAlloc(\n      new_capacity * sizeof(*__taint_indexof_map));\n  internal_memset(new_map, 0, new_capacity * sizeof(*__taint_indexof_map));\n\n  for (uptr i = 0; i < indexof_map_capacity; i++) {\n    if (__taint_indexof_map[i].addr != 0) {\n      uptr hash = hash_addr(__taint_indexof_map[i].addr, new_capacity);\n      while (new_map[hash].addr != 0) {\n        hash = (hash + 1) & (new_capacity - 1);\n      }\n      new_map[hash] = __taint_indexof_map[i];\n    }\n  }\n\n  InternalFree(__taint_indexof_map);\n  __taint_indexof_map = new_map;\n  indexof_map_capacity = new_capacity;\n}\n\nstatic void InitializeStringMaps() {\n  // Round up to nearest power of 2 for efficient hashing\n  uptr capacity = flags().string_map_capacity;\n  if (capacity < 16) capacity = 16;  // Minimum size\n  // Round up to power of 2\n  capacity--;\n  capacity |= capacity >> 1;\n  capacity |= capacity >> 2;\n  capacity |= capacity >> 4;\n  capacity |= capacity >> 8;\n  capacity |= capacity >> 16;\n  capacity |= capacity >> 32;\n  capacity++;\n\n  // Content map\n  content_map_capacity = capacity;\n  __taint_content_map = (typeof(__taint_content_map))InternalAlloc(\n      content_map_capacity * sizeof(*__taint_content_map));\n  internal_memset(__taint_content_map, 0,\n      content_map_capacity * sizeof(*__taint_content_map));\n  content_map_count = 0;\n\n  // IndexOf map\n  indexof_map_capacity = capacity;\n  __taint_indexof_map = (typeof(__taint_indexof_map))InternalAlloc(\n      indexof_map_capacity * 
sizeof(*__taint_indexof_map));\n  internal_memset(__taint_indexof_map, 0,\n      indexof_map_capacity * sizeof(*__taint_indexof_map));\n  indexof_map_count = 0;\n}\n\nextern \"C\" void taint_set_str_content_label(void *addr, dfsan_label label) {\n  AOUT(\"taint_set_str_content_label: addr=%p, label=%u\\n\", addr, label);\n\n  // Grow if needed\n  if (content_map_count > (content_map_capacity * 7 / 10)) {\n    grow_content_map();\n  }\n\n  uptr hash = hash_addr((uptr)addr, content_map_capacity);\n\n  // Linear probing\n  while (__taint_content_map[hash].addr != 0 &&\n         __taint_content_map[hash].addr != (uptr)addr) {\n    hash = (hash + 1) & (content_map_capacity - 1);\n  }\n\n  if (__taint_content_map[hash].addr == 0) {\n    content_map_count++;\n  } else {\n    AOUT(\"update content label: old = %u\\n\", __taint_content_map[hash].label);\n  }\n\n  __taint_content_map[hash].addr = (uptr)addr;\n  __taint_content_map[hash].label = label;\n}\n\nextern \"C\" dfsan_label taint_get_str_content_label(const void *addr) {\n  uptr hash = hash_addr((uptr)addr, content_map_capacity);\n  uptr start = hash;\n\n  while (__taint_content_map[hash].addr != 0) {\n    if (__taint_content_map[hash].addr == (uptr)addr) {\n      AOUT(\"taint_get_str_content_label: addr=%p, found label=%u\\n\",\n           addr, __taint_content_map[hash].label);\n      return __taint_content_map[hash].label;\n    }\n    hash = (hash + 1) & (content_map_capacity - 1);\n    if (hash == start) break;\n  }\n  AOUT(\"addr=%p, not found\\n\", addr);\n  return 0;\n}\n\nextern \"C\" void taint_set_str_indexof_label(void *addr, dfsan_label label) {\n  AOUT(\"taint_set_str_indexof_label: addr=%p, label=%u\\n\", addr, label);\n\n  if (indexof_map_count > (indexof_map_capacity * 7 / 10)) {\n    grow_indexof_map();\n  }\n\n  uptr hash = hash_addr((uptr)addr, indexof_map_capacity);\n\n  while (__taint_indexof_map[hash].addr != 0 &&\n         __taint_indexof_map[hash].addr != (uptr)addr) {\n    hash = (hash + 1) & 
(indexof_map_capacity - 1);\n  }\n\n  if (__taint_indexof_map[hash].addr == 0) {\n    indexof_map_count++;\n  } else {\n    AOUT(\"update indexof label: old = %u\\n\", __taint_indexof_map[hash].label);\n  }\n\n  __taint_indexof_map[hash].addr = (uptr)addr;\n  __taint_indexof_map[hash].label = label;\n}\n\nextern \"C\" dfsan_label taint_get_str_indexof_label(const void *addr) {\n  uptr hash = hash_addr((uptr)addr, indexof_map_capacity);\n  uptr start = hash;\n\n  while (__taint_indexof_map[hash].addr != 0) {\n    if (__taint_indexof_map[hash].addr == (uptr)addr) {\n      AOUT(\"addr=%p, found label=%u\\n\", addr, __taint_indexof_map[hash].label);\n      return __taint_indexof_map[hash].label;\n    }\n    hash = (hash + 1) & (indexof_map_capacity - 1);\n    if (hash == start) break;\n  }\n  AOUT(\"addr=%p, not found\\n\", addr);\n  return 0;\n}\n\n// information is passed implicitly through flags()\nextern \"C\" void InitializeSolver();\n\nstatic void InitializeFlags() {\n  SetCommonFlagsDefaults();\n  flags().SetDefaults();\n\n  FlagParser parser;\n  RegisterCommonFlags(&parser);\n  RegisterDfsanFlags(&parser, &flags());\n  parser.ParseString(GetEnv(\"TAINT_OPTIONS\"));\n  InitializeCommonFlags();\n  if (Verbosity()) ReportUnrecognizedFlags();\n  if (common_flags()->help) parser.PrintFlagDescriptions();\n}\n\nstatic void InitializePlatformEarly() {\n  AvoidCVE_2016_2143();\n#ifdef DFSAN_RUNTIME_VMA\n  __dfsan::vmaSize =\n    (MostSignificantSetBitIndex(GET_CURRENT_FRAME()) + 1);\n  if (__dfsan::vmaSize == 39 || __dfsan::vmaSize == 42 ||\n      __dfsan::vmaSize == 48) {\n    __dfsan_shadow_ptr_mask = ShadowMask();\n  } else {\n    Printf(\"FATAL: DataFlowSanitizer: unsupported VMA range\\n\");\n    Printf(\"FATAL: Found %d - Supported 39, 42, and 48\\n\", __dfsan::vmaSize);\n    Die();\n  }\n#endif\n}\n\nstatic void dfsan_fini() {\n  if (internal_strcmp(flags().dump_labels_at_exit, \"\") != 0) {\n    fd_t fd = OpenFile(flags().dump_labels_at_exit, WrOnly);\n    if 
(fd == kInvalidFd) {\n      Report(\"WARNING: DataFlowSanitizer: unable to open output file %s\\n\",\n             flags().dump_labels_at_exit);\n      return;\n    }\n\n    Report(\"INFO: DataFlowSanitizer: dumping labels to %s\\n\",\n           flags().dump_labels_at_exit);\n    dfsan_dump_labels(fd);\n    CloseFile(fd);\n  }\n  if (tainted.buf) {\n    UnmapOrDie(tainted.buf, tainted.buf_size);\n  }\n  if (flags().shm_fd != -1) {\n    internal_munmap((void *)UnionTableAddr(), uniontable_size);\n  }\n}\n\nstatic void dfsan_init(int argc, char **argv, char **envp) {\n  InitializeFlags();\n  print_debug = flags().debug;\n\n  ::InitializePlatformEarly();\n  uptr ret;\n  int err;\n  ret = MmapFixedSuperNoReserve(ShadowAddr(), UnionTableAddr() - ShadowAddr());\n  if (internal_iserror(ret, &err)) {\n    Printf(\"FATAL: error mapping shadow %s\\n\", strerror(err));\n    Die();\n  }\n\n  // init union table\n  __dfsan_label_info = (dfsan_label_info *)UnionTableAddr();\nif (flags().shm_fd != -1) {\n    AOUT(\"shm_fd %d\\n\", flags().shm_fd);\n    ret = internal_mmap((void*)UnionTableAddr(), uniontable_size,\n        PROT_READ | PROT_WRITE, MAP_SHARED | MAP_FIXED, flags().shm_fd, 0);\n  } else {\n    ret = MmapFixedSuperNoReserve(UnionTableAddr(), uniontable_size);\n  }\n  if (internal_iserror(ret, &err)) {\n    Printf(\"FATAL: error mapping shared union table %s\\n\", strerror(err));\n    Die();\n  }\n\n  // init const label\n  internal_memset(&__dfsan_label_info[CONST_LABEL], 0, sizeof(dfsan_label_info));\n  __dfsan_label_info[CONST_LABEL].size = 8;\n\n  // init hashtable allocator\n  __taint::allocator_init(HashTableAddr(), HashTableAddr() + hashtable_size);\n\n  // init main thread\n  auto num_of_labels = uniontable_size / sizeof(dfsan_label_info);\n  __alloca_stack_top = __alloca_stack_bottom = (dfsan_label)(num_of_labels - 2);\n\n  // Protect the region of memory we don't use, to preserve the one-to-one\n  // mapping from application to shadow memory. 
But if ASLR is disabled, Linux\n  // will load our executable in the middle of our unused region. This mostly\n  // works so long as the program doesn't use too much memory. We support this\n  // case by disabling memory protection when ASLR is disabled.\n  uptr init_addr = (uptr)&dfsan_init;\n  if (!(init_addr >= UnusedAddr() && init_addr < AppAddr()))\n    MmapFixedNoAccess(UnusedAddr(), AppAddr() - UnusedAddr());\n\n  InitializeInterceptors();\n\n  InitializeTaintFile();\n\n  InitializeTaintSocket();\n\n  InitializeStringMaps();\n\n  InitializeSolver();\n\n  // Register the fini callback to run when the program terminates successfully\n  // or it is killed by the runtime.\n  Atexit(dfsan_fini);\n  AddDieCallback(dfsan_fini);\n}\n\n#if SANITIZER_CAN_USE_PREINIT_ARRAY\n__attribute__((section(\".preinit_array\"), used))\nstatic void (*dfsan_init_ptr)(int, char **, char **) = dfsan_init;\n#endif\n\nextern \"C\" {\nSANITIZER_INTERFACE_WEAK_DEF(void, InitializeSolver, void) {}\n\n// Default empty implementations (weak) for hooks\nSANITIZER_INTERFACE_WEAK_DEF(void, __taint_trace_cmp, dfsan_label, dfsan_label,\n                             uint32_t, uint32_t, uint64_t, uint64_t, uint32_t) {}\nSANITIZER_INTERFACE_WEAK_DEF(void, __taint_trace_cond, dfsan_label, bool,\n                             uint8_t, uint32_t) {}\nSANITIZER_INTERFACE_WEAK_DEF(void, __taint_trace_loop, uint32_t, uint32_t) {}\nSANITIZER_INTERFACE_WEAK_DEF(void, __taint_trace_switch_end, uint32_t) {}\nSANITIZER_INTERFACE_WEAK_DEF(dfsan_label, __taint_trace_select, dfsan_label,\n                             dfsan_label, dfsan_label, uint8_t, uint8_t, uint8_t,\n                             uint32_t) {return 0;}\nSANITIZER_INTERFACE_WEAK_DEF(void, __taint_trace_indcall, dfsan_label) {}\nSANITIZER_INTERFACE_WEAK_DEF(void, __taint_trace_gep, dfsan_label, uint64_t,\n                             dfsan_label, int64_t, uint64_t, uint64_t, int64_t,\n                             uint32_t) 
{}\nSANITIZER_INTERFACE_WEAK_DEF(void, __taint_trace_offset, dfsan_label, int64_t,\n                             unsigned) {}\nSANITIZER_INTERFACE_WEAK_DEF(void, __taint_trace_memcmp, dfsan_label) {}\nSANITIZER_INTERFACE_WEAK_DEF(void, __taint_trace_distance, uint64_t, uint64_t) {}\nSANITIZER_WEAK_ATTRIBUTE THREADLOCAL uint32_t __taint_trace_callstack;\n}  // extern \"C\"\n"
  },
  {
    "path": "runtime/dfsan/dfsan.h",
    "content": "//===-- dfsan.h -------------------------------------------------*- C++ -*-===//\n//\n//                     The LLVM Compiler Infrastructure\n//\n// This file is distributed under the University of Illinois Open Source\n// License. See LICENSE.TXT for details.\n//\n//===----------------------------------------------------------------------===//\n//\n// This file is a part of DataFlowSanitizer.\n//\n// Private DFSan header.\n//===----------------------------------------------------------------------===//\n\n#ifndef DFSAN_H\n#define DFSAN_H\n\n#include \"sanitizer_common/sanitizer_internal_defs.h\"\n#include \"dfsan_platform.h\"\n#include <stdio.h>\n#include <stdint.h>\n\nusing __sanitizer::uptr;\n\nextern bool print_debug;\n\n# define AOUT(...)                                      \\\n  do {                                                  \\\n    if (print_debug)  {                                 \\\n      Printf(\"[RT] (%s:%d) \", __FUNCTION__, __LINE__);  \\\n      Printf(__VA_ARGS__);                              \\\n    }                                                   \\\n  } while(false)\n\n// Copy declarations from public sanitizer/dfsan_interface.h header here.\ntypedef uint32_t dfsan_label;\n\ntypedef union {\n  uint64_t i;\n  float f;\n  double d;\n} data;\n\nstruct dfsan_label_info {\n  dfsan_label l1;\n  dfsan_label l2;\n  data op1;\n  data op2;\n  uint16_t op;\n  uint16_t size; // FIXME: this limit the size of the operand to 65535 bits or bytes (in case of memcmp)\n  uint32_t hash;\n} __attribute__((aligned (8), packed));\n\n#ifndef PATH_MAX\n# define PATH_MAX 4096\n#endif\n#define CONST_OFFSET 1\n#define CONST_LABEL 0\n\nstatic const size_t uniontable_size = 0xc00000000; // FIXME\n\nstruct taint_file {\n  char filename[PATH_MAX];\n  int fd;\n  off_t offset;\n  dfsan_label offset_label;\n  dfsan_label label;\n  off_t size;\n  uint8_t is_stdin;\n  uint8_t is_utmp;\n  char *buf;\n  uptr buf_size;\n};\n\nstruct taint_socket {\n  int 
family;\n  int port;\n  int fd;\n  off_t offset;\n  char host[PATH_MAX];\n};\n\nextern \"C\" {\nvoid dfsan_add_label(dfsan_label label, uint8_t op, void *addr, uptr size);\nvoid dfsan_set_label(dfsan_label label, void *addr, uptr size);\ndfsan_label dfsan_read_label(const void *addr, uptr size);\nvoid dfsan_store_label(dfsan_label l1, void *addr, uptr size);\ndfsan_label dfsan_union(dfsan_label l1, dfsan_label l2, uint16_t op, uint16_t size,\n                        uint64_t op1, uint64_t op2);\ndfsan_label dfsan_create_label(off_t offset);\ndfsan_label dfsan_get_label(const void *addr);\ndfsan_label_info* dfsan_get_label_info(dfsan_label label);\n\n// taint source\nvoid taint_set_file(int dirfd, const char *filename, int fd);\noff_t taint_get_file(int fd);\nvoid taint_close_file(int fd);\nint is_taint_file(const char *filename);\nint is_stdin_taint(void);\nvoid taint_set_offset_label(dfsan_label label);\ndfsan_label taint_get_offset_label();\n\n// taint tracking for string operations\nvoid taint_set_str_content_label(void *addr, dfsan_label label);\ndfsan_label taint_get_str_content_label(const void *addr);\nvoid taint_set_str_indexof_label(void *addr, dfsan_label label);\ndfsan_label taint_get_str_indexof_label(const void *addr);\n\n// taint source utmp\noff_t get_utmp_offset(void);\nvoid set_utmp_offset(off_t offset);\nint is_utmp_taint(void);\n\n// taint source socket\nvoid taint_set_socket(const void *addr, unsigned addrlen, int fd);\noff_t taint_get_socket(int fd);\nvoid taint_update_socket_offset(int fd, size_t size);\nvoid taint_close_socket(int fd);\n}  // extern \"C\"\n\ntemplate <typename T>\nvoid dfsan_set_label(dfsan_label label, T &data) {  // NOLINT\n  dfsan_set_label(label, (void *)&data, sizeof(T));\n}\n\nnamespace __dfsan {\n\nconst dfsan_label kInitializingLabel = -1;\n\nvoid InitializeInterceptors();\n\ninline dfsan_label *shadow_for(void *ptr) {\n  return (dfsan_label *) ((((uptr) ptr) & ShadowMask()) << 2);\n}\n\ninline const dfsan_label 
*shadow_for(const void *ptr) {\n  return shadow_for(const_cast<void *>(ptr));\n}\n\ninline void *app_for(const dfsan_label *l) {\n  return (void *) ((((uptr) l) >> 2) | AppBaseAddr());\n}\n\ndfsan_label_info* get_label_info(dfsan_label label);\n\nstruct Flags {\n#define DFSAN_FLAG(Type, Name, DefaultValue, Description) Type Name;\n#include \"dfsan_flags.inc\"\n#undef DFSAN_FLAG\n\n  void SetDefaults();\n};\n\nextern Flags flags_data;\ninline Flags &flags() {\n  return flags_data;\n}\n\n// taint source\nextern struct taint_file tainted;\nextern struct taint_socket tainted_socket;\n\nenum operators {\n  Not       = 1,\n  Neg       = 2,\n#define HANDLE_BINARY_INST(num, opcode, Class) opcode = num,\n#define HANDLE_MEMORY_INST(num, opcode, Class) opcode = num,\n#define HANDLE_CAST_INST(num, opcode, Class) opcode = num,\n#define HANDLE_OTHER_INST(num, opcode, Class) opcode = num,\n#define LAST_OTHER_INST(num) last_llvm_op = num,\n#include \"llvm/IR/Instruction.def\"\n#undef HANDLE_BINARY_INST\n#undef HANDLE_MEMORY_INST\n#undef HANDLE_CAST_INST\n#undef HANDLE_OTHER_INST\n#undef LAST_OTHER_INST // last_llvm_op = 67 for llvm14\n  // self-defined\n  Free      = last_llvm_op + 3, // 70\n  Extract   = last_llvm_op + 4, // 71\n  Concat    = last_llvm_op + 5, // 72\n  Arg       = last_llvm_op + 6, // 73\n  // higher-order\n  fmemcmp   = last_llvm_op + 7, // 74\n  fsize     = last_llvm_op + 8, // 75\n  fatoi     = last_llvm_op + 9, // 76\n  fstrlen   = last_llvm_op + 10, // 77\n  // string search ops that return positions (for chaining detection)\n  fstr_op_start = last_llvm_op + 11, // 78\n  fstrchr   = last_llvm_op + 11,  // 78 strchr/memchr\n  fstrrchr  = last_llvm_op + 12,  // 79 strrchr/memrchr\n  fstrstr   = last_llvm_op + 13,  // 80 strstr/memmem\n  fstrpbrk  = last_llvm_op + 14,  // 81 strpbrk - find first char from set\n  fstr_off  = last_llvm_op + 15,  // 82 string op + constant offset (for ptr arithmetic)\n  fsubstr   = last_llvm_op + 16,  // 83 substr(s, 0, len) - for 
bounded search\n  fstrcat   = last_llvm_op + 17,  // 84 strcat/strncat - string concatenation\n  fstr_op_end = last_llvm_op + 18, // 85\n  // string comparison (returns 0/1, NOT a position - must be outside fstr_op range)\n  fstrcmp   = last_llvm_op + 18, // 85 strcmp using Z3 string theory\n  fprefixof = last_llvm_op + 19, // 86 prefixof(str, prefix) using Z3 string theory\n  fsuffixof = last_llvm_op + 20, // 87 suffixof(str, suffix) using Z3 string theory\n  LastOp    = last_llvm_op + 21, // 88\n};\n\nenum predicate {\n  bveq = 32,\n  bvneq = 33,\n  bvugt = 34,\n  bvuge = 35,\n  bvult = 36,\n  bvule = 37,\n  bvsgt = 38,\n  bvsge = 39,\n  bvslt = 40,\n  bvsle = 41\n};\n\nstatic inline uint8_t get_const_result(uint64_t c1, uint64_t c2, uint32_t predicate) {\n  switch (predicate) {\n    case bveq:  return c1 == c2;\n    case bvneq: return c1 != c2;\n    case bvugt: return c1 > c2;\n    case bvuge: return c1 >= c2;\n    case bvult: return c1 < c2;\n    case bvule: return c1 <= c2;\n    case bvsgt: return (int64_t)c1 > (int64_t)c2;\n    case bvsge: return (int64_t)c1 >= (int64_t)c2;\n    case bvslt: return (int64_t)c1 < (int64_t)c2;\n    case bvsle: return (int64_t)c1 <= (int64_t)c2;\n    default: break;\n  }\n  return 0;\n}\n\nstatic inline bool is_commutative(unsigned char op) {\n  switch(op) {\n    case Not:\n    case And:\n    case Or:\n    case Xor:\n    case Add:\n    case Mul:\n    case fmemcmp:\n    case fstrcmp:\n      return true;\n    default:\n      return false;\n  }\n}\n\n// for out-of-process solving\n\nenum pipe_msg_type {\n  cond_type = 0,\n  gep_type = 1,\n  memcmp_type = 2,\n  fsize_type = 3,\n  memerr_type = 4,\n};\n\nstatic const uint8_t TrueBranchLoopLatch = 0x8;\nstatic const uint8_t FalseBranchLoopLatch = 0x4;\nstatic const uint8_t TrueBranchLoopExit = 0x2;\nstatic const uint8_t FalseBranchLoopExit = 0x1;\nstatic const uint8_t LoopFlagMask = 0xF;\nstatic const uint8_t UndefinedCheck = 0x10;\n\nenum undefined_check_ids {\n  ub_integer_overflow = 
1,\n  ub_division_by_zero,\n  ub_shift_exponent,\n  ub_shift_overflow,\n  ub_shift_base,\n  ub_index_underflow,\n  ub_index_overflow,\n  ub_size_underflow,\n  ub_size_overflow,\n  ub_size_to_buffer_overflow,\n  ub_integer_to_buffer_overflow,\n  ub_null_pointer,\n  ub_unsigned_integer_truncation,\n  ub_signed_integer_truncation,\n  ub_integer_sign_change,\n};\n\n#define F_ADD_CONS   0x1\n#define F_LOOP_EXIT  0x2\n#define F_LOOP_LATCH 0x4\n\n#define F_MEMERR_UAF  0x1\n#define F_MEMERR_OLB  0x2\n#define F_MEMERR_OUB  0x4\n#define F_MEMERR_UBI  0x8\n#define F_MEMERR_NULL 0x10\n#define F_MEMERR_FREE 0x20 // double free\n\nstruct pipe_msg {\n  uint16_t msg_type;\n  uint16_t flags;\n  uint32_t instance_id;\n  uptr addr;\n  uint32_t context;\n  uint32_t id;\n  uint32_t label;\n  uint64_t result;\n} __attribute__((packed));\n\n// additional info for gep\nstruct gep_msg {\n  uint32_t ptr_label;\n  uint32_t index_label;\n  uptr ptr;\n  int64_t index;\n  uint64_t num_elems;\n  uint64_t elem_size;\n  int64_t current_offset;\n} __attribute__((packed));\n\n// saving the memcmp target\nstruct memcmp_msg {\n  uint32_t label;\n  uint8_t content[0];\n} __attribute__((packed));\n\n}  // namespace __dfsan\n\n#endif  // DFSAN_H\n"
  },
  {
    "path": "runtime/dfsan/dfsan.syms.extra",
    "content": "dfsan_*\n__dfsan_*\n__dfsw_*\n__taint_*\n"
  },
  {
    "path": "runtime/dfsan/dfsan_custom.cpp",
    "content": "//===-- dfsan.cc ----------------------------------------------------------===//\n//\n// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n// See https://llvm.org/LICENSE.txt for license information.\n// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n//\n//===----------------------------------------------------------------------===//\n//\n// This file is a part of DataFlowSanitizer.\n//\n// This file defines the custom functions listed in done_abilist.txt.\n//===----------------------------------------------------------------------===//\n\n#include <arpa/inet.h>\n#include <assert.h>\n#include <ctype.h>\n#include <dlfcn.h>\n#include <errno.h>\n#include <fcntl.h>\n#include <link.h>\n#include <malloc.h>\n#include <poll.h>\n#include <pthread.h>\n#include <pwd.h>\n#include <sched.h>\n#include <signal.h>\n#include <stdarg.h>\n#include <stdint.h>\n#include <stdio.h>\n#include <stdlib.h>\n#include <string.h>\n#include <sys/epoll.h>\n#include <sys/mman.h>\n#include <sys/resource.h>\n#include <sys/select.h>\n#include <sys/socket.h>\n#include <sys/stat.h>\n#include <sys/time.h>\n#include <sys/types.h>\n#include <time.h>\n#include <unistd.h>\n#include <utmpx.h>\n#include <wchar.h>\n\n#include \"dfsan.h\"\n\n#include \"sanitizer_common/sanitizer_common.h\"\n#include \"sanitizer_common/sanitizer_internal_defs.h\"\n#include \"sanitizer_common/sanitizer_linux.h\"\n#include \"sanitizer_common/sanitizer_stackdepot.h\"\n\nusing namespace __dfsan;\n\n#define CALL_WEAK_INTERCEPTOR_HOOK(f, ...)                                     \\\n  do {                                                                         \\\n    if (f)                                                                     \\\n      f(__VA_ARGS__);                                                          \\\n  } while (false)\n#define DECLARE_WEAK_INTERCEPTOR_HOOK(f, ...) 
\\\nSANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void f(__VA_ARGS__);\n\n#define AIXCC_HACK 1\n\nstatic off_t current_stdin_offset = 0;\n\n// Check if an op is a string operation (fstr_op_start to fstr_op_end)\nstatic inline bool is_string_op(uint16_t op) {\n  return op >= __dfsan::fstr_op_start && op < __dfsan::fstr_op_end;\n}\n\n// Check if an op is an indexOf-type operation (returns position, not content)\n// These are: fstrchr, fstrrchr, fstrstr, fstrpbrk, fstr_off\nstatic inline bool is_indexof_op(uint16_t op) {\n  return op >= __dfsan::fstrchr && op <= __dfsan::fstr_off;\n}\n\n// Check if an op is a content-type string operation (fsubstr, fstrcat)\nstatic inline bool is_content_string_op(uint16_t op) {\n  return op == __dfsan::fsubstr || op == __dfsan::fstrcat;\n}\n\n// Helper: Find the first (base) input byte label from a content label.\n// Walks through Concat chains and Load operations to find the starting input.\n// Returns the base label, or 0 if not found.\nstatic dfsan_label get_base_input_label(dfsan_label label) {\n  if (label < CONST_OFFSET) return 0;\n\n  dfsan_label_info *info = dfsan_get_label_info(label);\n\n  // Base input label has op == 0\n  if (info->op == 0) return label;\n\n  // For Concat (op 72), walk left (l1) to find the base\n  if (info->op == __dfsan::Concat) {\n    return get_base_input_label(info->l1);\n  }\n\n  // For Load (op 32), l1 is the starting label\n  if (info->op == __dfsan::Load) {\n    return info->l1;\n  }\n\n  // For other ops, try l1\n  if (info->l1 >= CONST_OFFSET) {\n    return get_base_input_label(info->l1);\n  }\n\n  return 0;\n}\n\n// Helper: Find if a label derives from a string op (fstrchr, fstrrchr, fstrstr)\n// by walking through PtrToInt, Sub, Add operations.\n// Returns the string op label if found, 0 otherwise.\nstatic dfsan_label find_string_op_source(dfsan_label label) {\n  if (label < CONST_OFFSET) return 0;\n\n  dfsan_label_info *info = dfsan_get_label_info(label);\n  uint16_t op = 
info->op;\n\n  // Check if this is directly a string op\n  if (is_string_op(op)) {\n    return label;\n  }\n\n  // Follow through PtrToInt, Sub, Add to find the source string op\n  if (op == __dfsan::PtrToInt || op == __dfsan::Sub || op == __dfsan::Add) {\n    // Recursively check l1 (the primary operand)\n    if (info->l1 >= CONST_OFFSET) {\n      dfsan_label result = find_string_op_source(info->l1);\n      if (result != 0) return result;\n    }\n    // For Sub/Add, also check l2\n    if ((op == __dfsan::Sub || op == __dfsan::Add) && info->l2 >= CONST_OFFSET) {\n      dfsan_label result = find_string_op_source(info->l2);\n      if (result != 0) return result;\n    }\n  }\n\n  return 0;\n}\n\n// Unified method to get string label with explicit length\n// Checks (in order):\n// 1. Runtime content map (for strncpy/strcat destinations)\n// 2. Pointer label itself being a content-type string op (for chaining)\n// 3. indexOf map at address s for suffix case (strcpy from pos+1)\n// 4. If n_label derives from a string op, create fsubstr to preserve constraint\n// 5. Buffer content labels via dfsan_read_label\nstatic inline dfsan_label get_str_label_n(const void *s, dfsan_label s_label,\n                                           size_t n, dfsan_label n_label) {\n  AOUT(\"get_str_label_n: s=%p, s_label=%u, n=%zu, n_label=%u\\n\", s, s_label, n, n_label);\n\n  // 1. Check content map for fsubstr/fstrcat labels (from strncpy/strcat destinations)\n  dfsan_label content = taint_get_str_content_label(s);\n  if (content != 0) {\n    AOUT(\"get_str_label_n: step 1 returns content=%u\\n\", content);\n    return content;\n  }\n\n  // 2. Check if pointer label itself is a content-type string op (for chaining)\n  // Only chain on fsubstr/fstrcat, NOT indexOf ops (fstrchr, fstrrchr, etc.)\n  if (s_label >= CONST_OFFSET) {\n    dfsan_label_info *info = dfsan_get_label_info(s_label);\n    AOUT(\"get_str_label_n: step 2 s_label op=%u, is_content=%d\\n\",\n         info ? 
info->op : 0, info ? is_content_string_op(info->op) : 0);\n    if (info && is_content_string_op(info->op)) {\n      AOUT(\"get_str_label_n: step 2 returns s_label=%u\\n\", s_label);\n      return s_label;\n    }\n  }\n\n  // 3. Check for suffix case: searching from a previous indexOf result position\n  // Creates fsubstr(content, start_pos, remaining) for:\n  //   a) strcpy(suffix, pos + 1) where gep_ptr stored fstr_off at pos+1\n  //   b) memchr(t1, c, len) where t1 was returned by previous indexOf\n  dfsan_label start_label = taint_get_str_indexof_label(s);\n  if (start_label != 0) {\n    dfsan_label_info *start_info = dfsan_get_label_info(start_label);\n    if (start_info) {\n      dfsan_label indexOf_label = 0;\n\n      if (start_info->op == __dfsan::fstr_off && start_info->l1 >= CONST_OFFSET) {\n        // Case 3a: fstr_off points to indexOf op\n        indexOf_label = start_info->l1;\n      } else if (is_indexof_op(start_info->op)) {\n        // Case 3b: Direct indexOf - only if s_label confirms indexOf origin\n        // This distinguishes memchr(t1,...) from memchr(buf,...) when t1==buf\n        dfsan_label_info *s_info = (s_label >= CONST_OFFSET) ?\n                                    dfsan_get_label_info(s_label) : nullptr;\n        if (s_info && is_indexof_op(s_info->op)) {\n          indexOf_label = start_label;\n        }\n      }\n\n      if (indexOf_label != 0) {\n        dfsan_label_info *idx_info = dfsan_get_label_info(indexOf_label);\n        if (idx_info && idx_info->l1 >= CONST_OFFSET) {\n          // Create suffix fsubstr: substr(content, start_pos, remaining)\n          // l1=content, l2=position label, op1=concrete len, op2=1 (suffix mode)\n          return dfsan_union(idx_info->l1, start_label, __dfsan::fsubstr,\n                             sizeof(void*) * 8, (uint64_t)n, 1);\n        }\n      }\n    }\n  }\n\n  // 4. 
Check if n_label derives from a string op (e.g., ptr arithmetic on memchr result)\n  // If so, create fsubstr to represent substr(content, 0, idx) where idx is the string op result\n  // IMPORTANT: Do this even when n=0 to preserve the symbolic constraint!\n  dfsan_label str_op_label = find_string_op_source(n_label);\n  if (str_op_label != 0) {\n    dfsan_label_info *str_op_info = dfsan_get_label_info(str_op_label);\n    dfsan_label str_op_content = str_op_info->l1;\n\n    if (str_op_content >= CONST_OFFSET) {\n      // Get content label from buffer if available for same-buffer verification\n      dfsan_label content_label = (n > 0) ? dfsan_read_label(s, n) : 0;\n\n      // Verify same underlying buffer only if content is available\n      bool same_buffer = true;\n      if (content_label != 0) {\n        dfsan_label src_base = get_base_input_label(content_label);\n        dfsan_label str_op_base = get_base_input_label(str_op_content);\n        same_buffer = (src_base != 0 && src_base == str_op_base);\n      }\n      // When n=0, trust that n_label derives from same buffer\n      // (the alternative is losing the constraint entirely)\n\n      if (same_buffer) {\n        // Create fsubstr: substr(str_op_content, 0, str_op_label)\n        // l1 = original content, l2 = string op label (index), op1 = concrete n, op2 = 0\n        return dfsan_union(str_op_content, str_op_label, __dfsan::fsubstr,\n                           sizeof(void*) * 8, (uint64_t)n, 0);\n      }\n    }\n  }\n\n  // 5. 
Fall back to reading buffer content labels\n  return dfsan_read_label(s, n);\n}\n\n// Unified method to get string label for null-terminated strings\n// Uses strlen to determine length\n// Also checks if null terminator was placed at a strchr/strstr result position\nstatic inline dfsan_label get_str_label(const char *s, dfsan_label s_label) {\n  size_t len = strlen(s);\n\n  // Check if null terminator was placed at a position found by strchr/strstr/etc.\n  // This allows us to recover symbolic length when code does:\n  //   pos = strchr(buf, '_'); *pos = '\\0'; strcpy(dest, buf);\n  dfsan_label term_label = taint_get_str_indexof_label(s + len);\n\n  return get_str_label_n(s, s_label, len + 1, term_label);\n}\n\nstatic inline dfsan_label get_label_for(int fd, off_t offset) {\n  // check if fd is stdin, if so, the label hasn't been pre-allocated\n  if (is_stdin_taint() || (fd ==0 && flags().force_stdin))\n    return dfsan_create_label(current_stdin_offset++);\n  // if fd is a tainted file, the label should have been pre-allocated\n  else return (offset + CONST_OFFSET);\n}\n\nstatic void *dfsan_memcpy(void *dest, const void *src, size_t n) {\n  if (n == 0) return dest;\n  dfsan_label *sdest = shadow_for(dest);\n  const dfsan_label *ssrc = shadow_for(src);\n  // FIXME: check and avoid copying labels?\n  internal_memcpy((void *)sdest, (const void *)ssrc, n * sizeof(dfsan_label));\n  return internal_memcpy(dest, src, n);\n}\n\nstatic void dfsan_memset(void *s, int c, dfsan_label c_label, size_t n) {\n  if (n == 0) return;\n  internal_memset(s, c, n);\n  dfsan_set_label(c_label, s, n);\n}\n\nextern \"C\" SANITIZER_INTERFACE_ATTRIBUTE void\n__taint_trace_offset(dfsan_label offset_label, int64_t offset, unsigned size);\n\nextern \"C\" SANITIZER_INTERFACE_ATTRIBUTE void\n__taint_trace_memcmp(dfsan_label label);\n\nextern \"C\" SANITIZER_INTERFACE_ATTRIBUTE\nvoid __taint_check_bounds(dfsan_label addr_label, uptr addr,\n                          dfsan_label size_label, 
uint64_t size);\n\nextern \"C\" SANITIZER_INTERFACE_ATTRIBUTE\nvoid __taint_trace_cond(dfsan_label label, bool r, uint8_t flag, uint32_t cid);\n\nextern \"C\" SANITIZER_INTERFACE_ATTRIBUTE\nvoid __taint_solve_bounds(dfsan_label ptr_label, uint64_t ptr,\n                          dfsan_label index_label, int64_t index,\n                          uint64_t num_elems, uint64_t elem_size,\n                          int64_t current_offset, uint32_t cid);\n\nextern \"C\" SANITIZER_INTERFACE_ATTRIBUTE\nvoid __taint_solve_size(dfsan_label ptr_label, uint64_t ptr,\n                        dfsan_label size_label, uint64_t size,\n                        uint32_t cid);\n\nextern \"C\" SANITIZER_INTERFACE_ATTRIBUTE\nvoid __taint_trace_memerr(dfsan_label ptr_label, uptr ptr,\n                          dfsan_label size_label, uint64_t size,\n                          uint16_t flag, void *addr);\n\nextern \"C\" {\nSANITIZER_INTERFACE_ATTRIBUTE int\n__dfsw_stat(const char *path, struct stat *buf, dfsan_label path_label,\n            dfsan_label buf_label, dfsan_label *ret_label) {\n  int ret = stat(path, buf);\n  if (ret == 0) {\n    dfsan_set_label(0, buf, sizeof(struct stat));\n    if (flags().trace_fsize && is_taint_file(path)) {\n      dfsan_label size = dfsan_union(0, 0, fsize, sizeof(buf->st_size) * 8, 0, 0);\n      dfsan_set_label(size, &buf->st_size, sizeof(buf->st_size));\n    }\n  }\n  *ret_label = 0;\n  return ret;\n}\n\n#if __GLIBC__ <= 2 && __GLIBC_MINOR__ < 33\nSANITIZER_INTERFACE_ATTRIBUTE int\n__dfsw___xstat(int vers, const char *path, struct stat *buf,\n               dfsan_label vers_label, dfsan_label path_label,\n               dfsan_label buf_label, dfsan_label *ret_label) {\n  int ret = __xstat(vers, path, buf);\n  if (ret == 0) {\n    dfsan_set_label(0, buf, sizeof(struct stat));\n    if (flags().trace_fsize && is_taint_file(path)) {\n      dfsan_label size = dfsan_union(0, 0, fsize, sizeof(buf->st_size) * 8, 0, 0);\n      dfsan_set_label(size, &buf->st_size, 
sizeof(buf->st_size));\n    }\n  }\n  *ret_label = 0;\n  return ret;\n}\n\nSANITIZER_INTERFACE_ATTRIBUTE int\n__dfsw___fxstat(int vers, const int fd, struct stat *buf,\n                dfsan_label vers_label, dfsan_label fd_label,\n                dfsan_label buf_label, dfsan_label *ret_label) {\n  int ret = __fxstat(vers, fd, buf);\n  if (ret == 0) {\n    dfsan_set_label(0, buf, sizeof(struct stat));\n    if (flags().trace_fsize && taint_get_file(fd)) {\n      dfsan_label size = dfsan_union(0, 0, fsize, sizeof(buf->st_size) * 8, 0, 0);\n      dfsan_set_label(size, &buf->st_size, sizeof(buf->st_size));\n    }\n  }\n  *ret_label = 0;\n  return ret;\n}\n\nSANITIZER_INTERFACE_ATTRIBUTE int\n__dfsw___lxstat(int vers, const char *path, struct stat *buf,\n                dfsan_label vers_label, dfsan_label path_label,\n                dfsan_label buf_label, dfsan_label *ret_label) {\n  int ret = __lxstat(vers, path, buf);\n  if (ret == 0) {\n    dfsan_set_label(0, buf, sizeof(struct stat));\n    if (flags().trace_fsize && is_taint_file(path)) {\n      dfsan_label size = dfsan_union(0, 0, fsize, sizeof(buf->st_size) * 8, 0, 0);\n      dfsan_set_label(size, &buf->st_size, sizeof(buf->st_size));\n    }\n  }\n  *ret_label = 0;\n  return ret;\n}\n#endif\n\nSANITIZER_INTERFACE_ATTRIBUTE int __dfsw_fstat(int fd, struct stat *buf,\n                                               dfsan_label fd_label,\n                                               dfsan_label buf_label,\n                                               dfsan_label *ret_label) {\n  int ret = fstat(fd, buf);\n  if (ret == 0) {\n    dfsan_set_label(0, buf, sizeof(struct stat));\n    if (flags().trace_fsize && taint_get_file(fd)) {\n      dfsan_label size = dfsan_union(0, 0, fsize, sizeof(buf->st_size) * 8, 0, 0);\n      dfsan_set_label(size, &buf->st_size, sizeof(buf->st_size));\n    }\n  }\n  *ret_label = 0;\n  return ret;\n}\n\nSANITIZER_INTERFACE_ATTRIBUTE int\n__dfsw_lstat(const char *path, struct stat *buf, 
dfsan_label path_label,\n             dfsan_label buf_label, dfsan_label *ret_label) {\n  int ret = lstat(path, buf);\n  if (ret == 0) {\n    dfsan_set_label(0, buf, sizeof(struct stat));\n    if (flags().trace_fsize && is_taint_file(path)) {\n      dfsan_label size = dfsan_union(0, 0, fsize, sizeof(buf->st_size) * 8, 0, 0);\n      dfsan_set_label(size, &buf->st_size, sizeof(buf->st_size));\n    }\n  }\n  *ret_label = 0;\n  return ret;\n}\n\n// Create a label for string op + constant offset (for pointer arithmetic like sep + 1)\n// If base_label is a string op, returns a new fstr_off label; otherwise returns base_label\nextern \"C\" SANITIZER_INTERFACE_ATTRIBUTE\nvoid __taint_trace_gep_ptr(dfsan_label base_label, char *result, char *base) {\n  if (base_label < CONST_OFFSET) return;\n\n  // Check if base_label is or derives from a string op\n  dfsan_label str_op_label = find_string_op_source(base_label);\n  if (str_op_label == 0) {\n    // Not a string op - return base label unchanged\n    return;\n  }\n\n  // Create fstr_off label: l1=str_op_label, op1=offset\n  // This represents the content at (string_op_position + offset)\n  uint64_t offset = (uint64_t)(result - base);\n  dfsan_label off_label = dfsan_union(str_op_label, 0, __dfsan::fstr_off,\n                                       sizeof(void*) * 8,\n                                       0, (uint64_t)offset);\n  AOUT(\"gep_ptr: base=%u, str_op=%u, offset=%ld, result=%u\\n\",\n       base_label, str_op_label, offset, off_label);\n\n  // record the label (fstr_off is an indexOf-type op)\n  taint_set_str_indexof_label(result, off_label);\n}\n\nSANITIZER_INTERFACE_ATTRIBUTE char *__dfsw_strchr(char *s, int c,\n                                                  dfsan_label s_label,\n                                                  dfsan_label c_label,\n                                                  dfsan_label *ret_label) {\n  char *ret = strchr(s, c);\n\n  // Use unified get_str_label to get source label\n  // 
Handles str_map, pointer label fsubstr, and buffer content\n  dfsan_label src_label = get_str_label(s, s_label);\n\n  // Create label if source or char is tainted\n  if (src_label != 0 || c_label != 0) {\n    // Determine which operand is concrete and set size accordingly\n    size_t haystack_len = strlen(s);\n    uint16_t content_len = (src_label == 0) ? (uint16_t)haystack_len : 0;\n\n    // l1 = src_label (source - for chaining or content dependencies)\n    // l2 = c_label (target char - may be symbolic!)\n    // op1 = haystack pointer (for concrete content retrieval)\n    // op2 = char value\n    // size = haystack length if concrete, else 0\n    *ret_label = dfsan_union(src_label, c_label, __dfsan::fstrchr,\n                             content_len,\n                             (uint64_t)s,\n                             (uint64_t)(uint8_t)c);\n\n    // Send concrete haystack content if haystack is concrete\n    if (content_len > 0 && *ret_label) {\n      __taint_trace_memcmp(*ret_label);\n    }\n\n    // Store the result pointer to recover symbolic length\n    if (ret) {\n      taint_set_str_indexof_label(ret, *ret_label);\n    }\n  } else {\n    *ret_label = 0;\n  }\n  return ret;\n}\n\nSANITIZER_INTERFACE_ATTRIBUTE char *__dfsw_strpbrk(const char *s,\n                                                   const char *accept,\n                                                   dfsan_label s_label,\n                                                   dfsan_label accept_label,\n                                                   dfsan_label *ret_label) {\n  const char *ret = strpbrk(s, accept);\n  size_t accept_len = strlen(accept);\n\n  // Use unified get_str_label for source string\n  dfsan_label src_label = get_str_label(s, s_label);\n\n  // Use unified get_str_label for accept string\n  dfsan_label real_accept_label = get_str_label(accept, accept_label);\n\n  if (src_label != 0 || real_accept_label != 0) {\n    // Determine which operand is concrete and set size 
accordingly\n    size_t haystack_len = strlen(s);\n    uint16_t content_len = 0;\n    if (src_label == 0) {\n      content_len = (uint16_t)haystack_len;\n    } else if (real_accept_label == 0) {\n      content_len = (uint16_t)accept_len;\n    }\n\n    // l1 = src_label (source content)\n    // l2 = accept_label (character set - may be symbolic)\n    // op1 = haystack pointer (for concrete content retrieval)\n    // op2 = accept pointer (for concrete content retrieval)\n    // size = haystack length if haystack concrete, else accept length if accept concrete, else 0\n    dfsan_label label = dfsan_union(src_label, real_accept_label, __dfsan::fstrpbrk,\n                                    content_len,\n                                    (uint64_t)s,\n                                    (uint64_t)accept);\n\n    // Send concrete content (haystack or accept)\n    if (content_len > 0 && label) {\n      __taint_trace_memcmp(label);\n    }\n\n    *ret_label = label;\n    // Store the result pointer to recover symbolic length\n    if (ret) {\n      taint_set_str_indexof_label(const_cast<char *>(ret), *ret_label);\n    }\n  } else {\n    *ret_label = 0;\n  }\n  return const_cast<char *>(ret);\n}\n\nDECLARE_WEAK_INTERCEPTOR_HOOK(dfsan_weak_hook_memcmp, uptr caller_pc,\n                              const void *s1, const void *s2, size_t n,\n                              dfsan_label s1_label, dfsan_label s2_label,\n                              dfsan_label n_label)\n\nSANITIZER_INTERFACE_ATTRIBUTE int __dfsw_memcmp(const void *s1, const void *s2,\n                                                size_t n, dfsan_label s1_label,\n                                                dfsan_label s2_label,\n                                                dfsan_label n_label,\n                                                dfsan_label *ret_label) {\n  CALL_WEAK_INTERCEPTOR_HOOK(dfsan_weak_hook_memcmp, GET_CALLER_PC(), s1, s2, n,\n                             s1_label, s2_label, 
n_label);\n  __taint_check_bounds(s1_label, (uptr)s1, n_label, n);\n  __taint_check_bounds(s2_label, (uptr)s2, n_label, n);\n  int ret = memcmp(s1, s2, n);\n\n  // Check for fsubstr labels\n  dfsan_label l1 = get_str_label_n(s1, s1_label, n, n_label);\n  dfsan_label l2 = get_str_label_n(s2, s2_label, n, n_label);\n\n  if (l1 == 0 && l2 == 0) {\n    *ret_label = 0;\n    return ret;\n  }\n\n  // Check if either side is a string op - use string theory comparison\n  bool l1_is_string_op = (l1 >= CONST_OFFSET && is_string_op(dfsan_get_label_info(l1)->op));\n  bool l2_is_string_op = (l2 >= CONST_OFFSET && is_string_op(dfsan_get_label_info(l2)->op));\n\n  uint16_t op = (l1_is_string_op || l2_is_string_op) ? __dfsan::fstrcmp : __dfsan::fmemcmp;\n  dfsan_label cmp = dfsan_union(l1, l2, op, n, (uint64_t)s1, (uint64_t)s2);\n  if (cmp) __taint_trace_memcmp(cmp);\n    *ret_label = cmp;\n  return ret;\n}\n\nSANITIZER_INTERFACE_ATTRIBUTE int __dfsw_bcmp(const void *s1, const void *s2,\n                                              size_t n, dfsan_label s1_label,\n                                              dfsan_label s2_label,\n                                              dfsan_label n_label,\n                                              dfsan_label *ret_label) {\n  __taint_check_bounds(s1_label, (uptr)s1, n_label, n);\n  __taint_check_bounds(s2_label, (uptr)s2, n_label, n);\n  int ret = bcmp(s1, s2, n);\n\n  // Check for fsubstr labels (from strncpy with symbolic length)\n  dfsan_label l1 = get_str_label_n(s1, s1_label, n, n_label);\n  dfsan_label l2 = get_str_label_n(s2, s2_label, n, n_label);\n\n  if (l1 == 0 && l2 == 0) {\n    *ret_label = 0;\n    return ret;\n  }\n\n  // Check if either side is a string op - use string theory comparison\n  bool l1_is_string_op = (l1 >= CONST_OFFSET && is_string_op(dfsan_get_label_info(l1)->op));\n  bool l2_is_string_op = (l2 >= CONST_OFFSET && is_string_op(dfsan_get_label_info(l2)->op));\n\n  uint16_t op = (l1_is_string_op || 
l2_is_string_op) ? __dfsan::fstrcmp : __dfsan::fmemcmp;\n  dfsan_label cmp = dfsan_union(l1, l2, op, n, (uint64_t)s1, (uint64_t)s2);\n  if (cmp) __taint_trace_memcmp(cmp);\n    *ret_label = cmp;\n  return ret;\n}\n\nDECLARE_WEAK_INTERCEPTOR_HOOK(dfsan_weak_hook_strcmp, uptr caller_pc,\n                              const char *s1, const char *s2,\n                              dfsan_label s1_label, dfsan_label s2_label)\n\nSANITIZER_INTERFACE_ATTRIBUTE int __dfsw_strcmp(const char *s1, const char *s2,\n                                                dfsan_label s1_label,\n                                                dfsan_label s2_label,\n                                                dfsan_label *ret_label) {\n  CALL_WEAK_INTERCEPTOR_HOOK(dfsan_weak_hook_strcmp, GET_CALLER_PC(), s1, s2,\n                             s1_label, s2_label);\n  int ret = strcmp(s1, s2);\n\n  AOUT(\"strcmp: s1=%p s2=%p s1_label=%u s2_label=%u\\n\", s1, s2, s1_label, s2_label);\n\n  // Use unified get_str_label to get labels for both strings\n  // Handles str_map, pointer label fsubstr, and buffer content\n  dfsan_label l1 = get_str_label(s1, s1_label);\n  dfsan_label l2 = get_str_label(s2, s2_label);\n  AOUT(\"strcmp: l1=%u l2=%u\\n\", l1, l2);\n\n  if (l1 == 0 && l2 == 0) {\n    *ret_label = 0;\n  } else {\n    // Determine length for comparison (use concrete side if one is fsubstr)\n    size_t n = strlen(s1) + 1;\n    dfsan_label s1_fsubstr = taint_get_str_content_label(s1);\n    if (s1_fsubstr != 0)\n      n = strlen(s2) + 1;  // use concrete side for length\n\n    // fstrcmp is commutative - dfsan_union will swap to put concrete in op1\n    dfsan_label cmp = dfsan_union(l1, l2, __dfsan::fstrcmp, n,\n                                   (uint64_t)s1, (uint64_t)s2);\n    if (cmp) __taint_trace_memcmp(cmp);\n    *ret_label = cmp;\n  }\n  return ret;\n}\n\nSANITIZER_INTERFACE_ATTRIBUTE int __dfsw_prefixof(\n    const char *str, const char *prefix,\n    dfsan_label str_label, 
dfsan_label prefix_label,\n    dfsan_label *ret_label) {\n\n  // Execute concrete operation (simple check)\n  int ret = 0;\n  size_t prefix_len = strlen(prefix);\n  size_t str_len = strlen(str);\n  if (str_len >= prefix_len && memcmp(str, prefix, prefix_len) == 0) {\n    ret = 1;\n  }\n\n  // Get unified labels (handles fsubstr chaining and content maps)\n  dfsan_label l1 = get_str_label(str, str_label);\n  dfsan_label l2 = get_str_label(prefix, prefix_label);\n\n  if (l1 == 0 && l2 == 0) {\n    *ret_label = 0;\n  } else {\n    // Determine length for memcmp_cache (use concrete side if one is fsubstr)\n    size_t n = strlen(str) + 1;\n    dfsan_label str_fsubstr = taint_get_str_content_label(str);\n    if (str_fsubstr != 0)\n      n = strlen(prefix) + 1;  // use concrete side for length\n\n    // Create label - fprefixof is commutative, dfsan_union will normalize\n    dfsan_label cmp = dfsan_union(l1, l2, __dfsan::fprefixof, n,\n                                   (uint64_t)str, (uint64_t)prefix);\n    if (cmp) __taint_trace_memcmp(cmp);\n    *ret_label = cmp;\n  }\n  return ret;\n}\n\nSANITIZER_INTERFACE_ATTRIBUTE int __dfsw_suffixof(\n    const char *str, const char *suffix,\n    dfsan_label str_label, dfsan_label suffix_label,\n    dfsan_label *ret_label) {\n\n  // Execute concrete operation\n  int ret = 0;\n  size_t suffix_len = strlen(suffix);\n  size_t str_len = strlen(str);\n  if (str_len >= suffix_len &&\n      memcmp(str + (str_len - suffix_len), suffix, suffix_len) == 0) {\n    ret = 1;\n  }\n\n  // Get unified labels\n  dfsan_label l1 = get_str_label(str, str_label);\n  dfsan_label l2 = get_str_label(suffix, suffix_label);\n\n  if (l1 == 0 && l2 == 0) {\n    *ret_label = 0;\n  } else {\n    // Determine length for memcmp_cache\n    size_t n = strlen(str) + 1;\n    dfsan_label str_fsubstr = taint_get_str_content_label(str);\n    if (str_fsubstr != 0)\n      n = strlen(suffix) + 1;\n\n    // Create label\n    dfsan_label cmp = dfsan_union(l1, l2, 
__dfsan::fsuffixof, n,\n                                   (uint64_t)str, (uint64_t)suffix);\n    if (cmp) __taint_trace_memcmp(cmp);\n    *ret_label = cmp;\n  }\n  return ret;\n}\n\nSANITIZER_INTERFACE_ATTRIBUTE char *__dfsw_strsub(\n    const char *s, size_t start, size_t len,\n    dfsan_label s_label, dfsan_label start_label, dfsan_label len_label,\n    dfsan_label *ret_label) {\n\n  *ret_label = 0;\n  // Execute concrete operation\n  // Skip 'start' characters, then duplicate 'len' characters\n  if (s == NULL || len == 0) {\n    return NULL;\n  }\n\n  size_t str_len = strlen(s);\n  if (start >= str_len) {\n    return NULL;\n  }\n\n  // Point to start position\n  const char *src = s + start;\n  size_t remaining = str_len - start;\n  size_t copy_len = (len < remaining) ? len : remaining;\n\n  // Allocate and copy substring (like strndup)\n  char *p = (char *)malloc(copy_len + 1);\n  if (p == NULL) {\n    return NULL;\n  }\n  dfsan_memcpy(p, src, copy_len);\n  p[copy_len] = '\\0';\n\n  // Get unified label for the string\n  dfsan_label str_label = get_str_label(s, s_label);\n\n  if (str_label == 0 && start_label == 0 && len_label == 0) {\n    // No taint, nothing to propagate\n  } else {\n    // Compose strsub(str, start, len) using two fsubstr operations:\n    // 1. suffix_from_pos(str, start) = str[start:] using fsubstr with op2=1 (suffix mode)\n    // 2. 
prefix(suffix, len) = suffix[0:len] using fsubstr with op2=0 (prefix mode)\n\n    // Step 1: Create suffix label representing str[start:]\n    // l1 = string content, l2 = start position label\n    // op1 = concrete remaining length, op2 = 1 (suffix mode)\n    dfsan_label suffix_label = str_label;\n    if (start_label != 0 || start > 0) {\n      suffix_label = dfsan_union(str_label, start_label, __dfsan::fsubstr,\n                                  sizeof(void*) * 8, (uint64_t)remaining, 1);\n    }\n\n    // Step 2: Take first len chars from suffix: suffix[0:len]\n    // l1 = suffix label, l2 = len label\n    // op1 = concrete len, op2 = 0 (prefix mode)\n    dfsan_label substr_label = dfsan_union(suffix_label, len_label, __dfsan::fsubstr,\n                                            sizeof(void*) * 8, (uint64_t)copy_len, 0);\n\n    // Store label in content map so downstream ops can find it\n    if (substr_label != 0) {\n      taint_set_str_content_label(p, substr_label);\n      *ret_label = substr_label;\n    }\n  }\n\n  return p;\n}\n\nSANITIZER_INTERFACE_ATTRIBUTE int\n__dfsw_strcasecmp(const char *s1, const char *s2, dfsan_label s1_label,\n                  dfsan_label s2_label, dfsan_label *ret_label) {\n  int ret = strcasecmp(s1, s2);\n  // doing an optimistic solving, hoping we can get the same case\n  // Use unified get_str_label for fsubstr support\n  dfsan_label l1 = get_str_label(s1, s1_label);\n  dfsan_label l2 = get_str_label(s2, s2_label);\n\n  if (l1 == 0 && l2 == 0) {\n    *ret_label = 0;\n  } else {\n    size_t n = strlen(s1) + 1;\n    dfsan_label s1_fsubstr = taint_get_str_content_label(s1);\n    if (s1_fsubstr != 0)\n      n = strlen(s2) + 1;\n\n    // fstrcmp is commutative - dfsan_union will swap to put concrete in op1\n    dfsan_label cmp = dfsan_union(l1, l2, __dfsan::fstrcmp, n,\n                                   (uint64_t)s1, (uint64_t)s2);\n    if (cmp) __taint_trace_memcmp(cmp);\n    *ret_label = cmp;\n  }\n  return 
ret;\n}\n\nDECLARE_WEAK_INTERCEPTOR_HOOK(dfsan_weak_hook_strncmp, uptr caller_pc,\n                              const char *s1, const char *s2, size_t n,\n                              dfsan_label s1_label, dfsan_label s2_label,\n                              dfsan_label n_label)\n\nSANITIZER_INTERFACE_ATTRIBUTE int __dfsw_strncmp(const char *s1, const char *s2,\n                                                 size_t n, dfsan_label s1_label,\n                                                 dfsan_label s2_label,\n                                                 dfsan_label n_label,\n                                                 dfsan_label *ret_label) {\n  if (n == 0) {\n    *ret_label = 0;\n    return 0;\n  }\n\n  CALL_WEAK_INTERCEPTOR_HOOK(dfsan_weak_hook_strncmp, GET_CALLER_PC(), s1, s2,\n                             n, s1_label, s2_label, n_label);\n\n  int ret = strncmp(s1, s2, n);\n\n  // Use unified get_str_label for fsubstr support\n  dfsan_label l1 = get_str_label(s1, s1_label);\n  dfsan_label l2 = get_str_label(s2, s2_label);\n\n  if (l1 == 0 && l2 == 0) {\n    *ret_label = 0;\n  } else {\n    // Adjust n for shorter strings when one side is concrete\n    if (l1 == 0 && strlen(s1) < (n - 1))\n      n = strlen(s1) + 1;\n    if (l2 == 0 && strlen(s2) < (n - 1))\n      n = strlen(s2) + 1;\n\n    dfsan_label cmp = dfsan_union(l1, l2, __dfsan::fstrcmp, n,\n                                   (uint64_t)s1, (uint64_t)s2);\n    if (cmp) __taint_trace_memcmp(cmp);\n    *ret_label = cmp;\n  }\n  return ret;\n}\n\nSANITIZER_INTERFACE_ATTRIBUTE int\n__dfsw_strncasecmp(const char *s1, const char *s2, size_t n,\n                   dfsan_label s1_label, dfsan_label s2_label,\n                   dfsan_label n_label, dfsan_label *ret_label) {\n  if (n == 0) {\n    *ret_label = 0;\n    return 0;\n  }\n\n  int ret = strncasecmp(s1, s2, n);\n  // doing an optimistic solving here too, hoping the case can be the same\n  // Use unified get_str_label for fsubstr support\n  
dfsan_label l1 = get_str_label(s1, s1_label);
  dfsan_label l2 = get_str_label(s2, s2_label);

  if (l1 == 0 && l2 == 0) {
    *ret_label = 0;
  } else {
    // Adjust n for shorter strings when one side is concrete
    if (l1 == 0 && strlen(s1) < (n - 1))
      n = strlen(s1) + 1;
    if (l2 == 0 && strlen(s2) < (n - 1))
      n = strlen(s2) + 1;

    dfsan_label cmp = dfsan_union(l1, l2, __dfsan::fstrcmp, n,
                                   (uint64_t)s1, (uint64_t)s2);
    if (cmp) __taint_trace_memcmp(cmp);
    *ret_label = cmp;
  }
  return ret;
}

// Taint wrapper for strlen(): returns the concrete length, but when the
// string bytes are tainted labels the result with an fstrlen union so the
// length itself becomes symbolic for the solver.
SANITIZER_INTERFACE_ATTRIBUTE size_t
__dfsw_strlen(const char *s, dfsan_label s_label, dfsan_label *ret_label) {
  size_t ret = strlen(s);
  dfsan_label str_label = dfsan_read_label(s, ret + 1);

  if (str_label == 0) {
    *ret_label = 0;
  } else {
    // Check if the null terminator byte is from input (tainted)
    // If not, it was added programmatically (e.g., by the program setting '\0')
    dfsan_label null_label = dfsan_read_label(s + ret, 1);
    bool null_from_input = (null_label != 0);

    // Create fstrlen label:
    // - l1 = 0 (following fsize/fatoi pattern to avoid Alloca rejection)
    // - l2 = str_label (content label for dependencies)
    // - op1 = null_from_input flag (1 if null is from input, 0 if programmatic)
    // - op2 = actual length (for solution generation)
    // Note: str_label contains the offset info via Load labels
    *ret_label = dfsan_union(0, str_label, fstrlen,
                             sizeof(size_t) * 8,
                             null_from_input ? /* continued */
1 : 0, ret);\n  }\n  return ret;\n}\n\nSANITIZER_INTERFACE_ATTRIBUTE\nvoid *__dfsw_memcpy(void *dest, const void *src, size_t n,\n                    dfsan_label dest_label, dfsan_label src_label,\n                    dfsan_label n_label, dfsan_label *ret_label) {\n  __taint_check_bounds(src_label, (uptr)src, n_label, n);\n  __taint_check_bounds(dest_label, (uptr)dest, n_label, n);\n  if (n_label) {\n    __taint_solve_bounds(src_label, (uint64_t)src, n_label, n, 0, 1, 0, 0);\n    __taint_solve_bounds(dest_label, (uint64_t)dest, n_label, n, 0, 1, 0, 0);\n  }\n  *ret_label = dest_label;\n  return dfsan_memcpy(dest, src, n);\n}\n\nSANITIZER_INTERFACE_ATTRIBUTE\nvoid *__dfsw_memmove(void *dest, const void *src, size_t n,\n                     dfsan_label dest_label, dfsan_label src_label,\n                     dfsan_label n_label, dfsan_label *ret_label) {\n  __taint_check_bounds(src_label, (uptr)src, n_label, n);\n  __taint_check_bounds(dest_label, (uptr)dest, n_label, n);\n  if (n_label) {\n    __taint_solve_bounds(src_label, (uint64_t)src, n_label, n, 0, 1, 0, 0);\n    __taint_solve_bounds(dest_label, (uint64_t)dest, n_label, n, 0, 1, 0, 0);\n  }\n  dfsan_label tmp[n];\n  dfsan_label *sdest = shadow_for(dest);\n  const dfsan_label *ssrc = shadow_for(src);\n  internal_memcpy((void *)tmp, (const void *)ssrc, n * sizeof(dfsan_label));\n  void *ret = internal_memmove(dest, src, n);\n  internal_memcpy((void *)sdest, (const void *)tmp, n * sizeof(dfsan_label));\n  *ret_label = dest_label;\n  return ret;\n}\n\nSANITIZER_INTERFACE_ATTRIBUTE\nvoid *__dfsw_memset(void *s, int c, size_t n,\n                    dfsan_label s_label, dfsan_label c_label,\n                    dfsan_label n_label, dfsan_label *ret_label) {\n  __taint_check_bounds(s_label, (uptr)s, n_label, n);\n  if (n_label)\n    __taint_solve_bounds(s_label, (uint64_t)s, n_label, n, 0, 1, 0, 0);\n  dfsan_memset(s, c, c_label, n);\n  *ret_label = s_label;\n  return s;\n}\n\nSANITIZER_INTERFACE_ATTRIBUTE\nint 
__dfsw_tolower(int c, dfsan_label c_label, dfsan_label *ret_label) {
  int ret = tolower(c);
  // Model the result as (c | 0x20) over 8 bits.
  // NOTE(review): this matches tolower() only for ASCII letters; for other
  // bytes it is an approximation.
  *ret_label = dfsan_union(0, c_label, __dfsan::Or, 8, 0x20, 0);
  return ret;
}

// Taint wrapper for toupper(): models the result as (c & 0x5f) over 8 bits
// (ASCII-only approximation, mirroring __dfsw_tolower above).
SANITIZER_INTERFACE_ATTRIBUTE
int __dfsw_toupper(int c, dfsan_label c_label, dfsan_label *ret_label) {
  int ret = toupper(c);
  *ret_label = dfsan_union(0, c_label, __dfsan::And, 8, 0x5f, 0);
  return ret;
}

// Taint wrapper for strcat(): appends src to dest and, when either string is
// tainted, models the result as an fstrcat union of the two content labels.
SANITIZER_INTERFACE_ATTRIBUTE
char *__dfsw_strcat(char *dest, const char *src, dfsan_label d_label,
                    dfsan_label s_label, dfsan_label *ret_label) {
  size_t dest_len = strlen(dest);
  size_t copy_len = strlen(src) + 1; // including tailing '\0'
  __taint_check_bounds(d_label, (uptr)dest, 0, dest_len + copy_len);

  // Get labels for both strings using unified label retrieval
  dfsan_label dest_str_label = get_str_label(dest, d_label);
  dfsan_label src_str_label = get_str_label(src, s_label);

  AOUT("strcat: dest=%p, src=%p, dest_label=%u, src_label=%u, "
       "dest_str_label=%u, src_str_label=%u\n",
       dest, src, d_label, s_label, dest_str_label, src_str_label);

  // Perform the actual strcat (copy src to dest + dest_len)
  dfsan_memcpy(dest + dest_len, src, copy_len);

  // If either string is tainted, create fstrcat label
  if (dest_str_label != 0 || src_str_label != 0) {
    // Create fstrcat: l1=dest, l2=src
    // op1=dest pointer, op2=src pointer (for concrete content access)
    // size = length of concrete operand (for memcmp_cache), 0 if both symbolic
    size_t src_len = copy_len - 1;  // excluding null
    uint16_t concrete_len = 0;
    if (dest_str_label == 0) {
      concrete_len = (uint16_t)dest_len;
    } else if (src_str_label == 0) {
      concrete_len = (uint16_t)src_len;
    }
    dfsan_label concat_label = dfsan_union(dest_str_label, src_str_label,
                                            __dfsan::fstrcat,
                                            concrete_len,
                        
                    (uint64_t)dest,\n                                            (uint64_t)src);\n    AOUT(\"strcat: created fstrcat label=%u\\n\", concat_label);\n    // Send concrete content through pipe if one side is concrete\n    if (concrete_len > 0) {\n      __taint_trace_memcmp(concat_label);\n    }\n    // Store in str_map so downstream ops can find it\n    taint_set_str_content_label(dest, concat_label);\n  }\n\n  *ret_label = d_label;\n  return dest;\n}\n\nSANITIZER_INTERFACE_ATTRIBUTE char *\n__dfsw_strncat(char *dest, const char *src, size_t n,\n               dfsan_label d_label, dfsan_label s_label, dfsan_label n_label,\n               dfsan_label *ret_label) {\n  size_t dest_len = strlen(dest);\n  size_t src_len = strlen(src);\n  size_t copy_len = (n < src_len) ? n : src_len;  // min(n, strlen(src))\n  __taint_check_bounds(d_label, (uptr)dest, 0, dest_len + copy_len + 1);\n\n  AOUT(\"strncat: dest=%p, src=%p, n=%zu, d_label=%u, s_label=%u, n_label=%u\\n\",\n       dest, src, n, d_label, s_label, n_label);\n\n  // Get dest label using unified label retrieval\n  dfsan_label dest_str_label = get_str_label(dest, d_label);\n\n  // Get src label - use get_str_label_n to handle symbolic n\n  // This will create fsubstr if n_label derives from a string op (e.g., strchr)\n  dfsan_label src_str_label = get_str_label_n(src, s_label, copy_len, n_label);\n\n  AOUT(\"strncat: dest_str_label=%u, src_str_label=%u, copy_len=%zu\\n\",\n       dest_str_label, src_str_label, copy_len);\n\n  // Perform the actual strncat\n  dfsan_memcpy(dest + dest_len, src, copy_len);\n  dest[dest_len + copy_len] = '\\0';\n\n  // If either string is tainted, create fstrcat label\n  if (dest_str_label != 0 || src_str_label != 0) {\n    // Create fstrcat: l1=dest, l2=src\n    // op1=dest pointer, op2=src pointer (for concrete content access)\n    // size = length of concrete operand (for memcmp_cache), 0 if both symbolic\n    uint16_t concrete_len = 0;\n    if (dest_str_label == 0) {\n   
   concrete_len = (uint16_t)dest_len;
    } else if (src_str_label == 0) {
      concrete_len = (uint16_t)copy_len;
    }
    dfsan_label concat_label = dfsan_union(dest_str_label, src_str_label,
                                            __dfsan::fstrcat,
                                            concrete_len,
                                            (uint64_t)dest,
                                            (uint64_t)src);
    AOUT("strncat: created fstrcat label=%u\n", concat_label);
    // Send concrete content through pipe if one side is concrete
    if (concrete_len > 0) {
      __taint_trace_memcmp(concat_label);
    }
    // Store in content map so downstream ops can find it
    taint_set_str_content_label(dest, concat_label);
  }

  *ret_label = d_label;
  return dest;
}

// Taint wrapper for strdup(): duplicates data + per-byte labels, and carries
// the source's string content label over to the new buffer via the str map.
SANITIZER_INTERFACE_ATTRIBUTE char *
__dfsw_strdup(const char *s, dfsan_label s_label, dfsan_label *ret_label) {
  size_t len = strlen(s);
  void *p = malloc(len+1);
  if (p == nullptr) {
    *ret_label = 0;
    return nullptr;
  }
  dfsan_memcpy(p, s, len+1);

  // Propagate string label to duplicated string
  dfsan_label str_label = get_str_label(s, s_label);
  if (str_label != 0) {
    taint_set_str_content_label(static_cast<char *>(p), str_label);
  }

  // The returned pointer itself is concrete.
  *ret_label = 0;
  return static_cast<char *>(p);
}

// Taint wrapper for strndup(): same as strdup but bounded by n; the content
// label is fetched with get_str_label_n so a symbolic n is handled.
SANITIZER_INTERFACE_ATTRIBUTE char *
__dfsw_strndup(const char *s, size_t n, dfsan_label s_label,
               dfsan_label n_label, dfsan_label *ret_label) {
  size_t len = strnlen(s, n);
  void *p = malloc(len + 1);
  if (p == nullptr) {
    *ret_label = 0;
    return nullptr;
  }
  dfsan_memcpy(p, s, len);
  ((char *)p)[len] = '\0';

  // Propagate string label to duplicated string
  dfsan_label str_label = get_str_label_n(s, s_label, len, n_label);
  if (str_label != 0) {
    taint_set_str_content_label(static_cast<char *>(p), str_label);
  }

  *ret_label = 0;
  return static_cast<char /* continued */
*>(p);
}

// Taint wrapper for glibc's internal __strdup alias (same logic as strdup).
SANITIZER_INTERFACE_ATTRIBUTE char *
__dfsw___strdup(const char *s, dfsan_label s_label, dfsan_label *ret_label) {
  size_t len = strlen(s);
  void *p = malloc(len+1);
  if (p == nullptr) {
    *ret_label = 0;
    return nullptr;
  }
  dfsan_memcpy(p, s, len+1);

  // Propagate string label to duplicated string
  dfsan_label str_label = get_str_label(s, s_label);
  if (str_label != 0) {
    taint_set_str_content_label(static_cast<char *>(p), str_label);
  }

  *ret_label = 0;
  return static_cast<char *>(p);
}

// Taint wrapper for glibc's internal __strndup alias (same logic as strndup).
SANITIZER_INTERFACE_ATTRIBUTE char *
__dfsw___strndup(const char *s, size_t n, dfsan_label s_label,
                 dfsan_label n_label, dfsan_label *ret_label) {
  size_t len = strnlen(s, n);
  char *p = static_cast<char *>(malloc(len+1));
  if (p == nullptr) {
    *ret_label = 0;
    return nullptr;
  }
  dfsan_memcpy(p, s, len); // copy at most n bytes
  p[len] = '\0';

  // Propagate string label to duplicated string
  dfsan_label str_label = get_str_label_n(s, s_label, len, n_label);
  if (str_label != 0) {
    taint_set_str_content_label(static_cast<char *>(p), str_label);
  }

  *ret_label = 0;
  return p;
}

// Taint wrapper for strncpy(): copies data + labels, and when the count n is
// itself derived from a string operation (e.g. a strchr index), models the
// destination as a symbolic substring of the source via fsubstr.
SANITIZER_INTERFACE_ATTRIBUTE char *
__dfsw_strncpy(char *s1, const char *s2, size_t n, dfsan_label s1_label,
               dfsan_label s2_label, dfsan_label n_label,
               dfsan_label *ret_label) {
  size_t len = strlen(s2);
  size_t copy_len = len < n ? len : n;

  if (n_label)
    __taint_solve_bounds(s1_label, (uint64_t)s1, n_label, n, 0, 1, 0, 0);

  // Check if n_label derives from a string op (e.g., strchr index)
  dfsan_label str_op_label = n_label ? /* continued */
find_string_op_source(n_label) : 0;\n  bool created_fsubstr = false;\n\n  if (str_op_label != 0) {\n    // Get the content label from the string op\n    dfsan_label_info *str_op_info = dfsan_get_label_info(str_op_label);\n    dfsan_label str_op_content = str_op_info->l1;  // content from strchr\n\n    // Verify buffers match: str_op searched the same buffer we're copying\n    // When copy_len = 0, we can't read from s2, so trust str_op_content\n    bool buffers_match = false;\n    if (str_op_content >= CONST_OFFSET) {\n      if (copy_len > 0) {\n        dfsan_label src_content = dfsan_read_label(s2, copy_len);\n        if (src_content >= CONST_OFFSET) {\n          dfsan_label src_base = get_base_input_label(src_content);\n          dfsan_label str_op_base = get_base_input_label(str_op_content);\n          buffers_match = (src_base != 0 && src_base == str_op_base);\n        }\n      } else {\n        // copy_len = 0: trust the str_op_content (empty substring case)\n        buffers_match = true;\n      }\n    }\n\n    if (buffers_match) {\n      // Create fsubstr: represents substr(src, 0, len) where len is symbolic\n      // Use str_op_content (full haystack) for proper string theory solving\n      dfsan_label substr_label = dfsan_union(str_op_content, str_op_label,\n                                              __dfsan::fsubstr,\n                                              sizeof(void*) * 8,\n                                              (uint64_t)n, 0);\n\n      // Store fsubstr label in runtime map keyed by destination address\n      // This survives buffer content being overwritten (e.g., key[len] = '\\0')\n      taint_set_str_content_label(s1, substr_label);\n\n      *ret_label = s1_label;\n    }\n  }\n\n  // Normal case: copy byte-by-byte labels\n  if (len < n) {\n    dfsan_memcpy(s1, s2, len + 1);\n  } else {\n    dfsan_memcpy(s1, s2, n);\n  }\n\n  // Handle padding (strncpy pads with zeros if len < n)\n  if (len < n) {\n    dfsan_memset(s1 + len + 1, 0, 0, 
n - len - 1);
  }

  *ret_label = s1_label;
  return s1;
}

// Taint wrapper for pread(): on a read from a tracked (tainted) file, assigns
// each destination byte the per-offset input label; otherwise clears labels.
SANITIZER_INTERFACE_ATTRIBUTE ssize_t
__dfsw_pread(int fd, void *buf, size_t count, off_t offset,
             dfsan_label fd_label, dfsan_label buf_label,
             dfsan_label count_label, dfsan_label offset_label,
             dfsan_label *ret_label) {
  __taint_check_bounds(buf_label, (uptr)buf, count_label, count);
  if (count_label)
    __taint_solve_bounds(buf_label, (uint64_t)buf, count_label, count, 0, 1, 0, 0);
  ssize_t ret = pread(fd, buf, count, offset);
  *ret_label = 0;
  if (ret >= 0) {
    if (taint_get_file(fd)) {
      for (ssize_t i = 0; i < ret; i++) {
        dfsan_set_label(get_label_for(fd, offset + i), (char *)buf + i, 1);
      }
      // *ret_label = dfsan_union(0, 0, fsize, sizeof(ret) * 8, offset, 0);
    } else {
      dfsan_set_label(0, buf, ret);
    }
  }
  return ret;
}

// Taint wrapper for pread64(): identical logic to __dfsw_pread above.
SANITIZER_INTERFACE_ATTRIBUTE ssize_t
__dfsw_pread64(int fd, void *buf, size_t count, off_t offset,
               dfsan_label fd_label, dfsan_label buf_label,
               dfsan_label count_label, dfsan_label offset_label,
               dfsan_label *ret_label) {
  __taint_check_bounds(buf_label, (uptr)buf, count_label, count);
  if (count_label)
    __taint_solve_bounds(buf_label, (uint64_t)buf, count_label, count, 0, 1, 0, 0);
  ssize_t ret = pread64(fd, buf, count, offset);
  *ret_label = 0;
  if (ret >= 0) {
    if (taint_get_file(fd)) {
      for (ssize_t i = 0; i < ret; i++) {
        dfsan_set_label(get_label_for(fd, offset + i), (char *)buf + i, 1);
      }
      // *ret_label = dfsan_union(0, 0, fsize, sizeof(ret) * 8, offset, 0);
    } else {
      dfsan_set_label(0, buf, ret);
    }
  }
  return ret;
}

// Taint wrapper for read(): like pread, but derives the file offset from the
// current position via lseek before reading.
SANITIZER_INTERFACE_ATTRIBUTE ssize_t
__dfsw_read(int fd, void *buf, size_t count,
             dfsan_label fd_label, dfsan_label buf_label,
             dfsan_label count_label,
             dfsan_label *ret_label) {
  off_t offset /* continued */
= lseek(fd, 0, SEEK_CUR);
  __taint_check_bounds(buf_label, (uptr)buf, count_label, count);
  if (count_label)
    __taint_solve_bounds(buf_label, (uint64_t)buf, count_label, count, 0, 1, 0, 0);
  ssize_t ret = read(fd, buf, count);
  *ret_label = 0;
  if (ret >= 0) {
    if (taint_get_file(fd)) {
      AOUT("offset = %ld, ret = %ld\n", offset, ret);
      for(ssize_t i = 0; i < ret; i++) {
        dfsan_set_label(get_label_for(fd, offset + i), (char *)buf + i, 1);
      }
      // for (size_t i = ret; i < count; i++)
      //   dfsan_set_label(-1, (char *)buf + i, 1);
      // *ret_label = dfsan_union(0, 0, fsize, sizeof(ret) * 8, offset, 0);
    } else {
      dfsan_set_label(0, buf, ret);
    }
  }
  return ret;
}

// Taint wrapper for clock_gettime(): kernel-sourced data; clear labels.
SANITIZER_INTERFACE_ATTRIBUTE int __dfsw_clock_gettime(clockid_t clk_id,
                                                       struct timespec *tp,
                                                       dfsan_label clk_id_label,
                                                       dfsan_label tp_label,
                                                       dfsan_label *ret_label) {
  int ret = clock_gettime(clk_id, tp);
  if (ret == 0)
    dfsan_set_label(0, tp, sizeof(struct timespec));
  *ret_label = 0;
  return ret;
}

// Helper passed to ForEachMappedRegion: zero the labels of a mapped range.
static void dfsan_set_zero_label(const void *ptr, uptr size) {
  dfsan_set_label(0, const_cast<void *>(ptr), size);
}

// dlopen() ultimately calls mmap() down inside the loader, which generally
// doesn't participate in dynamic symbol resolution.
// Therefore we won't
// intercept its calls to mmap, and we have to hook it here.
SANITIZER_INTERFACE_ATTRIBUTE void *
__dfsw_dlopen(const char *filename, int flag, dfsan_label filename_label,
              dfsan_label flag_label, dfsan_label *ret_label) {
  void *handle = dlopen(filename, flag);
  link_map *map = GET_LINK_MAP_BY_DLOPEN_HANDLE(handle);
  if (map && map->l_addr)
    ForEachMappedRegion(map, dfsan_set_zero_label);
  *ret_label = 0;
  return handle;
}

// Heap-allocated bundle passed from __dfsw_pthread_create to the new thread.
struct pthread_create_info {
  void *(*start_routine_trampoline)(void *, void *, dfsan_label, dfsan_label *);
  void *start_routine;
  void *arg;
};

// Thread entry: copies the bundle out, frees it, and invokes the user's
// start routine through the DFSan trampoline with a zero arg label.
static void *pthread_create_cb(void *p) {
  pthread_create_info pci(*(pthread_create_info *)p);
  free(p);
  dfsan_label ret_label;
  return pci.start_routine_trampoline(pci.start_routine, pci.arg, 0,
                                      &ret_label);
}

// Taint wrapper for pthread_create(): runs the user routine via the
// trampoline so labels cross the thread boundary in a controlled way.
// NOTE(review): the malloc below is not checked for failure before the
// pointer is dereferenced — consider handling allocation failure.
SANITIZER_INTERFACE_ATTRIBUTE int __dfsw_pthread_create(
    pthread_t *thread, const pthread_attr_t *attr,
    void *(*start_routine_trampoline)(void *, void *, dfsan_label,
                                      dfsan_label *),
    void *start_routine, void *arg, dfsan_label thread_label,
    dfsan_label attr_label, dfsan_label start_routine_label,
    dfsan_label arg_label, dfsan_label *ret_label) {
  pthread_create_info *pci =
      (pthread_create_info *)malloc(sizeof(pthread_create_info));
  pci->start_routine_trampoline = start_routine_trampoline;
  pci->start_routine = start_routine;
  pci->arg = arg;
  int rv = pthread_create(thread, attr, pthread_create_cb, (void *)pci);
  if (rv != 0)
    free(pci);
  *ret_label = 0;
  return rv;
}

// Taint wrapper for pthread_join(): clears the label of the returned pointer.
SANITIZER_INTERFACE_ATTRIBUTE int __dfsw_pthread_join(pthread_t thread,
                                                      void **retval,
                                                      dfsan_label thread_label,
                                                      dfsan_label retval_label,
                      
                 dfsan_label *ret_label) {
  int ret = pthread_join(thread, retval);
  if (ret == 0 && retval)
    dfsan_set_label(0, retval, sizeof(*retval));
  *ret_label = 0;
  return ret;
}

// Bundle forwarded through dl_iterate_phdr's opaque data pointer so the
// callback can reach the user's callback + trampoline.
struct dl_iterate_phdr_info {
  int (*callback_trampoline)(void *callback, struct dl_phdr_info *info,
                             size_t size, void *data, dfsan_label info_label,
                             dfsan_label size_label, dfsan_label data_label,
                             dfsan_label *ret_label);
  void *callback;
  void *data;
};

// Clears labels on everything the loader hands us (the info struct, its name
// string and program headers), then forwards to the user callback with zero
// labels via the trampoline.
int dl_iterate_phdr_cb(struct dl_phdr_info *info, size_t size, void *data) {
  dl_iterate_phdr_info *dipi = (dl_iterate_phdr_info *)data;
  dfsan_set_label(0, *info);
  dfsan_set_label(0, const_cast<char *>(info->dlpi_name),
                  strlen(info->dlpi_name) + 1);
  dfsan_set_label(
      0, const_cast<char *>(reinterpret_cast<const char *>(info->dlpi_phdr)),
      sizeof(*info->dlpi_phdr) * info->dlpi_phnum);
  dfsan_label ret_label;
  return dipi->callback_trampoline(dipi->callback, info, size, dipi->data, 0, 0,
                                   0, &ret_label);
}

// Taint wrapper for dl_iterate_phdr(): interposes the sanitizing callback.
SANITIZER_INTERFACE_ATTRIBUTE int __dfsw_dl_iterate_phdr(
    int (*callback_trampoline)(void *callback, struct dl_phdr_info *info,
                               size_t size, void *data, dfsan_label info_label,
                               dfsan_label size_label, dfsan_label data_label,
                               dfsan_label *ret_label),
    void *callback, void *data, dfsan_label callback_label,
    dfsan_label data_label, dfsan_label *ret_label) {
  dl_iterate_phdr_info dipi = { callback_trampoline, callback, data };
  *ret_label = 0;
  return dl_iterate_phdr(dl_iterate_phdr_cb, &dipi);
}

#if __GLIBC__ >= 2 && __GLIBC_MINOR__ >= 27
// This function is only available for glibc 2.27 or newer.
// Mark it weak so
// linking succeeds with older glibcs.
SANITIZER_WEAK_ATTRIBUTE void _dl_get_tls_static_info(size_t *sizep,
                                                      size_t *alignp);

// Taint wrapper: TLS geometry is environment data; clear the output labels.
SANITIZER_INTERFACE_ATTRIBUTE void __dfsw__dl_get_tls_static_info(
    size_t *sizep, size_t *alignp, dfsan_label sizep_label,
    dfsan_label alignp_label) {
  assert(_dl_get_tls_static_info);
  _dl_get_tls_static_info(sizep, alignp);
  dfsan_set_label(0, sizep, sizeof(*sizep));
  dfsan_set_label(0, alignp, sizeof(*alignp));
}
#endif

// Taint wrapper for ctime_r(): the output string derives from *timep, so it
// inherits the combined label of the time_t bytes.
SANITIZER_INTERFACE_ATTRIBUTE
char *__dfsw_ctime_r(const time_t *timep, char *buf, dfsan_label timep_label,
                     dfsan_label buf_label, dfsan_label *ret_label) {
  char *ret = ctime_r(timep, buf);
  if (ret) {
    dfsan_set_label(dfsan_read_label(timep, sizeof(time_t)), buf,
                    strlen(buf) + 1);
    *ret_label = buf_label;
  } else {
    *ret_label = 0;
  }
  return ret;
}

// Taint wrapper for getcwd(): path comes from the OS; clear its labels.
SANITIZER_INTERFACE_ATTRIBUTE
char *__dfsw_getcwd(char *buf, size_t size, dfsan_label buf_label,
                    dfsan_label size_label, dfsan_label *ret_label) {
  char *ret = getcwd(buf, size);
  if (ret) {
    dfsan_set_label(0, ret, strlen(ret) + 1);
    *ret_label = buf_label;
  } else {
    *ret_label = 0;
  }
  return ret;
}

// Taint wrapper for get_current_dir_name(): freshly allocated OS data.
SANITIZER_INTERFACE_ATTRIBUTE
char *__dfsw_get_current_dir_name(dfsan_label *ret_label) {
  char *ret = get_current_dir_name();
  if (ret) {
    dfsan_set_label(0, ret, strlen(ret) + 1);
  }
  *ret_label = 0;
  return ret;
}

#if __GLIBC__ >= 2 && __GLIBC_MINOR__ >= 25
// This function is only available for glibc 2.25 or newer.
// Mark it weak so
// linking succeeds with older glibcs.
SANITIZER_WEAK_ATTRIBUTE int getentropy(void *buffer, size_t length);

// Taint wrapper for getentropy(): random bytes are not input; clear labels.
SANITIZER_INTERFACE_ATTRIBUTE int __dfsw_getentropy(void *buffer, size_t length,
                                                    dfsan_label buffer_label,
                                                    dfsan_label length_label,
                                                    dfsan_label *ret_label) {
  int ret = getentropy(buffer, length);
  if (ret == 0) {
    dfsan_set_label(0, buffer, length);
  }
  *ret_label = 0;
  return ret;
}
#endif

// Taint wrapper for gethostname(): hostname is environment data.
SANITIZER_INTERFACE_ATTRIBUTE
int __dfsw_gethostname(char *name, size_t len, dfsan_label name_label,
                       dfsan_label len_label, dfsan_label *ret_label) {
  int ret = gethostname(name, len);
  if (ret == 0) {
    dfsan_set_label(0, name, strlen(name) + 1);
  }
  *ret_label = 0;
  return ret;
}

// Taint wrapper for getpeername(): clears labels on the bytes the kernel
// actually wrote (min of the original and updated *addrlen).
SANITIZER_INTERFACE_ATTRIBUTE int __dfsw_getpeername(
    int sockfd, struct sockaddr *addr, socklen_t *addrlen,
    dfsan_label sockfd_label, dfsan_label addr_label, dfsan_label addrlen_label,
    dfsan_label *ret_label) {
  socklen_t origlen = addrlen ? *addrlen : 0;
  int ret = getpeername(sockfd, addr, addrlen);
  if (ret != -1 && addr && addrlen) {
    socklen_t written_bytes = origlen < *addrlen ? /* continued */
origlen : *addrlen;
    dfsan_set_label(0, addrlen, sizeof(*addrlen));
    dfsan_set_label(0, addr, written_bytes);
  }
  *ret_label = 0;
  return ret;
}

// Taint wrapper for getrlimit(): kernel-sourced data; clear labels on success.
SANITIZER_INTERFACE_ATTRIBUTE
int __dfsw_getrlimit(int resource, struct rlimit *rlim,
                     dfsan_label resource_label, dfsan_label rlim_label,
                     dfsan_label *ret_label) {
  int ret = getrlimit(resource, rlim);
  if (ret == 0) {
    dfsan_set_label(0, rlim, sizeof(struct rlimit));
  }
  *ret_label = 0;
  return ret;
}

// Taint wrapper for getrusage(): kernel-sourced data; clear labels on success.
SANITIZER_INTERFACE_ATTRIBUTE
int __dfsw_getrusage(int who, struct rusage *usage, dfsan_label who_label,
                     dfsan_label usage_label, dfsan_label *ret_label) {
  int ret = getrusage(who, usage);
  if (ret == 0) {
    dfsan_set_label(0, usage, sizeof(struct rusage));
  }
  *ret_label = 0;
  return ret;
}

// Taint wrapper for getsockname(): clears labels on the bytes the kernel
// actually wrote (min of the original and updated *addrlen).
SANITIZER_INTERFACE_ATTRIBUTE int __dfsw_getsockname(
    int sockfd, struct sockaddr *addr, socklen_t *addrlen,
    dfsan_label sockfd_label, dfsan_label addr_label, dfsan_label addrlen_label,
    dfsan_label *ret_label) {
  socklen_t origlen = addrlen ? *addrlen : 0;
  int ret = getsockname(sockfd, addr, addrlen);
  if (ret != -1 && addr && addrlen) {
    socklen_t written_bytes = origlen < *addrlen ? /* continued */
origlen : *addrlen;\n    dfsan_set_label(0, addrlen, sizeof(*addrlen));\n    dfsan_set_label(0, addr, written_bytes);\n  }\n  *ret_label = 0;\n  return ret;\n}\n\nSANITIZER_INTERFACE_ATTRIBUTE int __dfsw_getsockopt(\n    int sockfd, int level, int optname, void *optval, socklen_t *optlen,\n    dfsan_label sockfd_label, dfsan_label level_label,\n    dfsan_label optname_label, dfsan_label optval_label,\n    dfsan_label optlen_label, dfsan_label *ret_label) {\n  int ret = getsockopt(sockfd, level, optname, optval, optlen);\n  if (ret != -1 && optval && optlen) {\n    dfsan_set_label(0, optlen, sizeof(*optlen));\n    dfsan_set_label(0, optval, *optlen);\n  }\n  *ret_label = 0;\n  return ret;\n}\n\nSANITIZER_INTERFACE_ATTRIBUTE int __dfsw_nanosleep(const struct timespec *req,\n                                                   struct timespec *rem,\n                                                   dfsan_label req_label,\n                                                   dfsan_label rem_label,\n                                                   dfsan_label *ret_label) {\n  int ret = nanosleep(req, rem);\n  *ret_label = 0;\n  if (ret == -1) {\n    // Interrupted by a signal, rem is filled with the remaining time.\n    dfsan_set_label(0, rem, sizeof(struct timespec));\n  }\n  return ret;\n}\n\nSANITIZER_INTERFACE_ATTRIBUTE\nchar *__dfsw_stpcpy(char *dest, const char *src, dfsan_label dest_label,\n                    dfsan_label src_label, dfsan_label *ret_label) {\n  size_t len = strlen(src) + 1;\n  __taint_check_bounds(dest_label, (uptr)dest, 0, len);\n  char *ret = stpcpy(dest, src);\n  if (ret) {\n    internal_memcpy(shadow_for(dest), shadow_for(src), sizeof(dfsan_label) * len);\n  }\n  *ret_label = dest_label;\n  return ret;\n}\n\nSANITIZER_INTERFACE_ATTRIBUTE\nchar *__dfsw_strcpy(char *dest, const char *src, dfsan_label dst_label,\n                    dfsan_label src_label, dfsan_label *ret_label) {\n  size_t len = strlen(src) + 1;\n  
__taint_check_bounds(dst_label, (uptr)dest, 0, len);\n  char *ret = strcpy(dest, src);\n  *ret_label = dst_label;\n\n  // Use get_str_label to properly get the source label\n  // This handles str_map, pointer label string ops, and buffer content\n  dfsan_label real_src_label = get_str_label(src, src_label);\n  AOUT(\"strcpy: src='%p', src_label=%d, real_src_label=%d\\n\", src, src_label, real_src_label);\n\n  if (real_src_label != 0) {\n    // Store the label in runtime map keyed by destination address\n    taint_set_str_content_label(dest, real_src_label);\n    *ret_label = real_src_label;\n  }\n\n  if (ret) {\n    internal_memcpy(shadow_for(dest), shadow_for(src),\n                    sizeof(dfsan_label) * len);\n  }\n  return ret;\n}\n\nstatic dfsan_label taint_strtol(const char *nptr, uptr len, size_t ret_size, int base) {\n  dfsan_label load = 0;\n  if (len > 0) {\n    load = dfsan_read_label(nptr, len);\n  } else {\n    // well, no byte get consumed, handle specially\n    dfsan_label l = shadow_for(nptr)[0];\n    if (l == 0) // constant\n      return 0;\n    load = dfsan_union(l, 0, Load, 0, 0, 0);\n  }\n  return dfsan_union(0, load, fatoi, sizeof(ret_size) * 8, base, len);\n}\n\nSANITIZER_INTERFACE_ATTRIBUTE\nint __dfsw_atoi(const char *nptr, dfsan_label nptr_label, dfsan_label *ret_label) {\n  char *tmp_endptr;\n  int ret = (int)strtol(nptr, &tmp_endptr, 10);\n  uptr len = (uptr)tmp_endptr - (uptr)nptr;\n  *ret_label = taint_strtol(nptr, len, sizeof(ret), 10);\n  return ret;\n}\n\nSANITIZER_INTERFACE_ATTRIBUTE\nlong __dfsw_atol(const char *nptr, dfsan_label nptr_label, dfsan_label *ret_label) {\n  char *tmp_endptr;\n  long ret = strtol(nptr, &tmp_endptr, 10);\n  uptr len = (uptr)tmp_endptr - (uptr)nptr;\n  *ret_label = taint_strtol(nptr, len, sizeof(ret), 10);\n  return ret;\n}\n\nSANITIZER_INTERFACE_ATTRIBUTE\nlong long __dfsw_atoll(const char *nptr, dfsan_label nptr_label, dfsan_label *ret_label) {\n  char *tmp_endptr;\n  long long ret = strtoll(nptr, 
&tmp_endptr, 10);
  uptr len = (uptr)tmp_endptr - (uptr)nptr;
  *ret_label = taint_strtol(nptr, len, sizeof(ret), 10);
  return ret;
}

// Taint wrapper for strtol(): parses, reports the consumed length and base
// to taint_strtol for the result label.
SANITIZER_INTERFACE_ATTRIBUTE
long __dfsw_strtol(const char *nptr, char **endptr, int base,
                   dfsan_label nptr_label, dfsan_label endptr_label,
                   dfsan_label base_label, dfsan_label *ret_label) {
  char *tmp_endptr;
  long ret = strtol(nptr, &tmp_endptr, base);
  if (endptr) {
    *endptr = tmp_endptr;
  }
  uptr len = (uptr)tmp_endptr - (uptr)nptr;
  *ret_label = taint_strtol(nptr, len, sizeof(ret), base);
  return ret;
}

// Taint wrapper for strtod(): result label is not modeled yet.
SANITIZER_INTERFACE_ATTRIBUTE
double __dfsw_strtod(const char *nptr, char **endptr,
                     dfsan_label nptr_label, dfsan_label endptr_label,
                     dfsan_label *ret_label) {
  char *tmp_endptr;
  double ret = strtod(nptr, &tmp_endptr);
  if (endptr) {
    *endptr = tmp_endptr;
  }
  *ret_label = 0; // TODO: implement
  return ret;
}

// Taint wrapper for strtoll().
SANITIZER_INTERFACE_ATTRIBUTE
long long __dfsw_strtoll(const char *nptr, char **endptr, int base,
                         dfsan_label nptr_label, dfsan_label endptr_label,
                         dfsan_label base_label, dfsan_label *ret_label) {
  char *tmp_endptr;
  long long ret = strtoll(nptr, &tmp_endptr, base);
  if (endptr) {
    *endptr = tmp_endptr;
  }
  AOUT("strtoll: %s\n", nptr);
  uptr len = (uptr)tmp_endptr - (uptr)nptr;
  *ret_label = taint_strtol(nptr, len, sizeof(ret), base);
  return ret;
}

// Taint wrapper for strtoul().
SANITIZER_INTERFACE_ATTRIBUTE
unsigned long __dfsw_strtoul(const char *nptr, char **endptr, int base,
                             dfsan_label nptr_label, dfsan_label endptr_label,
                             dfsan_label base_label, dfsan_label *ret_label) {
  char *tmp_endptr;
  unsigned long ret = strtoul(nptr, &tmp_endptr, base);
  if (endptr) {
    *endptr = tmp_endptr;
  }
  uptr len = (uptr)tmp_endptr - (uptr)nptr;
  *ret_label = taint_strtol(nptr, 
len, sizeof(ret), base);\n  return ret;\n}\n\nSANITIZER_INTERFACE_ATTRIBUTE\nunsigned long long __dfsw_strtoull(const char *nptr, char **endptr,\n                                   dfsan_label nptr_label,\n                                   int base, dfsan_label endptr_label,\n                                   dfsan_label base_label,\n                                   dfsan_label *ret_label) {\n  char *tmp_endptr;\n  unsigned long long ret = strtoull(nptr, &tmp_endptr, base);\n  if (endptr) {\n    *endptr = tmp_endptr;\n  }\n  uptr len = (uptr)tmp_endptr - (uptr)nptr;\n  *ret_label = taint_strtol(nptr, len, sizeof(ret), base);\n  return ret;\n}\n\nSANITIZER_INTERFACE_ATTRIBUTE\ntime_t __dfsw_time(time_t *t, dfsan_label t_label, dfsan_label *ret_label) {\n  time_t ret = time(t);\n  if (ret != (time_t) -1 && t) {\n    dfsan_set_label(0, t, sizeof(time_t));\n  }\n  *ret_label = 0;\n  return ret;\n}\n\nSANITIZER_INTERFACE_ATTRIBUTE\nint __dfsw_inet_pton(int af, const char *src, void *dst, dfsan_label af_label,\n                     dfsan_label src_label, dfsan_label dst_label,\n                     dfsan_label *ret_label) {\n  int ret = inet_pton(af, src, dst);\n  if (ret == 1) {\n    dfsan_set_label(dfsan_read_label(src, strlen(src) + 1), dst,\n                    af == AF_INET ? 
sizeof(struct in_addr) : sizeof(in6_addr));\n  }\n  *ret_label = 0;\n  return ret;\n}\n\nSANITIZER_INTERFACE_ATTRIBUTE\nstruct tm *__dfsw_localtime_r(const time_t *timep, struct tm *result,\n                              dfsan_label timep_label, dfsan_label result_label,\n                              dfsan_label *ret_label) {\n  struct tm *ret = localtime_r(timep, result);\n  if (ret) {\n    dfsan_set_label(dfsan_read_label(timep, sizeof(time_t)), result,\n                    sizeof(struct tm));\n    *ret_label = result_label;\n  } else {\n    *ret_label = 0;\n  }\n  return ret;\n}\n\nSANITIZER_INTERFACE_ATTRIBUTE\nint __dfsw_getpwuid_r(id_t uid, struct passwd *pwd,\n                      char *buf, size_t buflen, struct passwd **result,\n                      dfsan_label uid_label, dfsan_label pwd_label,\n                      dfsan_label buf_label, dfsan_label buflen_label,\n                      dfsan_label result_label, dfsan_label *ret_label) {\n  // Store the data in pwd, the strings referenced from pwd in buf, and the\n  // address of pwd in *result.  
On failure, NULL is stored in *result.\n  int ret = getpwuid_r(uid, pwd, buf, buflen, result);\n  if (ret == 0) {\n    dfsan_set_label(0, pwd, sizeof(struct passwd));\n    dfsan_set_label(0, buf, strlen(buf) + 1);\n  }\n  *ret_label = 0;\n  dfsan_set_label(0, result, sizeof(struct passwd*));\n  return ret;\n}\n\nSANITIZER_INTERFACE_ATTRIBUTE\nint __dfsw_epoll_wait(int epfd, struct epoll_event *events, int maxevents,\n                      int timeout, dfsan_label epfd_label,\n                      dfsan_label events_label, dfsan_label maxevents_label,\n                      dfsan_label timeout_label, dfsan_label *ret_label) {\n  int ret = epoll_wait(epfd, events, maxevents, timeout);\n  if (ret > 0)\n    dfsan_set_label(0, events, ret * sizeof(*events));\n  *ret_label = 0;\n  return ret;\n}\n\nSANITIZER_INTERFACE_ATTRIBUTE\nint __dfsw_poll(struct pollfd *fds, nfds_t nfds, int timeout,\n                dfsan_label dfs_label, dfsan_label nfds_label,\n                dfsan_label timeout_label, dfsan_label *ret_label) {\n  int ret = poll(fds, nfds, timeout);\n  if (ret >= 0) {\n    for (; nfds > 0; --nfds) {\n      dfsan_set_label(0, &fds[nfds - 1].revents, sizeof(fds[nfds - 1].revents));\n    }\n  }\n  *ret_label = 0;\n  return ret;\n}\n\nSANITIZER_INTERFACE_ATTRIBUTE\nint __dfsw_select(int nfds, fd_set *readfds, fd_set *writefds,\n                  fd_set *exceptfds, struct timeval *timeout,\n                  dfsan_label nfds_label, dfsan_label readfds_label,\n                  dfsan_label writefds_label, dfsan_label exceptfds_label,\n                  dfsan_label timeout_label, dfsan_label *ret_label) {\n  int ret = select(nfds, readfds, writefds, exceptfds, timeout);\n  // Clear everything (also on error) since their content is either set or\n  // undefined.\n  if (readfds) {\n    dfsan_set_label(0, readfds, sizeof(fd_set));\n  }\n  if (writefds) {\n    dfsan_set_label(0, writefds, sizeof(fd_set));\n  }\n  if (exceptfds) {\n    dfsan_set_label(0, exceptfds, 
sizeof(fd_set));\n  }\n  if (timeout) {\n    dfsan_set_label(0, timeout, sizeof(struct timeval));\n  }\n  *ret_label = 0;\n  return ret;\n}\n\nSANITIZER_INTERFACE_ATTRIBUTE\nint __dfsw_sched_getaffinity(pid_t pid, size_t cpusetsize, cpu_set_t *mask,\n                             dfsan_label pid_label,\n                             dfsan_label cpusetsize_label,\n                             dfsan_label mask_label, dfsan_label *ret_label) {\n  int ret = sched_getaffinity(pid, cpusetsize, mask);\n  if (ret == 0) {\n    dfsan_set_label(0, mask, cpusetsize);\n  }\n  *ret_label = 0;\n  return ret;\n}\n\nSANITIZER_INTERFACE_ATTRIBUTE\nint __dfsw_sigemptyset(sigset_t *set, dfsan_label set_label,\n                       dfsan_label *ret_label) {\n  int ret = sigemptyset(set);\n  dfsan_set_label(0, set, sizeof(sigset_t));\n  *ret_label = 0;\n  return ret;\n}\n\nSANITIZER_INTERFACE_ATTRIBUTE\nint __dfsw_sigaction(int signum, const struct sigaction *act,\n                     struct sigaction *oldact, dfsan_label signum_label,\n                     dfsan_label act_label, dfsan_label oldact_label,\n                     dfsan_label *ret_label) {\n  int ret = sigaction(signum, act, oldact);\n  if (oldact) {\n    dfsan_set_label(0, oldact, sizeof(struct sigaction));\n  }\n  *ret_label = 0;\n  return ret;\n}\n\nSANITIZER_INTERFACE_ATTRIBUTE\nint __dfsw_sigaltstack(const stack_t *ss, stack_t *old_ss, dfsan_label ss_label,\n                       dfsan_label old_ss_label, dfsan_label *ret_label) {\n  int ret = sigaltstack(ss, old_ss);\n  if (ret != -1 && old_ss)\n    dfsan_set_label(0, old_ss, sizeof(*old_ss));\n  *ret_label = 0;\n  return ret;\n}\n\nSANITIZER_INTERFACE_ATTRIBUTE\nint __dfsw_gettimeofday(struct timeval *tv, struct timezone *tz,\n                        dfsan_label tv_label, dfsan_label tz_label,\n                        dfsan_label *ret_label) {\n  int ret = gettimeofday(tv, tz);\n  if (tv) {\n    dfsan_set_label(0, tv, sizeof(struct timeval));\n  }\n  if (tz) {\n    
dfsan_set_label(0, tz, sizeof(struct timezone));\n  }\n  *ret_label = 0;\n  return ret;\n}\n\nSANITIZER_INTERFACE_ATTRIBUTE void *__dfsw_memchr(void *s, int c, size_t n,\n                                                  dfsan_label s_label,\n                                                  dfsan_label c_label,\n                                                  dfsan_label n_label,\n                                                  dfsan_label *ret_label) {\n  void *ret = memchr(s, c, n);\n\n  // Use unified get_str_label_n for source label\n  // Pass n_label to handle fsubstr creation when n derives from a string op\n  dfsan_label src_label = get_str_label_n(s, s_label, n, n_label);\n\n  if (src_label != 0 || c_label != 0) {\n    // Determine which operand is concrete and set size accordingly\n    uint16_t content_len = (src_label == 0) ? (uint16_t)n : 0;\n\n    // l1 = src_label (haystack content)\n    // l2 = c_label (character to find)\n    // op1 = haystack pointer (for concrete content retrieval)\n    // op2 = character value\n    // size = haystack length if haystack concrete, else 0\n    *ret_label = dfsan_union(src_label, c_label, __dfsan::fstrchr,\n                             content_len,\n                             (uint64_t)s, (uint64_t)(uint8_t)c);\n\n    // Send concrete content if haystack is concrete\n    if (content_len > 0 && *ret_label) {\n      __taint_trace_memcmp(*ret_label);\n    }\n\n    // Store the result pointer to recover symbolic length\n    if (ret) {\n      taint_set_str_indexof_label(ret, *ret_label);\n    }\n  } else {\n    *ret_label = 0;\n  }\n  return ret;\n}\n\nSANITIZER_INTERFACE_ATTRIBUTE char *__dfsw_strrchr(char *s, int c,\n                                                   dfsan_label s_label,\n                                                   dfsan_label c_label,\n                                                   dfsan_label *ret_label) {\n  char *ret = strrchr(s, c);\n\n  // Use unified get_str_label for source 
label\n  dfsan_label src_label = get_str_label(s, s_label);\n\n  if (src_label != 0 || c_label != 0) {\n    // Determine which operand is concrete and set size accordingly\n    size_t haystack_len = strlen(s);\n    uint16_t content_len = (src_label == 0) ? (uint16_t)haystack_len : 0;\n\n    // l1 = src_label (source - for chaining or content dependencies)\n    // l2 = c_label (target char - may be symbolic!)\n    // op1 = haystack pointer (for concrete content retrieval)\n    // op2 = char value\n    // size = haystack length if concrete, else 0\n    *ret_label = dfsan_union(src_label, c_label, __dfsan::fstrrchr,\n                             content_len,\n                             (uint64_t)s,\n                             (uint64_t)(uint8_t)c);\n\n    // Send concrete haystack content if haystack is concrete\n    if (content_len > 0 && *ret_label) {\n      __taint_trace_memcmp(*ret_label);\n    }\n\n    // Store the result pointer to recover symbolic length\n    if (ret) {\n      taint_set_str_indexof_label(ret, *ret_label);\n    }\n  } else {\n    *ret_label = 0;\n  }\n  return ret;\n}\n\nSANITIZER_INTERFACE_ATTRIBUTE void *__dfsw_memrchr(const void *s, int c, size_t n,\n                                                   dfsan_label s_label,\n                                                   dfsan_label c_label,\n                                                   dfsan_label n_label,\n                                                   dfsan_label *ret_label) {\n  void *ret = const_cast<void*>(memrchr(s, c, n));\n\n  // Use unified get_str_label_n for source label\n  // Pass n_label to handle fsubstr creation when n derives from a string op\n  dfsan_label src_label = get_str_label_n(s, s_label, n, n_label);\n\n  if (src_label != 0 || c_label != 0) {\n    // Determine which operand is concrete and set size accordingly\n    uint16_t content_len = (src_label == 0) ? 
(uint16_t)n : 0;\n\n    // l1 = src_label (haystack content)\n    // l2 = c_label (character to find)\n    // op1 = haystack pointer (for concrete content retrieval)\n    // op2 = character value\n    // size = haystack length if haystack concrete, else 0\n    *ret_label = dfsan_union(src_label, c_label, __dfsan::fstrrchr,\n                             content_len,\n                             (uint64_t)s, (uint64_t)(uint8_t)c);\n\n    // Send concrete content if haystack is concrete\n    if (content_len > 0 && *ret_label) {\n      __taint_trace_memcmp(*ret_label);\n    }\n\n    // Store the result pointer to recover symbolic length\n    if (ret) {\n      taint_set_str_indexof_label(ret, *ret_label);\n    }\n  } else {\n    *ret_label = 0;\n  }\n  return ret;\n}\n\nSANITIZER_INTERFACE_ATTRIBUTE char *__dfsw_strstr(char *haystack, char *needle,\n                                                  dfsan_label haystack_label,\n                                                  dfsan_label needle_label,\n                                                  dfsan_label *ret_label) {\n  char *ret = strstr(haystack, needle);\n\n  // Use unified get_str_label for haystack and needle\n  dfsan_label src_label = get_str_label(haystack, haystack_label);\n  dfsan_label real_needle_label = get_str_label(needle, needle_label);\n\n  if (src_label != 0 || real_needle_label != 0) {\n    // Determine which operand is concrete and set size accordingly\n    size_t haystack_len = strlen(haystack);\n    size_t needle_len = strlen(needle);\n    uint16_t content_len = 0;\n    if (src_label == 0) {\n      content_len = (uint16_t)haystack_len;\n    } else if (real_needle_label == 0) {\n      content_len = (uint16_t)needle_len;\n    }\n\n    // l1 = src_label (source - for chaining or content dependencies)\n    // l2 = real_needle_label (may be symbolic string!)\n    // op1 = haystack pointer (for concrete content retrieval)\n    // op2 = needle pointer (for concrete content retrieval)\n    // 
size = haystack length if haystack concrete, else needle length if needle concrete, else 0\n    dfsan_label label = dfsan_union(src_label, real_needle_label, __dfsan::fstrstr,\n                                    content_len,\n                                    (uint64_t)haystack,\n                                    (uint64_t)needle);\n\n    // Send concrete content (haystack or needle)\n    if (content_len > 0 && label) {\n      __taint_trace_memcmp(label);\n    }\n\n    *ret_label = label;\n    // Store the result pointer to recover symbolic length\n    if (ret) {\n      taint_set_str_indexof_label(ret, *ret_label);\n    }\n  } else {\n    *ret_label = 0;\n  }\n  return ret;\n}\n\n// strnstr implementation (BSD function not available on Linux)\nstatic char *strnstr_impl(const char *haystack, const char *needle, size_t len) {\n  size_t needle_len = strlen(needle);\n  if (needle_len == 0)\n    return (char *)haystack;\n\n  if (len == 0)\n    return NULL;\n\n  for (size_t i = 0; i < len && haystack[i]; i++) {\n    if (i + needle_len > len)\n      break;\n    if (strncmp(haystack + i, needle, needle_len) == 0)\n      return (char *)(haystack + i);\n  }\n  return NULL;\n}\n\nSANITIZER_INTERFACE_ATTRIBUTE char *__dfsw_strnstr(char *haystack, char *needle,\n                                                   size_t len,\n                                                   dfsan_label haystack_label,\n                                                   dfsan_label needle_label,\n                                                   dfsan_label len_label,\n                                                   dfsan_label *ret_label) {\n  char *ret = strnstr_impl(haystack, needle, len);\n\n  // Use unified get_str_label_n for haystack (respects length parameter)\n  dfsan_label src_label = get_str_label_n(haystack, haystack_label,\n                                          strnlen(haystack, len), len_label);\n  // Use unified get_str_label for needle\n  dfsan_label 
real_needle_label = get_str_label(needle, needle_label);\n\n  if (src_label != 0 || real_needle_label != 0) {\n    // Determine which operand is concrete and set size accordingly\n    size_t haystack_len = strnlen(haystack, len);\n    size_t needle_len = strlen(needle);\n    uint16_t content_len = 0;\n    if (src_label == 0) {\n      content_len = (uint16_t)haystack_len;\n    } else if (real_needle_label == 0) {\n      content_len = (uint16_t)needle_len;\n    }\n\n    // l1 = src_label (source - for chaining or content dependencies)\n    // l2 = real_needle_label (may be symbolic string!)\n    // op1 = haystack pointer (for concrete content retrieval)\n    // op2 = needle pointer (for concrete content retrieval)\n    // size = haystack length if haystack concrete, else needle length if needle concrete, else 0\n    dfsan_label label = dfsan_union(src_label, real_needle_label, __dfsan::fstrstr,\n                                    content_len,\n                                    (uint64_t)haystack,\n                                    (uint64_t)needle);\n\n    // Send concrete content (haystack or needle)\n    if (content_len > 0 && label) {\n      __taint_trace_memcmp(label);\n    }\n\n    *ret_label = label;\n    // Store the result pointer to recover symbolic length\n    if (ret) {\n      taint_set_str_indexof_label(ret, *ret_label);\n    }\n  } else {\n    *ret_label = 0;\n  }\n  return ret;\n}\n\nSANITIZER_INTERFACE_ATTRIBUTE void *__dfsw_memmem(const void *haystack, size_t haystacklen,\n                                                   const void *needle, size_t needlelen,\n                                                   dfsan_label haystack_label,\n                                                   dfsan_label haystacklen_label,\n                                                   dfsan_label needle_label,\n                                                   dfsan_label needlelen_label,\n                                                   dfsan_label 
*ret_label) {\n  void *ret = memmem(haystack, haystacklen, needle, needlelen);\n\n  // Use unified get_str_label_n for haystack and needle\n  // Pass haystacklen_label to handle fsubstr creation when haystacklen derives from a string op\n  dfsan_label src_label =\n      get_str_label_n(haystack, haystack_label, haystacklen, haystacklen_label);\n\n  // Use unified get_str_label_n for needle\n  dfsan_label real_needle_label =\n      get_str_label_n(needle, needle_label, needlelen, needlelen_label);\n\n  if (src_label != 0 || real_needle_label != 0) {\n    // Determine which operand is concrete and set size accordingly\n    uint16_t content_len = 0;\n    if (src_label == 0) {\n      content_len = (uint16_t)haystacklen;\n    } else if (real_needle_label == 0) {\n      content_len = (uint16_t)needlelen;\n    }\n\n    // l1 = src_label (haystack content)\n    // l2 = real_needle_label (needle content - may be symbolic!)\n    // op1 = haystack pointer (for concrete content retrieval)\n    // op2 = needle pointer (for concrete content retrieval)\n    // size = haystack length if haystack concrete, else needle length if needle concrete, else 0\n    dfsan_label label = dfsan_union(src_label, real_needle_label, __dfsan::fstrstr,\n                                    content_len,\n                                    (uint64_t)haystack, (uint64_t)needle);\n\n    // Send concrete content (haystack or needle)\n    if (content_len > 0 && label) {\n      __taint_trace_memcmp(label);\n    }\n    *ret_label = label;\n    // Store the result pointer to recover symbolic length\n    if (ret) {\n      taint_set_str_indexof_label(ret, *ret_label);\n    }\n  } else {\n    *ret_label = 0;\n  }\n  return ret;\n}\n\nSANITIZER_INTERFACE_ATTRIBUTE int __dfsw_connect(\n    int sockfd, const struct sockaddr *addr, socklen_t addrlen,\n    dfsan_label sockfd_label, dfsan_label addr_label, dfsan_label addrlen_label,\n    dfsan_label *ret_label) {\n  int ret = connect(sockfd, addr, addrlen);\n  if 
(ret == 0 || errno == EINPROGRESS || errno == EALREADY) {\n    taint_set_socket(addr, addrlen, sockfd);\n  }\n  *ret_label = 0;\n  return ret;\n}\n\n\nSANITIZER_INTERFACE_ATTRIBUTE ssize_t __dfsw_recv(\n    int sockfd, void *buf, size_t leng, int flags, dfsan_label sockfd_label,\n    dfsan_label buf_label, dfsan_label leng_label, dfsan_label flags_label,\n    dfsan_label *ret_label) {\n  __taint_check_bounds(buf_label, (uptr)buf, leng_label, leng);\n  if (leng_label)\n    __taint_solve_bounds(buf_label, (uint64_t)buf, leng_label, leng, 0, 1, 0, 0);\n  internal_memset(buf, 0, leng);\n  ssize_t ret = recv(sockfd, buf, leng, flags);\n#if AIXCC_HACK\n  ssize_t readed = strlen((char *)buf);\n  if (ret == 0 && readed > 0) ret = readed; // we actually readed something\n#endif\n  if (ret > 0) {\n    off_t offset = taint_get_socket(sockfd);\n    if (offset >= 0) {\n      AOUT(\"recv: fd = %d, offset = %ld, ret = %ld\\n\", sockfd, offset, ret);\n      for (ssize_t i = 0; i < ret; i++) {\n        dfsan_set_label(dfsan_create_label(offset + i), (char *)buf + i, 1);\n      }\n      taint_update_socket_offset(sockfd, ret);\n    } else {\n      // clear the label?\n      dfsan_set_label(0, buf, ret);\n    }\n  }\n  *ret_label = 0;\n  return ret;\n}\n\nSANITIZER_INTERFACE_ATTRIBUTE ssize_t __dfsw_recvfrom(\n    int sockfd, void *buf, size_t leng, int flags, struct sockaddr *src_addr,\n    socklen_t *addrlen, dfsan_label sockfd_label, dfsan_label buf_label,\n    dfsan_label leng_label, dfsan_label flags_label, dfsan_label src_addr_label,\n    dfsan_label addrlen_label, dfsan_label *ret_label) {\n  socklen_t alen = 0;\n  __taint_check_bounds(buf_label, (uptr)buf, leng_label, leng);\n  if (leng_label)\n    __taint_solve_bounds(buf_label, (uint64_t)buf, leng_label, leng, 0, 1, 0, 0);\n  internal_memset(buf, 0, leng);\n\n  ssize_t ret = recvfrom(sockfd, buf, leng, flags, src_addr, &alen);\n#if AIXCC_HACK\n  ssize_t readed = strlen((char *)buf);\n  if (ret == 0 && readed > 0) ret = 
readed; // we actually readed something\n#endif\n  if (ret > 0) {\n    off_t offset = taint_get_socket(sockfd);\n    if (offset >= 0) {\n      for (ssize_t i = 0; i < ret; i++) {\n        dfsan_set_label(dfsan_create_label(offset + i), (char *)buf + i, 1);\n      }\n      taint_update_socket_offset(sockfd, ret);\n    } else {\n      // clear the label?\n      dfsan_set_label(0, buf, ret);\n    }\n  }\n  if (src_addr) { dfsan_set_label(0, src_addr, alen); }\n  if (addrlen) { *addrlen = alen; }\n  *ret_label = 0;\n  return ret;\n}\n\nstatic void taint_handle_msg(int sockfd, struct msghdr *msg, size_t msg_len) {\n  // clear labels\n  if (msg->msg_name) {\n    dfsan_set_label(0, msg->msg_name, msg->msg_namelen);\n  }\n  if (msg->msg_control) {\n    dfsan_set_label(0, msg->msg_control, msg->msg_controllen);\n  }\n  off_t offset = taint_get_socket(sockfd);\n  for (size_t i = 0, bytes_written = msg_len; bytes_written > 0; ++i) {\n    assert(i < msg->msg_iovlen);\n    struct iovec *iov = &msg->msg_iov[i];\n    size_t iov_written =\n        bytes_written < iov->iov_len ? 
bytes_written : iov->iov_len;\n    if (offset >= 0) {\n      for (size_t j = 0; j < iov_written; ++j) {\n        dfsan_set_label(dfsan_create_label(offset + j), (char *)iov->iov_base + j, 1);\n      }\n      taint_update_socket_offset(sockfd, iov_written);\n      offset += iov_written;\n    } else {\n      dfsan_set_label(0, iov->iov_base, iov_written);\n    }\n    bytes_written -= iov_written;\n  }\n}\n\nSANITIZER_INTERFACE_ATTRIBUTE ssize_t __dfsw_recvmsg(\n    int sockfd, struct msghdr *msg, int flags, dfsan_label sockfd_label,\n    dfsan_label msg_label, dfsan_label flags_label, dfsan_label *ret_label) {\n  ssize_t ret = recvmsg(sockfd, msg, flags);\n  if (ret >= 0) {\n    taint_handle_msg(sockfd, msg, ret);\n  }\n  *ret_label = 0;\n  return ret;\n}\n\nSANITIZER_INTERFACE_ATTRIBUTE int __dfsw_recvmmsg(\n    int sockfd, struct mmsghdr *msgvec, unsigned int vlen, int flags,\n    struct timespec *timeout, dfsan_label sockfd_label,\n    dfsan_label msgvec_label, dfsan_label vlen_label, dfsan_label flags_label,\n    dfsan_label timeout_label, dfsan_label *ret_label) {\n  int ret = recvmmsg(sockfd, msgvec, vlen, flags, timeout);\n  for (int i = 0; i < ret; ++i) {\n    dfsan_set_label(0, &msgvec[i].msg_len, sizeof(msgvec[i].msg_len));\n    taint_handle_msg(sockfd, &msgvec[i].msg_hdr, msgvec[i].msg_len);\n  }\n  *ret_label = 0;\n  return ret;\n}\n\nSANITIZER_INTERFACE_ATTRIBUTE int\n__dfsw_socketpair(int domain, int type, int protocol, int sv[2],\n                  dfsan_label domain_label, dfsan_label type_label,\n                  dfsan_label protocol_label, dfsan_label sv_label,\n                  dfsan_label *ret_label) {\n  int ret = socketpair(domain, type, protocol, sv);\n  *ret_label = 0;\n  if (ret == 0) {\n    dfsan_set_label(0, sv, sizeof(*sv) * 2);\n  }\n  return ret;\n}\n\n// Type of the trampoline function passed to the custom version of\n// dfsan_set_write_callback.\ntypedef void (*write_trampoline_t)(\n    void *callback,\n    int fd, const void *buf, 
ssize_t count,\n    dfsan_label fd_label, dfsan_label buf_label, dfsan_label count_label);\n\n// Calls to dfsan_set_write_callback() set the values in this struct.\n// Calls to the custom version of write() read (and invoke) them.\nstatic struct {\n  write_trampoline_t write_callback_trampoline = nullptr;\n  void *write_callback = nullptr;\n} write_callback_info;\n\nSANITIZER_INTERFACE_ATTRIBUTE void\n__dfsw_dfsan_set_write_callback(\n    write_trampoline_t write_callback_trampoline,\n    void *write_callback,\n    dfsan_label write_callback_label,\n    dfsan_label *ret_label) {\n  write_callback_info.write_callback_trampoline = write_callback_trampoline;\n  write_callback_info.write_callback = write_callback;\n  *ret_label = 0;\n}\n\nSANITIZER_INTERFACE_ATTRIBUTE int\n__dfsw_write(int fd, const void *buf, size_t count,\n             dfsan_label fd_label, dfsan_label buf_label,\n             dfsan_label count_label, dfsan_label *ret_label) {\n  if (write_callback_info.write_callback) {\n    write_callback_info.write_callback_trampoline(\n        write_callback_info.write_callback,\n        fd, buf, count,\n        fd_label, buf_label, count_label);\n  }\n\n  *ret_label = 0;\n  int ret = write(fd, buf, count);\n#if AIXCC_HACK\n  if (buf && tainted.buf && !internal_memcmp(buf, tainted.buf, count)) {\n    AOUT(\"Closing aixcc pipefd %d\\n\", fd);\n    close(fd);\n  }\n#endif\n  return ret;\n}\n\n} // extern \"C\"\n\n// Type used to extract a dfsan_label with va_arg()\ntypedef int dfsan_label_va;\n\n// Formats a chunk either a constant string or a single format directive (e.g.,\n// '%.3f').\nstruct Formatter {\n  Formatter(char *str_, const char *fmt_, size_t size_)\n      : str(str_), str_off(0), size(size_), fmt_start(fmt_), fmt_cur(fmt_),\n        width(-1) {}\n\n  int format() {\n    char *tmp_fmt = build_format_string();\n    int retval =\n        snprintf(str + str_off, str_off < size ? 
size - str_off : 0, tmp_fmt,\n                 0 /* used only to avoid warnings */);\n    free(tmp_fmt);\n    return retval;\n  }\n\n  template <typename T> int format(T arg) {\n    char *tmp_fmt = build_format_string();\n    int retval;\n    if (width >= 0) {\n      retval = snprintf(str + str_off, str_off < size ? size - str_off : 0,\n                        tmp_fmt, width, arg);\n    } else {\n      retval = snprintf(str + str_off, str_off < size ? size - str_off : 0,\n                        tmp_fmt, arg);\n    }\n    free(tmp_fmt);\n    return retval;\n  }\n\n  char *build_format_string() {\n    size_t fmt_size = fmt_cur - fmt_start + 1;\n    char *new_fmt = (char *)malloc(fmt_size + 1);\n    assert(new_fmt);\n    internal_memcpy(new_fmt, fmt_start, fmt_size);\n    new_fmt[fmt_size] = '\\0';\n    return new_fmt;\n  }\n\n  char *str_cur() { return str + str_off; }\n\n  size_t num_written_bytes(int retval) {\n    if (retval < 0) {\n      return 0;\n    }\n\n    size_t num_avail = str_off < size ? size - str_off : 0;\n    if (num_avail == 0) {\n      return 0;\n    }\n\n    size_t num_written = retval;\n    // A return value of {v,}snprintf of size or more means that the output was\n    // truncated.\n    if (num_written >= num_avail) {\n      num_written -= num_avail;\n    }\n\n    return num_written;\n  }\n\n  char *str;\n  size_t str_off;\n  size_t size;\n  const char *fmt_start;\n  const char *fmt_cur;\n  int width;\n};\n\n// Formats the input and propagates the input labels to the output. The output\n// is stored in 'str'. 'size' bounds the number of output bytes. 'format' and\n// 'ap' are the format string and the list of arguments for formatting. Returns\n// the return value vsnprintf would return.\n//\n// The function tokenizes the format string in chunks representing either a\n// constant string or a single format directive (e.g., '%.3f') and formats each\n// chunk independently into the output string. 
This approach allows to figure\n// out which bytes of the output string depends on which argument and thus to\n// propagate labels more precisely.\n//\n// WARNING: This implementation does not support conversion specifiers with\n// positional arguments.\nstatic int format_buffer(char *str, size_t size, const char *fmt,\n                         dfsan_label *va_labels, dfsan_label *ret_label,\n                         va_list ap) {\n  Formatter formatter(str, fmt, size);\n\n  while (*formatter.fmt_cur) {\n    formatter.fmt_start = formatter.fmt_cur;\n    formatter.width = -1;\n    int retval = 0;\n\n    if (*formatter.fmt_cur != '%') {\n      // Ordinary character. Consume all the characters until a '%' or the end\n      // of the string.\n      for (; *(formatter.fmt_cur + 1) && *(formatter.fmt_cur + 1) != '%';\n           ++formatter.fmt_cur) {}\n      retval = formatter.format();\n      dfsan_set_label(0, formatter.str_cur(),\n                      formatter.num_written_bytes(retval));\n    } else {\n      // Conversion directive. 
Consume all the characters until a conversion\n      // specifier or the end of the string.\n      bool end_fmt = false;\n      for (; *formatter.fmt_cur && !end_fmt; ) {\n        switch (*++formatter.fmt_cur) {\n        case 'd':\n        case 'i':\n        case 'o':\n        case 'u':\n        case 'x':\n        case 'X':\n          switch (*(formatter.fmt_cur - 1)) {\n          case 'h':\n            // Also covers the 'hh' case (since the size of the arg is still\n            // an int).\n            retval = formatter.format(va_arg(ap, int));\n            break;\n          case 'l':\n            if (formatter.fmt_cur - formatter.fmt_start >= 2 &&\n                *(formatter.fmt_cur - 2) == 'l') {\n              retval = formatter.format(va_arg(ap, long long int));\n            } else {\n              retval = formatter.format(va_arg(ap, long int));\n            }\n            break;\n          case 'q':\n            retval = formatter.format(va_arg(ap, long long int));\n            break;\n          case 'j':\n            retval = formatter.format(va_arg(ap, intmax_t));\n            break;\n          case 'z':\n          case 't':\n            retval = formatter.format(va_arg(ap, size_t));\n            break;\n          default:\n            retval = formatter.format(va_arg(ap, int));\n          }\n          //dfsan_set_label(*va_labels++, formatter.str_cur(),\n          //                formatter.num_written_bytes(retval));\n          end_fmt = true;\n          break;\n\n        case 'a':\n        case 'A':\n        case 'e':\n        case 'E':\n        case 'f':\n        case 'F':\n        case 'g':\n        case 'G':\n          if (*(formatter.fmt_cur - 1) == 'L') {\n            retval = formatter.format(va_arg(ap, long double));\n          } else {\n            retval = formatter.format(va_arg(ap, double));\n          }\n          //dfsan_set_label(*va_labels++, formatter.str_cur(),\n          //                formatter.num_written_bytes(retval));\n     
     end_fmt = true;\n          break;\n\n        case 'c':\n          retval = formatter.format(va_arg(ap, int));\n          //dfsan_set_label(*va_labels++, formatter.str_cur(),\n          //                formatter.num_written_bytes(retval));\n          end_fmt = true;\n          break;\n\n        case 's': {\n          char *arg = va_arg(ap, char *);\n          retval = formatter.format(arg);\n          va_labels++;\n          internal_memcpy(shadow_for(formatter.str_cur()), shadow_for(arg),\n                          sizeof(dfsan_label) *\n                              formatter.num_written_bytes(retval));\n          end_fmt = true;\n          break;\n        }\n\n        case 'p':\n          retval = formatter.format(va_arg(ap, void *));\n          //dfsan_set_label(*va_labels++, formatter.str_cur(),\n          //                formatter.num_written_bytes(retval));\n          end_fmt = true;\n          break;\n\n        case 'n': {\n          int *ptr = va_arg(ap, int *);\n          *ptr = (int)formatter.str_off;\n          va_labels++;\n          dfsan_set_label(0, ptr, sizeof(ptr));\n          end_fmt = true;\n          break;\n        }\n\n        case '%':\n          retval = formatter.format();\n          dfsan_set_label(0, formatter.str_cur(),\n                          formatter.num_written_bytes(retval));\n          end_fmt = true;\n          break;\n\n        case '*':\n          formatter.width = va_arg(ap, int);\n          va_labels++;\n          break;\n\n        default:\n          break;\n        }\n      }\n    }\n\n    if (retval < 0) {\n      return retval;\n    }\n\n    formatter.fmt_cur++;\n    formatter.str_off += retval;\n  }\n\n  *ret_label = 0;\n\n  // Number of bytes written in total.\n  return formatter.str_off;\n}\n\nextern \"C\" {\nSANITIZER_INTERFACE_ATTRIBUTE\nint __dfsw_sprintf(char *str, const char *format, dfsan_label str_label,\n                   dfsan_label format_label, dfsan_label *va_labels,\n                   
dfsan_label *ret_label, ...) {\n  va_list ap;\n  va_start(ap, ret_label);\n  int ret = format_buffer(str, ~0ul, format, va_labels, ret_label, ap);\n  va_end(ap);\n  *ret_label = 0;\n  return ret;\n}\n\nSANITIZER_INTERFACE_ATTRIBUTE\nint __dfsw_snprintf(char *str, size_t size, const char *format,\n                    dfsan_label str_label, dfsan_label size_label,\n                    dfsan_label format_label, dfsan_label *va_labels,\n                    dfsan_label *ret_label, ...) {\n  va_list ap;\n  va_start(ap, ret_label);\n  int ret = format_buffer(str, size, format, va_labels, ret_label, ap);\n  va_end(ap);\n  *ret_label = 0;\n  return ret;\n}\n\n// Default empty implementations (weak). Users should redefine them.\nSANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_cov_trace_pc_guard, u32 *) {}\nSANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_cov_trace_pc_guard_init, u32 *,\n                             u32 *) {}\nSANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_cov_pcs_init, const uptr *beg,\n                             const uptr *end) {}\nSANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_cov_trace_pc_indir, void) {}\n\nSANITIZER_INTERFACE_WEAK_DEF(void, __dfsw___sanitizer_cov_trace_cmp, void) {}\nSANITIZER_INTERFACE_WEAK_DEF(void, __dfsw___sanitizer_cov_trace_cmp1, void) {}\nSANITIZER_INTERFACE_WEAK_DEF(void, __dfsw___sanitizer_cov_trace_cmp2, void) {}\nSANITIZER_INTERFACE_WEAK_DEF(void, __dfsw___sanitizer_cov_trace_cmp4, void) {}\nSANITIZER_INTERFACE_WEAK_DEF(void, __dfsw___sanitizer_cov_trace_cmp8, void) {}\nSANITIZER_INTERFACE_WEAK_DEF(void, __dfsw___sanitizer_cov_trace_const_cmp1,\n                             void) {}\nSANITIZER_INTERFACE_WEAK_DEF(void, __dfsw___sanitizer_cov_trace_const_cmp2,\n                             void) {}\nSANITIZER_INTERFACE_WEAK_DEF(void, __dfsw___sanitizer_cov_trace_const_cmp4,\n                             void) {}\nSANITIZER_INTERFACE_WEAK_DEF(void, __dfsw___sanitizer_cov_trace_const_cmp8,\n                             void) 
{}\nSANITIZER_INTERFACE_WEAK_DEF(void, __dfsw___sanitizer_cov_trace_switch, void) {}\n\nSANITIZER_INTERFACE_ATTRIBUTE int\n__dfsw_open(const char *path, int oflags, dfsan_label path_label,\n            dfsan_label flag_label, dfsan_label *va_labels,\n            dfsan_label *ret_label, ...) {\n  va_list args;\n  va_start(args, ret_label);\n  int fd = open(path, oflags, args);\n  va_end(args);\n\n  if (fd)\n    taint_set_file(AT_FDCWD, path, fd);\n  *ret_label = 0;\n  return fd;\n}\n\nSANITIZER_INTERFACE_ATTRIBUTE int\n__dfsw_openat(int dirfd, const char *path, int oflags, dfsan_label dirfd_label,\n              dfsan_label path_label, dfsan_label flag_label,\n              dfsan_label *va_labels, dfsan_label *ret_label, ...) {\n  va_list args;\n  va_start(args, ret_label);\n  int fd = openat(dirfd, path, oflags, args);\n  va_end(args);\n\n  if (fd)\n    taint_set_file(dirfd, path, fd);\n  *ret_label = 0;\n  return fd;\n}\n\nSANITIZER_INTERFACE_ATTRIBUTE FILE *\n__dfsw_fopen(const char *filename, const char *mode,\n             dfsan_label filename_label, dfsan_label mode_label,\n             dfsan_label *ret_label) {\n  FILE *ret = fopen(filename, mode);\n  if (ret) {\n    AOUT(\"%d fd is fopened\\n\", fileno(ret));\n    taint_set_file(AT_FDCWD, filename, fileno(ret));\n  }\n  *ret_label = 0;\n  return ret;\n}\n\nSANITIZER_INTERFACE_ATTRIBUTE FILE *\n__dfsw_fopen64(const char *filename, const char *mode,\n               dfsan_label filename_label, dfsan_label mode_label,\n               dfsan_label *ret_label) {\n  FILE *ret = fopen64(filename, mode);\n  if (ret) {\n    AOUT(\"%d fd is fopened\\n\", fileno(ret));\n    taint_set_file(AT_FDCWD, filename, fileno(ret));\n  }\n  *ret_label = 0;\n  return ret;\n}\n\nSANITIZER_INTERFACE_ATTRIBUTE FILE *\n__dfsw_freopen(const char *filename, const char *mode,\n               FILE *stream, dfsan_label filename_label,\n               dfsan_label mode_label, dfsan_label stream_label,\n               dfsan_label *ret_label) 
{\n  FILE *ret = freopen(filename, mode, stream);\n  if (ret)\n    taint_set_file(AT_FDCWD, filename, fileno(ret));\n  *ret_label = 0;\n  return ret;\n}\n\nSANITIZER_INTERFACE_ATTRIBUTE int\n__dfsw_close(int fd, dfsan_label fd_label, dfsan_label *ret_label) {\n  taint_close_file(fd);\n  *ret_label = 0;\n  return close(fd);\n}\n\nSANITIZER_INTERFACE_ATTRIBUTE int\n__dfsw_fclose(FILE *fp, dfsan_label fp_label, dfsan_label *ret_label) {\n  int fd = fileno(fp);\n  int ret = fclose(fp);\n  if (!ret) taint_close_file(fd);\n  *ret_label = 0;\n  return ret;\n}\n\nSANITIZER_INTERFACE_ATTRIBUTE size_t\n__dfsw_fread(void *ptr, size_t size, size_t nmemb, FILE *stream,\n             dfsan_label ptr_label, dfsan_label size_label,\n             dfsan_label nmemb_label, dfsan_label stream_label,\n             dfsan_label *ret_label) {\n  int fd = fileno(stream);\n  off_t tfsize = taint_get_file(fd);\n  off_t offset = ftell(stream);\n  *ret_label = 0;\n#if 0\n  // check taint file size\n  if (tfsize && (tfsize < offset + (size * nmemb))) {\n    // if smaller than a tainted offset, enlarge\n    dfsan_label offset_label = taint_get_offset_label();\n    if (offset_label) {\n      AOUT(\"fread(%u,%u) from tainted offset %lld\\n\", size, nmemb, offset);\n      // instead of read, write\n      internal_memset(ptr, 0, size * nmemb);\n      fwrite(ptr, size, nmemb, stream);\n      // update taint\n      for (size_t i = 0; i < size * nmemb; i++) {\n        dfsan_set_label(dfsan_create_label(offset + i), (char *)ptr + i, 1);\n      }\n      return nmemb; // directly return\n    }\n  }\n#endif\n  __taint_check_bounds(ptr_label, (uptr)ptr, nmemb_label, size * nmemb);\n  if (nmemb_label)\n    __taint_solve_bounds(ptr_label, (uint64_t)ptr, nmemb_label, nmemb, 0, size, 0, 0);\n  size_t ret = fread(ptr, size, nmemb, stream);\n  AOUT(\"fread(%lu,%lu) = %ld, off = %ld\\n\", size, nmemb, ret, offset);\n  if (ret) {\n    if (tfsize) {\n      for (size_t i = 0; i < ret * size; i++) {\n        
dfsan_set_label(get_label_for(fd, offset + i), (char *)ptr + i, 1);\n      }\n      // for (size_t i = ret * size; i < size * nmemb; i++) {\n      //   dfsan_set_label(-1, (char *)ptr + i, 1);\n      // }\n      // *ret_label = dfsan_union(0, 0, fsize, sizeof(ret) * 8, offset, 0);\n    } else {\n      dfsan_set_label(0, ptr, ret * size);\n    }\n  }\n  return ret;\n}\n\nSANITIZER_INTERFACE_ATTRIBUTE size_t\n__dfsw_fread_unlocked(\n             void *ptr, size_t size, size_t nmemb, FILE *stream,\n             dfsan_label ptr_label, dfsan_label size_label,\n             dfsan_label nmemb_label, dfsan_label stream_label,\n             dfsan_label *ret_label) {\n  int fd = fileno(stream);\n  off_t tfsize = taint_get_file(fd);\n  off_t offset = ftell(stream);\n  *ret_label = 0;\n#if 0\n  // check taint file size\n  if (tfsize && (tfsize < offset + (size * nmemb))) {\n    // if smaller than a tainted offset, enlarge\n    dfsan_label offset_label = taint_get_offset_label();\n    if (offset_label) {\n      AOUT(\"fread(%u,%u) from tainted offset %lld\\n\", size, nmemb, offset);\n      // instead of read, write\n      internal_memset(ptr, 0, size * nmemb);\n      fwrite(ptr, size, nmemb, stream);\n      // update taint\n      for (size_t i = 0; i < size * nmemb; i++) {\n        dfsan_set_label(dfsan_create_label(offset + i), (char *)ptr + i, 1);\n      }\n      return nmemb; // directly return\n    }\n  }\n#endif\n  __taint_check_bounds(ptr_label, (uptr)ptr, nmemb_label, size * nmemb);\n  if (nmemb_label)\n    __taint_solve_bounds(ptr_label, (uint64_t)ptr, nmemb_label, nmemb, 0, size, 0, 0);\n  size_t ret = fread_unlocked(ptr, size, nmemb, stream);\n  AOUT(\"fread(%lu,%lu) = %ld, off = %ld\\n\", size, nmemb, ret, offset);\n  if (ret) {\n    if (tfsize) {\n      for (size_t i = 0; i < ret * size; i++) {\n        dfsan_set_label(get_label_for(fd, offset + i), (char *)ptr + i, 1);\n      }\n      // for (size_t i = ret * size; i < nmemb * size; i++) {\n      //   
dfsan_set_label(-1, (char *)ptr + i, 1);\n      // }\n      // *ret_label = dfsan_union(0, 0, fsize, sizeof(ret) * 8, offset, 0);\n    } else {\n      dfsan_set_label(0, ptr, ret * size);\n    }\n  }\n  return ret;\n}\n\nSANITIZER_INTERFACE_ATTRIBUTE ssize_t\n__dfsw_getline(char **lineptr, size_t *n, FILE *stream,\n               dfsan_label lineptr_label, dfsan_label n_label,\n               dfsan_label stream_label, dfsan_label *ret_label) {\n  int fd = fileno(stream);\n  off_t offset = ftell(stream);\n  ssize_t ret = getline(lineptr, n, stream);\n  *ret_label = 0;\n  if (ret) {\n    if (taint_get_file(fd)) {\n      // including a terminating null byte\n      for (ssize_t i = 0; i < ret; i++) {\n        void *addr = (*lineptr) + i;\n        dfsan_set_label(get_label_for(fd, offset + i), addr, 1);\n      }\n      dfsan_set_label(0, (*lineptr) + ret, 1);\n      // *ret_label = dfsan_union(0, 0, fsize, sizeof(ret) * 8, offset, 0);\n      // FIXME: set the label for the ptr to track the buffer size\n    } else {\n      dfsan_set_label(0, *lineptr, ret + 1);\n    }\n  }\n  return ret;\n}\n\n// ssize_t getdelim(char **lineptr, size_t *n, int delim, FILE *stream);\nSANITIZER_INTERFACE_ATTRIBUTE ssize_t\n__dfsw_getdelim(char **lineptr, size_t *n, int delim, FILE *stream,\n                dfsan_label buf_label, dfsan_label size_label,\n                dfsan_label delim_label, dfsan_label stream_label,\n                dfsan_label *ret_label) {\n  int fd = fileno(stream);\n  off_t offset = ftell(stream);\n  ssize_t ret = getdelim(lineptr, n, delim, stream);\n  *ret_label = 0;\n  if (ret) {\n    if (taint_get_file(fd)) {\n      // including a terminating null byte\n      for(ssize_t i = 0; i < ret; i++) {\n        void *addr = (*lineptr) + i;\n        dfsan_set_label(get_label_for(fd, offset + i), addr, 1);\n        // FIXME: set the label for the ptr to track the buffer size\n      }\n      dfsan_set_label(0, (*lineptr) + ret, 1);\n      // *ret_label = dfsan_union(0, 0, 
fsize, sizeof(ret) * 8, offset, 0);\n    } else {\n      dfsan_set_label(0, *lineptr, ret + 1);\n    }\n  }\n  return ret;\n}\n\nSANITIZER_INTERFACE_ATTRIBUTE ssize_t\n__dfsw___getdelim(char **lineptr, size_t *n, int delim, FILE *stream,\n                  dfsan_label buf_label, dfsan_label size_label,\n                  dfsan_label delim_label, dfsan_label stream_label,\n                  dfsan_label *ret_label) {\n  int fd = fileno(stream);\n  off_t offset = ftell(stream);\n  ssize_t ret = __getdelim(lineptr, n, delim, stream);\n  *ret_label = 0;\n  if (ret) {\n    if (taint_get_file(fd)) {\n      for(ssize_t i = 0; i < ret; i++) {\n        void *addr = (*lineptr) + i;\n        dfsan_set_label(get_label_for(fd, offset + i), addr, 1);\n      }\n      dfsan_set_label(0, (*lineptr) + ret, 1);\n      // *ret_label = dfsan_union(0, 0, fsize, sizeof(ret) * 8, offset, 0);\n      // FIXME: set the label for the ptr to track the buffer size\n    } else {\n      dfsan_set_label(0, *lineptr, ret + 1);\n    }\n  }\n  return ret;\n}\n\nSANITIZER_INTERFACE_ATTRIBUTE char*\n__dfsw_gets(char *str, dfsan_label str_label, dfsan_label *ret_label) {\n  off_t offset = ftell(stdin);\n  // gets discard until c11\n  char *ret = fgets(str, sizeof(str), stdin);\n  if (ret && taint_get_file(0)) {\n    for (off_t i = 0; i <= strlen(ret); i++)\n      dfsan_set_label(get_label_for(0, offset + i), ret + i, 1);\n    *ret_label = str_label;\n  } else {\n    *ret_label = 0;\n  }\n  return ret;\n}\n\nSANITIZER_INTERFACE_ATTRIBUTE int\n__dfsw_utmpxname(const char *file, dfsan_label file_label, dfsan_label *ret_label) {\n  if (is_taint_file(file)) {\n    set_utmp_offset(0);\n  }\n  int ret = utmpxname(file);\n  *ret_label = 0;\n  return ret;\n}\n\nSANITIZER_INTERFACE_ATTRIBUTE void\n__dfsw_setutxent(void) {\n  if (is_utmp_taint())\n    set_utmp_offset(0);\n  setutxent();\n}\n\nSANITIZER_INTERFACE_ATTRIBUTE struct utmpx *\n__dfsw_getutxent(dfsan_label *ret_label) {\n  struct utmpx *ret = 
getutxent();\n  *ret_label = 0;\n  if (ret && is_utmp_taint()) {\n    off_t offset = get_utmp_offset();\n    for (size_t i = 0; i < sizeof(struct utmpx); i++) {\n      dfsan_set_label(get_label_for(-1, offset + i), (char *)ret + i, 1);\n    }\n    set_utmp_offset(offset + sizeof(struct utmpx));\n  }\n  return ret;\n}\n\nSANITIZER_INTERFACE_ATTRIBUTE\nchar *__dfsw_fgets(char *s, int size, FILE *stream, dfsan_label s_label,\n                   dfsan_label size_label, dfsan_label stream_label,\n                   dfsan_label *ret_label) {\n  int fd = fileno(stream);\n  off_t offset = ftell(stream);\n  __taint_check_bounds(s_label, (uptr)s, size_label, size);\n  if (size_label)\n    __taint_solve_bounds(s_label, (uint64_t)s, size_label, size, 0, 1, 0, 0);\n  char *ret = fgets(s, size, stream);\n  if (ret) {\n    if (taint_get_file(fd)) {\n      // including terminating \\0\n      for (size_t i = 0; i < strlen(ret); i++) {\n        char *buf = s + i;\n        dfsan_set_label(get_label_for(fd, offset + i), buf, 1);\n      }\n      dfsan_set_label(0, s + strlen(ret), 1);\n      // for(int i = strlen(ret) + 1; i < size; i++) {\n      //   char *buf = s + i;\n      //   dfsan_set_label(-1, buf, 1);\n      // }\n    } else {\n      dfsan_set_label(0, s, strlen(ret) + 1);\n    }\n    *ret_label = s_label;\n  } else *ret_label = 0;\n  return ret;\n}\n\nSANITIZER_INTERFACE_ATTRIBUTE\nchar *__dfsw_fgets_unlocked(char *s, int size, FILE *stream, dfsan_label s_label,\n                   dfsan_label size_label, dfsan_label stream_label,\n                   dfsan_label *ret_label) {\n  int fd = fileno(stream);\n  off_t offset = ftell(stream);\n  __taint_check_bounds(s_label, (uptr)s, size_label, size);\n  if (size_label)\n    __taint_solve_bounds(s_label, (uint64_t)s, size_label, size, 0, 1, 0, 0);\n  char *ret = fgets_unlocked(s, size, stream);\n  if (ret) {\n    if (taint_get_file(fd)) {\n      // including terminating \\0\n      for (size_t i = 0; i < strlen(ret); i++) {\n        
char *buf = s + i;\n        dfsan_set_label(get_label_for(fd, offset + i), buf, 1);\n      }\n      dfsan_set_label(0, s + strlen(ret), 1);\n      // for(int i = strlen(ret) + 1; i < size; i++) {\n      //   char *buf = s + i;\n      //   dfsan_set_label(-1, buf, 1);\n      // }\n    } else {\n      dfsan_set_label(0, s, strlen(ret) + 1);\n    }\n    *ret_label = s_label;\n  } else {\n    *ret_label = 0;\n  }\n  return ret;\n}\n\nstatic inline void __taint_check_malloc_size(size_t size, dfsan_label size_label) {\n  if (size_label && flags().solve_ub) {\n    AOUT(\"*alloc size: %lu = %d\\n\", size, size_label);\n    // -fsanitize=unsigned-integer-overflow\n    dfsan_label os = dfsan_union(0, size_label, (bveq << 8) | ICmp, 64, 0, size);\n    __taint_trace_cond(os, 0, UndefinedCheck, ub_integer_overflow);\n  }\n}\n\nSANITIZER_INTERFACE_ATTRIBUTE void *\n__dfsw_realloc(void *ptr, size_t new_size,\n               dfsan_label ptr_label, dfsan_label new_size_label,\n               dfsan_label *ret_label) {\n  __taint_check_malloc_size(new_size, new_size_label);\n  void *ret = malloc(new_size);\n  *ret_label = 0;\n\n  if (ret) {\n    internal_memset(shadow_for(ret), 0, sizeof(dfsan_label) * new_size);\n    if (flags().trace_bounds) {\n      dfsan_label bound = dfsan_union(0, new_size_label, Alloca, sizeof(ret) * 8,\n          (uint64_t)ret, (uint64_t)ret + new_size);\n      *ret_label = bound;\n    }\n  }\n\n  if (ptr) {\n    if (ret) {\n      // copy old labels\n      size_t size = malloc_usable_size(ptr);\n      size = size < new_size ? 
size : new_size;\n      internal_memcpy(ret, ptr, size);\n      internal_memcpy(shadow_for(ret), shadow_for(ptr), sizeof(dfsan_label) * size);\n    }\n    if (flags().trace_bounds) {\n      // mark old buffer as freed without truely free it\n      dfsan_label_info *info = dfsan_get_label_info(ptr_label);\n      if (info->op != Alloca) {\n        AOUT(\"WARNING: wrong ptr op %d = %d\\n\", ptr_label, info->op);\n        // Die();\n      } else info->op = Free;\n    } else {\n      free(ptr);\n    }\n  }\n\n  if (flags().trace_bounds) {\n    AOUT(\"old ptr: %p = %d, new size: %lu = %d, new ptr: %p = %d\\n\", ptr, ptr_label,\n        new_size, new_size_label, ret, *ret_label);\n  }\n\n  return ret;\n}\n\nSANITIZER_INTERFACE_ATTRIBUTE void *\n__dfsw___libc_realloc(void *ptr, size_t new_size,\n                      dfsan_label ptr_label, dfsan_label new_size_label,\n                      dfsan_label *ret_label) {\n  __taint_check_malloc_size(new_size, new_size_label);\n  void *ret = malloc(new_size);\n  *ret_label = 0;\n  if (ret) {\n    internal_memset(shadow_for(ret), 0, sizeof(dfsan_label) * new_size);\n    if (flags().trace_bounds) {\n      dfsan_label bound = dfsan_union(0, new_size_label, Alloca, sizeof(ret) * 8,\n          (uint64_t)ret, (uint64_t)ret + new_size);\n      *ret_label = bound;\n    }\n  }\n\n  if (ptr) {\n    if (ret) {\n      // copy old labels\n      size_t size = malloc_usable_size(ptr);\n      size = size < new_size ? 
size : new_size;\n      internal_memcpy(ret, ptr, size);\n      internal_memcpy(shadow_for(ret), shadow_for(ptr), sizeof(dfsan_label) * size);\n    }\n    if (flags().trace_bounds) {\n      // mark old buffer as freed without truely free it\n      dfsan_label_info *info = dfsan_get_label_info(ptr_label);\n      if (info->op != Alloca) {\n        AOUT(\"WARNING: wrong ptr op %d = %d\\n\", ptr_label, info->op);\n        // Die();\n      } else info->op = Free;\n    } else {\n      free(ptr);\n    }\n  }\n\n  if (flags().trace_bounds) {\n    AOUT(\"old ptr: %p = %d, new size: %lu = %d, new ptr: %p = %d\\n\", ptr, ptr_label,\n        new_size, new_size_label, ret, *ret_label);\n  }\n\n  return ret;\n}\n\nSANITIZER_INTERFACE_ATTRIBUTE\nvoid *__dfsw_reallocarray(void *ptr, size_t nmemb, size_t new_size,\n                          dfsan_label ptr_label, dfsan_label nmemb_label,\n                          dfsan_label new_size_label, dfsan_label *ret_label) {\n  if (nmemb_label != 0 || new_size_label != 0) {\n    dfsan_label byte_size = dfsan_union(nmemb_label, new_size_label, Mul,\n        64, nmemb, new_size);\n    __taint_check_malloc_size(nmemb * new_size, byte_size);\n  }\n  void *ret = calloc(nmemb, new_size);\n  *ret_label = 0;\n  if (ret) {\n    internal_memset(shadow_for(ret), 0, sizeof(dfsan_label) * new_size * nmemb);\n    if (flags().trace_bounds) {\n      dfsan_label bound = dfsan_union(nmemb_label, new_size_label, Alloca, sizeof(ret) * 8,\n          (uint64_t)ret, (uint64_t)ret + (new_size * nmemb));\n      *ret_label = bound;\n    }\n  }\n\n  if (ptr) {\n    if (ret) {\n      // copy old labels\n      size_t size = malloc_usable_size(ptr);\n      size = size < new_size ? 
size : new_size * nmemb;\n      internal_memcpy(ret, ptr, size);\n      internal_memcpy(shadow_for(ret), shadow_for(ptr), sizeof(dfsan_label) * size);\n    }\n    if (flags().trace_bounds) {\n      // mark old buffer as freed without truely free it\n      dfsan_label_info *info = dfsan_get_label_info(ptr_label);\n      if (info->op != Alloca) {\n        AOUT(\"WARNING: wrong ptr op %d = %d\\n\", ptr_label, info->op);\n        // Die();\n      } else info->op = Free;\n    } else {\n      free(ptr);\n    }\n  }\n\n  if (flags().trace_bounds) {\n    AOUT(\"old ptr: %p = %d, new size: %lu = %d, new ptr: %p = %d\\n\", ptr, ptr_label,\n        new_size, new_size_label, ret, *ret_label);\n  }\n\n  return ret;\n}\n\nSANITIZER_INTERFACE_ATTRIBUTE\nvoid *__dfsw___libc_reallocarray(void *ptr, size_t nmemb, size_t new_size,\n                                 dfsan_label ptr_label, dfsan_label nmemb_label,\n                                 dfsan_label new_size_label, dfsan_label *ret_label) {\n  if (nmemb_label != 0 || new_size_label != 0) {\n    dfsan_label byte_size = dfsan_union(nmemb_label, new_size_label, Mul,\n        64, nmemb, new_size);\n    __taint_check_malloc_size(nmemb * new_size, byte_size);\n  }\n  void *ret = calloc(nmemb, new_size);\n  if (ret) {\n    internal_memset(shadow_for(ret), 0, sizeof(dfsan_label) * new_size * nmemb);\n    if (flags().trace_bounds) {\n      dfsan_label bound = dfsan_union(nmemb_label, new_size_label, Alloca, sizeof(ret) * 8,\n          (uint64_t)ret, (uint64_t)ret + (new_size * nmemb));\n      *ret_label = bound;\n    }\n  }\n\n  if (ptr) {\n    if (ret) {\n      // copy old labels\n      size_t size = malloc_usable_size(ptr);\n      size = size < new_size ? 
size : new_size * nmemb;\n      internal_memcpy(ret, ptr, size);\n      internal_memcpy(shadow_for(ret), shadow_for(ptr), sizeof(dfsan_label) * size);\n    }\n    if (flags().trace_bounds) {\n      // mark old buffer as freed without truely free it\n      dfsan_label_info *info = dfsan_get_label_info(ptr_label);\n      if (info->op != Alloca) {\n        AOUT(\"WARNING: wrong ptr op %d = %d\\n\", ptr_label, info->op);\n        // Die();\n      } else info->op = Free;\n    } else {\n      free(ptr);\n    }\n  }\n\n  if (flags().trace_bounds) {\n    AOUT(\"old ptr: %p = %d, nmemb: %lu = %d, new size: %lu = %d, new ptr: %p = %d\\n\",\n        ptr, ptr_label, nmemb, nmemb_label, new_size, new_size_label, ret, *ret_label);\n  }\n\n  return ret;\n}\n\nSANITIZER_INTERFACE_ATTRIBUTE\nvoid *__dfsw_calloc(size_t nmemb, size_t size,\n                    dfsan_label nmemb_label, dfsan_label size_label,\n                    dfsan_label *ret_label) {\n  if (nmemb_label != 0 || size_label != 0) {\n    dfsan_label byte_size = dfsan_union(nmemb_label, size_label, Mul,\n        64, nmemb, size);\n    __taint_check_malloc_size(nmemb * size, byte_size);\n  }\n  void *ret = calloc(nmemb, size);\n  *ret_label = 0;\n  if (ret) {\n    internal_memset(shadow_for(ret), 0, sizeof(dfsan_label) * size * nmemb);\n    if (flags().trace_bounds) {\n      dfsan_label bound = dfsan_union(nmemb_label, size_label, Alloca, sizeof(ret) * 8,\n          (uint64_t)ret, (uint64_t)ret + (size * nmemb));\n      *ret_label = bound;\n      AOUT(\"nmemb: %lu = %d, size: %lu = %d, addr: %p = %d\\n\", nmemb, nmemb_label,\n          size, size_label, ret, *ret_label);\n    }\n  }\n  return ret;\n}\n\nSANITIZER_INTERFACE_ATTRIBUTE\nvoid *__dfsw___libc_calloc(size_t nmemb, size_t size,\n                           dfsan_label nmemb_label, dfsan_label size_label,\n                           dfsan_label *ret_label) {\n  if (nmemb_label != 0 || size_label != 0) {\n    dfsan_label byte_size = dfsan_union(nmemb_label, 
size_label, Mul,\n        64, nmemb, size);\n    __taint_check_malloc_size(nmemb * size, byte_size);\n  }\n  void *ret = calloc(nmemb, size);\n  *ret_label = 0;\n  if (ret) {\n    internal_memset(shadow_for(ret), 0, sizeof(dfsan_label) * size * nmemb);\n    if (flags().trace_bounds) {\n      dfsan_label bound = dfsan_union(nmemb_label, size_label, Alloca, sizeof(ret) * 8,\n          (uint64_t)ret, (uint64_t)ret + (size * nmemb));\n      *ret_label = bound;\n      AOUT(\"nmemb: %lu = %d, size: %lu = %d, addr: %p = %d\\n\", nmemb, nmemb_label,\n          size, size_label, ret, *ret_label);\n    }\n  }\n  return ret;\n}\n\nSANITIZER_INTERFACE_ATTRIBUTE\nvoid *__dfsw_malloc(size_t size, dfsan_label size_label,\n                   dfsan_label *ret_label) {\n  __taint_check_malloc_size(size, size_label);\n  void *ret = malloc(size);\n  *ret_label = 0;\n  if (ret) {\n    internal_memset(shadow_for(ret), 0, sizeof(dfsan_label) * size);\n    if (flags().trace_bounds) {\n      dfsan_label bound = dfsan_union(0, size_label, Alloca, sizeof(ret) * 8,\n          (uint64_t)ret, (uint64_t)ret + size);\n      *ret_label = bound;\n      AOUT(\"length: %lu = %d, addr: %p = %d\\n\", size, size_label, ret, *ret_label);\n    }\n  }\n  return ret;\n}\n\nSANITIZER_INTERFACE_ATTRIBUTE\nvoid *__dfsw___libc_malloc(size_t size, dfsan_label size_label,\n                           dfsan_label *ret_label) {\n  __taint_check_malloc_size(size, size_label);\n  void *ret = malloc(size);\n  *ret_label = 0;\n  if (ret) {\n    internal_memset(shadow_for(ret), 0, sizeof(dfsan_label) * size);\n    if (flags().trace_bounds) {\n      dfsan_label bound = dfsan_union(0, size_label, Alloca, sizeof(ret) * 8,\n          (uint64_t)ret, (uint64_t)ret + size);\n      *ret_label = bound;\n      AOUT(\"length: %lu = %d, addr: %p = %d\\n\", size, size_label, ret, *ret_label);\n    }\n  }\n  return ret;\n}\n\nSANITIZER_INTERFACE_ATTRIBUTE\nvoid *__dfsw_aligned_alloc(size_t alignment, size_t size,\n                     
      dfsan_label alignment_label, dfsan_label size_label,\n                           dfsan_label *ret_label) {\n  __taint_check_malloc_size(size, size_label);\n  void *ret = aligned_alloc(alignment, size);\n  *ret_label = 0;\n  if (ret) {\n    internal_memset(shadow_for(ret), 0, sizeof(dfsan_label) * size);\n    if (flags().trace_bounds) {\n      dfsan_label bound = dfsan_union(0, size_label, Alloca, sizeof(ret) * 8,\n          (uint64_t)ret, (uint64_t)ret + size);\n      *ret_label = bound;\n      AOUT(\"length: %lu = %d, addr: %p = %d\\n\", size, size_label, ret, *ret_label);\n    }\n  }\n  return ret;\n}\n\nSANITIZER_INTERFACE_ATTRIBUTE\nint __dfsw_posix_memalign(void **memptr, size_t alignment, size_t size,\n                          dfsan_label memptr_label, dfsan_label alignment_label,\n                          dfsan_label size_label, dfsan_label *ret_label) {\n  __taint_check_malloc_size(size, size_label);\n  int ret = posix_memalign(memptr, alignment, size);\n  *ret_label = 0;\n  if (!ret && memptr && *memptr) {\n    internal_memset(shadow_for(*memptr), 0, sizeof(dfsan_label) * size);\n    if (flags().trace_bounds) {\n      dfsan_label bound = dfsan_union(0, size_label, Alloca, sizeof(*memptr) * 8,\n          (uint64_t)(*memptr), (uint64_t)(*memptr) + size);\n      dfsan_set_label(bound, memptr, sizeof(*memptr));\n      AOUT(\"length: %lu = %d, addr: %p = %d\\n\", size, size_label, *memptr, *ret_label);\n    }\n  }\n  return ret;\n}\n\nSANITIZER_INTERFACE_ATTRIBUTE\nvoid *__dfsw_valloc(size_t size, dfsan_label size_label, dfsan_label *ret_label) {\n  __taint_check_malloc_size(size, size_label);\n  void *ret = valloc(size);\n  *ret_label = 0;\n  if (ret) {\n    internal_memset(shadow_for(ret), 0, sizeof(dfsan_label) * size);\n    if (flags().trace_bounds) {\n      dfsan_label bound = dfsan_union(0, size_label, Alloca, sizeof(ret) * 8,\n          (uint64_t)ret, (uint64_t)ret + size);\n      *ret_label = bound;\n      AOUT(\"length: %lu = %d, addr: %p = 
%d\\n\", size, size_label, ret, *ret_label);\n    }\n  }\n  return ret;\n}\n\nSANITIZER_INTERFACE_ATTRIBUTE\nvoid *__dfsw___libc_valloc(size_t size, dfsan_label size_label, dfsan_label *ret_label) {\n  __taint_check_malloc_size(size, size_label);\n  void *ret = valloc(size);\n  *ret_label = 0;\n  if (ret) {\n    internal_memset(shadow_for(ret), 0, sizeof(dfsan_label) * size);\n    if (flags().trace_bounds) {\n      dfsan_label bound = dfsan_union(0, size_label, Alloca, sizeof(ret) * 8,\n          (uint64_t)ret, (uint64_t)ret + size);\n      *ret_label = bound;\n      AOUT(\"length: %lu = %d, addr: %p = %d\\n\", size, size_label, ret, *ret_label);\n    }\n  }\n  return ret;\n}\n\nSANITIZER_INTERFACE_ATTRIBUTE\nvoid *__dfsw_memalign(size_t alignment, size_t size, dfsan_label alignment_label,\n                      dfsan_label size_label, dfsan_label *ret_label) {\n  __taint_check_malloc_size(size, size_label);\n  void *ret = memalign(alignment, size);\n  *ret_label = 0;\n  if (ret) {\n    internal_memset(shadow_for(ret), 0, sizeof(dfsan_label) * size);\n    if (flags().trace_bounds) {\n      dfsan_label bound = dfsan_union(0, size_label, Alloca, sizeof(ret) * 8,\n          (uint64_t)ret, (uint64_t)ret + size);\n      *ret_label = bound;\n      AOUT(\"length: %lu = %d, addr: %p = %d\\n\", size, size_label, ret, *ret_label);\n    }\n  }\n  return ret;\n}\n\nSANITIZER_INTERFACE_ATTRIBUTE\nvoid *__dfsw___libc_memalign(size_t alignment, size_t size, dfsan_label alignment_label,\n                             dfsan_label size_label, dfsan_label *ret_label) {\n  __taint_check_malloc_size(size, size_label);\n  void *ret = memalign(alignment, size);\n  *ret_label = 0;\n  if (ret) {\n    internal_memset(shadow_for(ret), 0, sizeof(dfsan_label) * size);\n    if (flags().trace_bounds) {\n      dfsan_label bound = dfsan_union(0, size_label, Alloca, sizeof(ret) * 8,\n          (uint64_t)ret, (uint64_t)ret + size);\n      *ret_label = bound;\n      AOUT(\"length: %lu = %d, addr: %p = 
%d\\n\", size, size_label, ret, *ret_label);\n    }\n  }\n  return ret;\n}\n\nSANITIZER_INTERFACE_ATTRIBUTE\nvoid *__dfsw_pvalloc(size_t size, dfsan_label size_label, dfsan_label *ret_label) {\n  __taint_check_malloc_size(size, size_label);\n  void *ret = pvalloc(size);\n  *ret_label = 0;\n  if (ret) {\n    internal_memset(shadow_for(ret), 0, sizeof(dfsan_label) * size);\n    if (flags().trace_bounds) {\n      dfsan_label bound = dfsan_union(0, size_label, Alloca, sizeof(ret) * 8,\n          (uint64_t)ret, (uint64_t)ret + size);\n      *ret_label = bound;\n      AOUT(\"length: %lu = %d, addr: %p = %d\\n\", size, size_label, ret, *ret_label);\n    }\n  }\n  return ret;\n}\n\nSANITIZER_INTERFACE_ATTRIBUTE\nvoid *__dfsw___libc_pvalloc(size_t size, dfsan_label size_label, dfsan_label *ret_label) {\n  __taint_check_malloc_size(size, size_label);\n  void *ret = pvalloc(size);\n  *ret_label = 0;\n  if (ret) {\n    internal_memset(shadow_for(ret), 0, sizeof(dfsan_label) * size);\n    if (flags().trace_bounds) {\n      dfsan_label bound = dfsan_union(0, size_label, Alloca, sizeof(ret) * 8,\n          (uint64_t)ret, (uint64_t)ret + size);\n      *ret_label = bound;\n      AOUT(\"length: %lu = %d, addr: %p = %d\\n\", size, size_label, ret, *ret_label);\n    }\n  }\n  return ret;\n}\n\nSANITIZER_INTERFACE_ATTRIBUTE void __dfsw_free(void *ptr, dfsan_label ptr_label) {\n  if (ptr && flags().trace_bounds) {\n    // don't really free, a hacky way to avoid reusing the address\n    // just mark as freed\n    AOUT(\"addr: %p = %d\\n\", ptr, ptr_label);\n    dfsan_label_info *info = dfsan_get_label_info(ptr_label);\n    if (info->op == Alloca) {\n      info->op = Free;\n    } else if (info->op == Free) {\n      void *addr = __builtin_return_address(0);\n      AOUT(\"WARNING: double free %p = %d @%p\\n\", ptr, ptr_label, addr);\n      __taint_trace_memerr(ptr_label, (uptr)ptr, 0, 0, F_MEMERR_FREE, addr);\n    } else {\n      AOUT(\"WARNING: wrong ptr op %d = %d @%p\\n\", ptr_label, 
info->op,\n           __builtin_return_address(0));\n      // Die();\n    }\n  } else {\n    free(ptr);\n  }\n}\n\nSANITIZER_INTERFACE_ATTRIBUTE\nvoid __dfsw___libc_free(void *ptr, dfsan_label ptr_label) {\n  if (ptr && flags().trace_bounds) {\n    // don't really free, a hacky way to avoid reusing the address\n    // just mark as freed\n    AOUT(\"addr: %p = %d\\n\", ptr, ptr_label);\n    dfsan_label_info *info = dfsan_get_label_info(ptr_label);\n    if (info->op == Alloca) {\n      info->op = Free;\n    } else if (info->op == Free) {\n      void *addr = __builtin_return_address(0);\n      AOUT(\"WARNING: double free %p = %d @%p\\n\", ptr, ptr_label, addr);\n      __taint_trace_memerr(ptr_label, (uptr)ptr, 0, 0, F_MEMERR_FREE, addr);\n    } else {\n      AOUT(\"WARNING: wrong ptr op %d = %d\\n\", ptr_label, info->op);\n      // Die();\n    }\n  } else {\n    free(ptr);\n  }\n}\n\nstatic dfsan_label taint_getc(int fd, off_t offset, int ret) {\n  if (ret != EOF && taint_get_file(fd)) {\n    dfsan_label label = label = dfsan_union(get_label_for(fd, offset), CONST_LABEL, ZExt, 32, 0, 0);\n    AOUT(\"%d label is readed by fgetc\\n\", label);\n  }\n  return 0;\n}\n\nSANITIZER_INTERFACE_ATTRIBUTE int\n__dfsw_fgetc(FILE *stream, dfsan_label stream_label, dfsan_label *ret_label) {\n  int fd = fileno(stream);\n  off_t offset = ftell(stream);\n  int ret = fgetc(stream);\n  *ret_label = taint_getc(fd, offset, ret);\n  return ret;\n}\n\nSANITIZER_INTERFACE_ATTRIBUTE int\n__dfsw_fgetc_unlocked(FILE *stream, dfsan_label stream_label, dfsan_label *ret_label) {\n  int fd = fileno(stream);\n  off_t offset = ftell(stream);\n  int ret = fgetc(stream);\n  *ret_label = taint_getc(fd, offset, ret);\n  return ret;\n}\n\nSANITIZER_INTERFACE_ATTRIBUTE int\n__dfsw_getc(FILE *stream, dfsan_label stream_label, dfsan_label *ret_label) {\n  int fd = fileno(stream);\n  off_t offset = ftell(stream);\n  int ret = getc(stream);\n  *ret_label = taint_getc(fd, offset, ret);\n  return 
ret;\n}\n\nSANITIZER_INTERFACE_ATTRIBUTE int\n__dfsw_getc_unlocked(FILE *stream, dfsan_label stream_label,\n                     dfsan_label *ret_label) {\n  int fd = fileno(stream);\n  off_t offset = ftell(stream);\n  int ret = getc_unlocked(stream);\n  *ret_label = taint_getc(fd, offset, ret);\n  return ret;\n}\n\nSANITIZER_INTERFACE_ATTRIBUTE int\n__dfsw__IO_getc(FILE *stream, dfsan_label stream_label, dfsan_label *ret_label) {\n  int fd = fileno(stream);\n  off_t offset = ftell(stream);\n  int ret = getc(stream);\n  *ret_label = taint_getc(fd, offset, ret);\n  return ret;\n}\n\nSANITIZER_INTERFACE_ATTRIBUTE int\n__dfsw_getchar(dfsan_label *ret_label) {\n  off_t offset = ftell(stdin);\n  int ret = getchar();\n  *ret_label = taint_getc(0, offset, ret);\n  return ret;\n}\n\nSANITIZER_INTERFACE_ATTRIBUTE size_t\n__dfsw_mbrtowc(wchar_t *pwc, const char *s, size_t n, mbstate_t *ps,\n               dfsan_label pwc_label, dfsan_label s_label, dfsan_label\n               n_label, dfsan_label ps_label, dfsan_label *ret_label) {\n  *ret_label = 0;\n  size_t ret = mbrtowc(pwc, s, n, ps);\n  if (ret == (size_t)-1 || ret == (size_t)-2) return ret;\n  else if (pwc != 0) {\n    dfsan_label multibyte = dfsan_read_label(s, ret);\n    assert(false);\n    dfsan_store_label(multibyte, (void *)pwc, sizeof(wchar_t));\n  }\n  return ret;\n}\n\nSANITIZER_INTERFACE_ATTRIBUTE void*\n__dfsw_mmap(void *start, size_t length, int prot, int flags, int fd,\n            off_t offset, dfsan_label start_label, dfsan_label len_label,\n            dfsan_label prot_label, dfsan_label flags_label,\n            dfsan_label fd_label, dfsan_label offset_label,\n            dfsan_label *ret_label) {\n  void *ret = mmap(start, length, prot, flags, fd, offset);\n  if (ret != MAP_FAILED) {\n    off_t fsize = taint_get_file(fd);\n    if (fsize) {\n      AOUT(\"mmap tainted file at addr %p, offset: %ld, length %lu \\n\",\n           ret, offset, length);\n      size_t tainted_length = (offset + length) > 
fsize ? (fsize - offset)\n                                                        : length;\n      for (size_t i = 0; i < tainted_length; i++)\n        dfsan_set_label(get_label_for(fd, offset + i), (char *)ret + i, 1);\n      for (size_t i = tainted_length; i < length; i++)\n        dfsan_set_label(-1, (char *)ret + i, 1);\n    } else {\n      dfsan_set_label(0, ret, length);\n    }\n  }\n  *ret_label = 0;\n  return ret;\n}\n\nSANITIZER_INTERFACE_ATTRIBUTE int\n__dfsw_munmap(void *addr, size_t length, dfsan_label addr_label,\n              dfsan_label length_label, dfsan_label *ret_label) {\n  // clear sth\n  AOUT(\"munmap, addr %p, length %lu \\n\", addr, length);\n  int ret = munmap(addr, length);\n  if (!ret) dfsan_set_label(0, addr, length);\n  *ret_label = 0;\n  return ret;\n}\n\nSANITIZER_INTERFACE_ATTRIBUTE off_t\n__dfsw_lseek(int fd, off_t offset, int whence, dfsan_label fd_label,\n             dfsan_label offset_label, dfsan_label whence_label,\n             dfsan_label *ret_label) {\n  off_t ret = lseek(fd, offset, whence);\n  if (ret != (off_t)-1) {\n    if (taint_get_file(fd)) {\n      taint_set_offset_label(offset_label);\n      if (offset_label) {\n        __taint_trace_offset(offset_label, offset, sizeof(offset) * 8);\n      }\n    }\n    *ret_label = offset_label;\n  } else *ret_label = 0;\n  return ret;\n}\n\nSANITIZER_INTERFACE_ATTRIBUTE off64_t\n__dfsw_lseek64(int fd, off64_t offset, int whence, dfsan_label fd_label,\n               dfsan_label offset_label, dfsan_label whence_label,\n               dfsan_label *ret_label) {\n  off64_t ret = lseek64(fd, offset, whence);\n  if (ret != (off64_t)-1) {\n    if (taint_get_file(fd)) {\n      taint_set_offset_label(offset_label);\n      if (offset_label) {\n        __taint_trace_offset(offset_label, offset, sizeof(offset) * 8);\n      }\n    }\n    *ret_label = offset_label;\n  } else *ret_label = 0;\n  return ret;\n}\n\nSANITIZER_INTERFACE_ATTRIBUTE int\n__dfsw_fseek(FILE *stream, long offset, int 
whence, dfsan_label stream_label,\n             dfsan_label offset_label, dfsan_label whence_label,\n             dfsan_label *ret_label) {\n  int fd = fileno(stream);\n  int ret = fseek(stream, offset, whence);\n  *ret_label = 0;\n  if (ret == 0 && taint_get_file(fd)) {\n    taint_set_offset_label(offset_label);\n    if (offset_label) {\n      __taint_trace_offset(offset_label, offset, sizeof(offset) * 8);\n    }\n  }\n  return ret;\n}\n\nSANITIZER_INTERFACE_ATTRIBUTE int\n__dfsw_fseeko(FILE *stream, off_t offset, int whence, dfsan_label stream_label,\n             dfsan_label offset_label, dfsan_label whence_label,\n             dfsan_label *ret_label) {\n  int fd = fileno(stream);\n  int ret = fseeko(stream, offset, whence);\n  *ret_label = 0;\n  if (ret == 0 && taint_get_file(fd)) {\n    taint_set_offset_label(offset_label);\n    if (offset_label) {\n      __taint_trace_offset(offset_label, offset, sizeof(offset) * 8);\n    }\n  }\n  return ret;\n}\n\nSANITIZER_INTERFACE_ATTRIBUTE int\n__dfsw_fseeko64(FILE *stream, off64_t offset, int whence, dfsan_label stream_label,\n             dfsan_label offset_label, dfsan_label whence_label,\n             dfsan_label *ret_label) {\n  int fd = fileno(stream);\n  int ret = fseeko64(stream, offset, whence);\n  *ret_label = 0;\n  if (ret == 0 && taint_get_file(fd)) {\n    taint_set_offset_label(offset_label);\n    if (offset_label) {\n      __taint_trace_offset(offset_label, offset, sizeof(offset) * 8);\n    }\n  }\n  return ret;\n}\n\n}  // extern \"C\"\n"
  },
  {
    "path": "runtime/dfsan/dfsan_flags.inc",
    "content": "//===-- dfsan_flags.inc -----------------------------------------*- C++ -*-===//\n//\n// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n// See https://llvm.org/LICENSE.txt for license information.\n// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n//\n//===----------------------------------------------------------------------===//\n//\n// DFSan runtime flags.\n//\n//===----------------------------------------------------------------------===//\n#ifndef DFSAN_FLAG\n# error \"Define DFSAN_FLAG prior to including this file!\"\n#endif\n\n// DFSAN_FLAG(Type, Name, DefaultValue, Description)\n// See COMMON_FLAG in sanitizer_flags.inc for more details.\n\nDFSAN_FLAG(bool, warn_unimplemented, true,\n           \"Whether to warn on unimplemented functions.\")\nDFSAN_FLAG(bool, warn_nonzero_labels, false,\n           \"Whether to warn on unimplemented functions.\")\nDFSAN_FLAG(\n    bool, strict_data_dependencies, true,\n    \"Whether to propagate labels only when there is an obvious data dependency\"\n    \"(e.g., when comparing strings, ignore the fact that the output of the\"\n    \"comparison might be data-dependent on the content of the strings). 
This\"\n    \"applies only to the custom functions defined in 'custom.c'.\")\nDFSAN_FLAG(const char *, dump_labels_at_exit, \"\", \"The path of the file where \"\n                                                  \"to dump the labels when the \"\n                                                  \"program terminates.\")\nDFSAN_FLAG(const char *, taint_file, \"\", \"The path of the file which \"\n                                         \"will be tainted.\")\nDFSAN_FLAG(const char *, taint_socket, \"\", \"The network source which \"\n                                          \"will be tainted.\")\nDFSAN_FLAG(const char *, union_table, \"union.txt\", \"union table.\")\nDFSAN_FLAG(int, shm_fd, -1, \"shared union table.\")\nDFSAN_FLAG(int, pipe_fd, -1, \"communication fd.\")\nDFSAN_FLAG(bool, trace_bounds, false, \"trace bounds info.\")\nDFSAN_FLAG(bool, trace_fsize, false, \"trace file size.\")\nDFSAN_FLAG(bool, exit_on_memerror, true, \"terminate on memory error.\")\nDFSAN_FLAG(bool, solve_ub, false, \"solve undefined behavior.\")\nDFSAN_FLAG(bool, debug, false, \"Print debug output.\")\nDFSAN_FLAG(const char *, output_dir, \".\", \"The path for output file.\")\nDFSAN_FLAG(int, instance_id, 0, \"instance id for multi-instance fuzzing.\")\nDFSAN_FLAG(int, session_id, 0, \"session/round id.\")\nDFSAN_FLAG(bool, force_stdin, false, \"force tainting stdin.\")\nDFSAN_FLAG(bool, enum_gep, false, \"enable GEP index enumeration.\")\nDFSAN_FLAG(int, string_map_capacity, 256, \"initial capacity for string label maps.\")\n"
  },
  {
    "path": "runtime/dfsan/dfsan_interceptors.cpp",
    "content": "//===-- dfsan_interceptors.cc ---------------------------------------------===//\n//\n// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n// See https://llvm.org/LICENSE.txt for license information.\n// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n//\n//===----------------------------------------------------------------------===//\n//\n// This file is a part of DataFlowSanitizer.\n//\n// Interceptors for standard library functions.\n//===----------------------------------------------------------------------===//\n\n#include <sys/syscall.h>\n#include <unistd.h>\n\n#include \"dfsan.h\"\n#include \"interception/interception.h\"\n#include \"sanitizer_common/sanitizer_common.h\"\n\nusing namespace __sanitizer;\n\nnamespace {\n\nstatic bool interceptors_initialized;\n\nvoid ReleaseShadowMemoryPagesToOS(void *addr, SIZE_T length) {\n  uptr beg_shadow_addr = (uptr)__dfsan::shadow_for(addr);\n  void *end_addr =\n      (void *)((uptr)addr + RoundUpTo(length, GetPageSizeCached()));\n  uptr end_shadow_addr = (uptr)__dfsan::shadow_for(end_addr);\n  ReleaseMemoryPagesToOS(beg_shadow_addr, end_shadow_addr);\n}\n\n}\n\nINTERCEPTOR(void *, mmap, void *addr, SIZE_T length, int prot, int flags,\n            int fd, OFF_T offset) {\n  void *res = nullptr;\n  \n  // interceptors_initialized is set to true during preinit_array, when we're\n  // single-threaded.  
So we don't need to worry about accessing it atomically.\n  if (!interceptors_initialized)\n    res = (void *)syscall(__NR_mmap, addr, length, prot, flags, fd, offset);\n  else\n    res = REAL(mmap)(addr, length, prot, flags, fd, offset);\n\n  if (res != (void*)-1)\n    ReleaseShadowMemoryPagesToOS(res, length);\n  return res;\n}\n\nINTERCEPTOR(void *, mmap64, void *addr, SIZE_T length, int prot, int flags,\n            int fd, OFF64_T offset) {\n  void *res = REAL(mmap64)(addr, length, prot, flags, fd, offset);\n  if (res != (void*)-1)\n    ReleaseShadowMemoryPagesToOS(res, length);\n  return res;\n}\n\nINTERCEPTOR(int, munmap, void *addr, SIZE_T length) {\n  int res = REAL(munmap)(addr, length);\n  if (res != -1) {\n    ReleaseShadowMemoryPagesToOS(addr, length);\n  }\n  return res;\n}\n\nnamespace __dfsan {\nvoid InitializeInterceptors() {\n  CHECK(!interceptors_initialized);\n\n  INTERCEPT_FUNCTION(mmap);\n  INTERCEPT_FUNCTION(mmap64);\n  INTERCEPT_FUNCTION(munmap);\n\n  interceptors_initialized = true;\n}\n}  // namespace __dfsan\n"
  },
  {
    "path": "runtime/dfsan/dfsan_platform.h",
    "content": "//===-- dfsan_platform.h ----------------------------------------*- C++ -*-===//\n//\n// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n// See https://llvm.org/LICENSE.txt for license information.\n// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n//\n//===----------------------------------------------------------------------===//\n//\n// This file is a part of DataFlowSanitizer.\n//\n// Platform specific information for DFSan.\n//===----------------------------------------------------------------------===//\n\n#ifndef DFSAN_PLATFORM_H\n#define DFSAN_PLATFORM_H\n\nnamespace __dfsan {\n\n#if defined(__x86_64__)\nstruct Mapping {\n  static const uptr kShadowAddr = 0x10000;\n  static const uptr kHashTableAddr = 0x400000000000;\n  static const uptr kUnionTableAddr = 0x400100000000;\n  static const uptr kAppAddr = 0x700000040000;\n  static const uptr kAppBaseAddr = 0x700000000000;\n  static const uptr kShadowMask = ~0x700000000000;\n};\n#elif defined(__mips64)\nstruct Mapping {\n  static const uptr kShadowAddr = 0x10000;\n  static const uptr kHashTableAddr = 0x2000000000;\n  static const uptr kUnionTableAddr = 0x2100000000;\n  static const uptr kAppAddr = 0xF000008000;\n  static const uptr kAppBaseAddr = 0xF000000000;\n  static const uptr kShadowMask = ~0xF000000000;\n};\n#elif defined(__aarch64__)\nstruct Mapping39 {\n  static const uptr kShadowAddr = 0x10000;\n  static const uptr kHashTableAddr = 0x1000000000;\n  static const uptr kUnionTableAddr = 0x1100000000;\n  static const uptr kAppAddr = 0x7000008000;\n  static const uptr kAppBaseAddr = 0x7000000000;\n  static const uptr kShadowMask = ~0x7800000000;\n};\n\nstruct Mapping42 {\n  static const uptr kShadowAddr = 0x10000;\n  static const uptr kHashTableAddr = 0x8000000000;\n  static const uptr kUnionTableAddr = 0x8100000000;\n  static const uptr kAppAddr = 0x3ff00008000;\n  static const uptr kAppBaseAddr = 0x3ff00000000;\n  static const uptr kShadowMask = 
~0x3c000000000;\n};\n\nstruct Mapping48 {\n  static const uptr kShadowAddr = 0x10000;\n  static const uptr kHashTableAddr = 0x8000000000;\n  static const uptr kUnionTableAddr = 0x8100000000;\n  static const uptr kAppAddr = 0xffff00008000;\n  static const uptr kAppBaseAddr = 0xffff00000000;\n  static const uptr kShadowMask = ~0xfffff0000000;\n};\n\nextern int vmaSize;\n# define DFSAN_RUNTIME_VMA 1\n#else\n# error \"DFSan not supported for this platform!\"\n#endif\n\nenum MappingType {\n  MAPPING_SHADOW_ADDR,\n  MAPPING_UNION_TABLE_ADDR,\n  MAPPING_APP_ADDR,\n  MAPPING_APP_BASE_ADDR,\n  MAPPING_SHADOW_MASK,\n  MAPPING_HASH_TABLE_ADDR\n};\n\ntemplate<typename Mapping, int Type>\nuptr MappingImpl(void) {\n  switch (Type) {\n    case MAPPING_SHADOW_ADDR: return Mapping::kShadowAddr;\n    case MAPPING_UNION_TABLE_ADDR: return Mapping::kUnionTableAddr;\n    case MAPPING_APP_ADDR: return Mapping::kAppAddr;\n    case MAPPING_APP_BASE_ADDR: return Mapping::kAppBaseAddr;\n    case MAPPING_SHADOW_MASK: return Mapping::kShadowMask;\n    case MAPPING_HASH_TABLE_ADDR: return Mapping::kHashTableAddr;\n  }\n}\n\ntemplate<int Type>\nuptr MappingArchImpl(void) {\n#ifdef __aarch64__\n  switch (vmaSize) {\n    case 39: return MappingImpl<Mapping39, Type>();\n    case 42: return MappingImpl<Mapping42, Type>();\n    case 48: return MappingImpl<Mapping48, Type>();\n  }\n  DCHECK(0);\n  return 0;\n#else\n  return MappingImpl<Mapping, Type>();\n#endif\n}\n\nALWAYS_INLINE\nuptr ShadowAddr() {\n  return MappingArchImpl<MAPPING_SHADOW_ADDR>();\n}\n\nALWAYS_INLINE\nuptr UnionTableAddr() {\n  return MappingArchImpl<MAPPING_UNION_TABLE_ADDR>();\n}\n\nALWAYS_INLINE\nuptr AppAddr() {\n  return MappingArchImpl<MAPPING_APP_ADDR>();\n}\n\nALWAYS_INLINE\nuptr ShadowMask() {\n  return MappingArchImpl<MAPPING_SHADOW_MASK>();\n}\n\nALWAYS_INLINE\nuptr HashTableAddr() {\n  return MappingArchImpl<MAPPING_HASH_TABLE_ADDR>();\n}\n\nALWAYS_INLINE\nuptr AppBaseAddr() {\n  return 
MappingArchImpl<MAPPING_APP_BASE_ADDR>();\n}\n\n}  // namespace __dfsan\n\n#endif\n"
  },
  {
    "path": "runtime/dfsan/done_abilist.txt",
    "content": "##########\n# added by user\n#########\nfun:log10=functional\nfun:__ctype_toupper_loc=discard\nfun:isatty=discard\nfun:longjmp=discard\nfun:wait=discard\n\nfun:main=uninstrumented\nfun:main=discard\n\n###############################################################################\n# DFSan interface functions\n###############################################################################\nfun:dfsan_union=uninstrumented\nfun:dfsan_union=discard\nfun:dfsan_create_label=uninstrumented\nfun:dfsan_create_label=discard\nfun:dfsan_set_label=uninstrumented\nfun:dfsan_set_label=discard\nfun:dfsan_add_label=uninstrumented\nfun:dfsan_add_label=discard\nfun:dfsan_get_label=uninstrumented\nfun:dfsan_get_label=custom\nfun:dfsan_read_label=uninstrumented\nfun:dfsan_read_label=discard\nfun:dfsan_get_label_count=uninstrumented\nfun:dfsan_get_label_count=discard\nfun:dfsan_get_label_info=uninstrumented\nfun:dfsan_get_label_info=discard\nfun:dfsan_has_label=uninstrumented\nfun:dfsan_has_label=discard\nfun:dfsan_has_label_with_desc=uninstrumented\nfun:dfsan_has_label_with_desc=discard\nfun:dfsan_set_write_callback=uninstrumented\nfun:dfsan_set_write_callback=custom\nfun:dfsan_shadow_for=discard\n\nfun:dfsan_fun_init=uninstrumented\nfun:dfsan_fun_init=discard\nfun:dfsan_fun_fini=uninstrumented\nfun:dfsan_fun_fini=discard\n\n###############################################################################\n# 
libunwind\n###############################################################################\n\nfun:__clang_call_terminate=uninstrumented\nfun:__clang_call_terminate=discard\nfun:__unw_getcontext=uninstrumented\nfun:__unw_getcontext=discard\nfun:unw_getcontext=uninstrumented\nfun:unw_getcontext=discard\nfun:_ZN9libunwind13Registers_x866jumptoEv=uninstrumented\nfun:_ZN9libunwind13Registers_x866jumptoEv=discard\nfun:_ZN9libunwind16Registers_x86_646jumptoEv=uninstrumented\nfun:_ZN9libunwind16Registers_x86_646jumptoEv=discard\nfun:__libunwind_Registers_x86_64_jumpto=uninstrumented\nfun:__libunwind_Registers_x86_64_jumpto=discard\n\n###############################################################################\n# glibc\n###############################################################################\n\n# alloc and free\nfun:aligned_alloc=custom\nfun:calloc=custom\nfun:free=custom\nfun:malloc=custom\nfun:memalign=custom\nfun:posix_memalign=custom\nfun:pvalloc=custom\nfun:realloc=custom\nfun:reallocarray=custom\nfun:valloc=custom\nfun:__libc_calloc=custom\nfun:__libc_free=custom\nfun:__libc_malloc=custom\nfun:__libc_memalign=custom\nfun:__libc_pvalloc=custom\nfun:__libc_realloc=custom\nfun:__libc_reallocarray=custom\nfun:__libc_valloc=custom\n\n# Functions that return a value that depends on the input, but the output might\n# not be necessarily data-dependent on the input.\nfun:isalpha=functional\nfun:isdigit=functional\nfun:isprint=functional\nfun:isxdigit=functional\nfun:isalnum=functional\nfun:ispunct=functional\nfun:isspace=functional\n\n# Functions that return a value that is data-dependent on the 
input.\nfun:__isinf=functional\nfun:__isinff=functional\nfun:__signbit=functional\nfun:__signbitf=functional\nfun:__signbitl=functional\nfun:abs=functional\nfun:btowc=functional\nfun:exp=functional\nfun:exp2=functional\nfun:expf=functional\nfun:expl=functional\nfun:fabs=functional\nfun:finite=functional\nfun:finitef=functional\nfun:finitel=functional\nfun:floor=functional\nfun:fmod=functional\nfun:frexp=functional\nfun:frexpf=functional\nfun:frexpl=functional\nfun:isinf=functional\nfun:isinff=functional\nfun:isinfl=functional\nfun:isnan=functional\nfun:isnanf=functional\nfun:isnanl=functional\nfun:lrint=functional\nfun:lrintf=functional\nfun:lrintl=functional\nfun:llrint=functional\nfun:llrintf=functional\nfun:llrintl=functional\nfun:log=functional\nfun:log1p=functional\nfun:log1pf=functional\nfun:log1pl=functional\nfun:log2=functional\nfun:log2f=functional\nfun:log2l=functional\nfun:modf=functional\nfun:nextafter=functional\nfun:nextafterf=functional\nfun:nextafterl=functional\nfun:nexttoward=functional\nfun:nexttowardf=functional\nfun:nexttowardl=functional\nfun:pow=functional\nfun:powf=functional\nfun:powl=functional\nfun:round=functional\nfun:sqrt=functional\nfun:sqrtf=functional\nfun:wctob=functional\nfun:wctob=functional\n\n# Functions that produce an output that does not depend on the input (shadow is\n# zeroed automatically).\nfun:__assert_fail=discard\nfun:__cmsg_nxthdr=discard\nfun:__ctype_b_loc=discard\nfun:__cxa_atexit=discard\nfun:__errno_location=discard\nfun:__newlocale=discard\nfun:__sbrk=discard\nfun:__sigsetjmp=discard\nfun:__uselocale=discard\nfun:__wctype_l=discard\nfun:access=discard\nfun:alarm=discard\nfun:atexit=discard\nfun:bind=discard\nfun:chdir=discard\n# fun:close=discard\nfun:closedir=discard\nfun:connect=discard\nfun:dladdr=discard\nfun:dlclose=discard\nfun:epoll_create=discard\nfun:epoll_create1=discard\nfun:epoll_ctl=discard\n# fun:fclose=discard\nfun:feof=discard\nfun:ferror=discard\nfun:fflush=discard\nfun:fileno=discard\n# 
fun:fopen=discard\nfun:fprintf=discard\nfun:fputc=discard\nfun:fputc=discard\nfun:fputs=discard\nfun:fputs_unlocked=discard\n# fun:fseek=discard\n# fun:fseeko64=discard\nfun:ftell=discard\nfun:fwrite=discard\nfun:getenv=discard\nfun:getuid=discard\nfun:geteuid=discard\nfun:getpagesize=discard\nfun:getpid=discard\nfun:gettext=discard\nfun:kill=discard\nfun:listen=discard\n# fun:lseek=discard\nfun:mkdir=discard\n# fun:mmap=discard\n# fun:munmap=discard\nfun:ngettext=discard\n# fun:open=discard\n# fun:openat=discard\nfun:pipe=discard\nfun:posix_fadvise=discard\n# fun:posix_memalign=discard\nfun:prctl=discard\nfun:printf=discard\nfun:pthread_sigmask=discard\nfun:putc=discard\nfun:putchar=discard\nfun:puts=discard\nfun:rand=discard\nfun:random=discard\nfun:remove=discard\nfun:sched_getcpu=discard\nfun:sched_get_priority_max=discard\nfun:sched_setaffinity=discard\nfun:sched_yield=discard\nfun:sem_destroy=discard\nfun:sem_init=discard\nfun:sem_post=discard\nfun:sem_wait=discard\nfun:send=discard\nfun:sendmsg=discard\nfun:sendto=discard\nfun:setsockopt=discard\nfun:shutdown=discard\nfun:sleep=discard\nfun:socket=discard\nfun:strerror=discard\nfun:strspn=discard\nfun:strcspn=discard\nfun:symlink=discard\nfun:syscall=discard\nfun:unlink=discard\nfun:uselocale=discard\nfun:vfprintf=discard\n\n# Functions that produce output does not depend on the input (need to zero the\n# shadow manually).\nfun:_dl_get_tls_static_info=custom\nfun:clock_gettime=custom\nfun:dlopen=custom\nfun:epoll_wait=custom\nfun:fgets=custom\nfun:fstat=custom\nfun:getcwd=custom\nfun:get_current_dir_name=custom\nfun:getentropy=custom\nfun:gethostname=custom\nfun:getpeername=custom\nfun:getrlimit=custom\nfun:getrusage=custom\nfun:getsockname=custom\nfun:getsockopt=custom\nfun:nanosleep=custom\nfun:pread=custom\nfun:pread64=custom\nfun:read=custom\nfun:recv=custom\nfun:recvfrom=custom\nfun:recvmmsg=custom\nfun:recvmsg=custom\nfun:sigaltstack=custom\nfun:socketpair=custom\nfun:stat=custom\nfun:time=custom\n\n# 
Functions that produce an output that depend on the input (propagate the\n# shadow manually).\nfun:__strdup=custom\nfun:__strndup=custom\nfun:ctime_r=custom\nfun:inet_pton=custom\nfun:localtime_r=custom\nfun:memcpy=custom\nfun:memmove=custom\nfun:memset=custom\nfun:stpcpy=custom\nfun:strcat=custom\nfun:strcpy=custom\nfun:strdup=custom\nfun:strncat=custom\nfun:strncpy=custom\nfun:strndup=custom\n\n# transformation (fatoi)\nfun:strtod=custom\nfun:strtol=custom\nfun:strtoll=custom\nfun:strtoul=custom\nfun:strtoull=custom\nfun:atoi=custom\nfun:atol=custom\nfun:atoll=custom\nfun:tolower=custom\nfun:toupper=custom\n\n# Functions that produce an output that is computed from the input, but is not\n# necessarily data dependent.\nfun:bcmp=custom\nfun:memchr=custom\nfun:memcmp=custom\nfun:memrchr=custom\nfun:strcasecmp=custom\nfun:strchr=custom\nfun:strcmp=custom\nfun:strlen=custom\nfun:strncasecmp=custom\nfun:strncmp=custom\nfun:strpbrk=custom\nfun:strrchr=custom\nfun:strstr=custom\nfun:strnstr=custom\n# not standard Linux\nfun:strnstr=uninstrumented\nfun:memmem=custom\n\n## from afl++\n# memcmp-like\nfun:CRYPTO_memcmp=memcmp\nfun:OPENSSL_memcmp=memcmp\nfun:memcmp_const_time=memcmp\nfun:memcmpct=memcmp\n\n# strcmp-like (fstrcmp)\nfun:xmlStrcmp=strcmp\nfun:xmlStrEqual=strcmp\nfun:g_strcmp0=strcmp\nfun:curl_strequal=strcmp\nfun:strcsequal=strcmp\nfun:stricmp=strcmp\nfun:ap_cstr_casecmp=strcmp\nfun:OPENSSL_strcasecmp=strcmp\nfun:xmlStrcasecmp=strcmp\nfun:g_strcasecmp=strcmp\nfun:g_ascii_strcasecmp=strcmp\nfun:Curl_strcasecompare=strcmp\nfun:Curl_safe_strcasecompare=strcmp\nfun:cmsstrcasecmp=strcmp\n\n# strncmp-like (fstrcmp)\nfun:xmlStrncmp=strncmp\nfun:curl_strnequal=strncmp\nfun:strnicmp=strncmp\nfun:ap_cstr_casecmpn=strncmp\nfun:OPENSSL_strncasecmp=strncmp\nfun:xmlStrncasecmp=strncmp\nfun:g_ascii_strncasecmp=strcmp\nfun:Curl_strncasecompare=strncmp\nfun:g_strncasecmp=strncmp\n\n# fstrchr\nfun:xmlStrchr=strchr\n\n# fstrrchr\n\n# strstr 
(fstrstr)\nfun:g_strstr_len=strstr\nfun:ap_strcasestr=strstr\nfun:xmlStrstr=strstr\nfun:xmlStrcasestr=strstr\n\n# fprefixof\nfun:g_str_has_prefix=prefixof\n\n# fsuffixof\nfun:g_str_has_suffix=suffixof\n\n# fsubstr\nfun:xmlStrsub=substr\n\n# Functions which take action based on global state, such as running a callback\n# set by a separate function.\nfun:write=custom\n\n# Functions that take a callback (wrap the callback manually).\nfun:dl_iterate_phdr=custom\n\nfun:getpwuid_r=custom\nfun:poll=custom\nfun:sched_getaffinity=custom\nfun:select=custom\nfun:sigemptyset=custom\nfun:sigaction=custom\n#fun:signal=custom\nfun:gettimeofday=custom\n\n# sprintf-like\nfun:sprintf=custom\nfun:snprintf=custom\n\n# TODO: custom\nfun:asprintf=discard\nfun:qsort=discard\n\n### synfuzz\nfun:__fxstat=custom\nfun:__getdelim=custom\nfun:__lxstat=custom\nfun:__xstat=custom\nfun:_IO_getc=custom\nfun:close=custom\nfun:fclose=custom\nfun:fgetc=custom\nfun:fgetc_unlocked=custom\nfun:fgets_unlocked=custom\nfun:fopen=custom\nfun:fopen64=custom\nfun:fread=custom\nfun:fread_unlocked=custom\nfun:freopen=custom\nfun:getchar=custom\nfun:getc=custom\nfun:getc_unlocked=custom\nfun:getdelim=custom\nfun:getline=custom\nfun:gets=custom\nfun:lstat=custom\nfun:mbrtowc=custom\nfun:mmap=custom\nfun:munmap=custom\nfun:mmap2=custom\nfun:open=custom\nfun:openat=custom\nfun:openat2=custom\nfun:lseek=custom\nfun:lseek64=custom\nfun:fseek=custom\nfun:fseeko=custom\nfun:fseeko64=custom\nfun:connect=custom\n\n# for LAVA\nfun:utmpxname=custom\nfun:getutxent=custom\nfun:setutxent=custom\n\n###############################################################################\n# 
pthread\n###############################################################################\nfun:__pthread_register_cancel=discard\nfun:__pthread_unregister_cancel=discard\nfun:pthread_attr_destroy=discard\nfun:pthread_attr_getaffinity_np=discard\nfun:pthread_attr_getdetachstate=discard\nfun:pthread_attr_getguardsize=discard\nfun:pthread_attr_getinheritsched=discard\nfun:pthread_attr_getschedparam=discard\nfun:pthread_attr_getschedpolicy=discard\nfun:pthread_attr_getscope=discard\nfun:pthread_attr_getstack=discard\nfun:pthread_attr_getstackaddr=discard\nfun:pthread_attr_getstacksize=discard\nfun:pthread_attr_init=discard\nfun:pthread_attr_setaffinity_np=discard\nfun:pthread_attr_setdetachstate=discard\nfun:pthread_attr_setguardsize=discard\nfun:pthread_attr_setinheritsched=discard\nfun:pthread_attr_setschedparam=discard\nfun:pthread_attr_setschedpolicy=discard\nfun:pthread_attr_setscope=discard\nfun:pthread_attr_setstack=discard\nfun:pthread_attr_setstackaddr=discard\nfun:pthread_attr_setstacksize=discard\nfun:pthread_equal=discard\nfun:pthread_getschedparam=discard\nfun:pthread_getspecific=discard\nfun:pthread_key_create=discard\nfun:pthread_key_delete=discard\nfun:pthread_mutex_destroy=discard\nfun:pthread_mutex_init=discard\nfun:pthread_mutex_lock=discard\nfun:pthread_mutex_trylock=discard\nfun:pthread_mutex_unlock=discard\nfun:pthread_mutexattr_destroy=discard\nfun:pthread_mutexattr_init=discard\nfun:pthread_mutexattr_settype=discard\nfun:pthread_rwlock_destroy=discard\nfun:pthread_rwlock_init=discard\nfun:pthread_rwlock_rdlock=discard\nfun:pthread_rwlock_timedrdlock=discard\nfun:pthread_rwlock_timedwrlock=discard\nfun:pthread_rwlock_tryrdlock=discard\nfun:pthread_rwlock_trywrlock=discard\nfun:pthread_rwlock_wrlock=discard\nfun:pthread_rwlock_unlock=discard\nfun:pthread_setschedparam=discard\nfun:pthread_setname_np=discard\nfun:pthread_once=discard\nfun:pthread_self=discard\nfun:pthread_setspecific=discard\n\n# Functions that take a callback (wrap the callback 
manually).\nfun:pthread_create=custom\n\n# Functions that produce output does not depend on the input (need to zero the\n# shadow manually).\nfun:pthread_join=custom\n\n###############################################################################\n# libffi/libgo\n###############################################################################\n# Functions that are written in asm or are called from asm.\nfun:ffi_call_unix64=uninstrumented\nfun:ffi_call_unix64=discard\nfun:ffi_closure_unix64_inner=uninstrumented\nfun:ffi_closure_unix64_inner=discard\nfun:ffi_closure_unix64=uninstrumented\nfun:ffi_closure_unix64=discard\nfun:__go_get_closure=uninstrumented\nfun:__go_get_closure=discard\nfun:__go_makefunc_can_recover=uninstrumented\nfun:__go_makefunc_can_recover=discard\nfun:__go_makefunc_returning=uninstrumented\nfun:__go_makefunc_returning=discard\nfun:reflect.MakeFuncStubGo=uninstrumented\nfun:reflect.MakeFuncStubGo=discard\nfun:reflect.makeFuncStub=uninstrumented\nfun:reflect.makeFuncStub=discard\n\n###############################################################################\n# lib/Fuzzer\n###############################################################################\n# Replaces __sanitizer_cov_trace_cmp with 
__dfsw___sanitizer_cov_trace_cmp\nfun:__sanitizer_cov_trace_cmp1=custom\nfun:__sanitizer_cov_trace_cmp1=uninstrumented\nfun:__sanitizer_cov_trace_cmp2=custom\nfun:__sanitizer_cov_trace_cmp2=uninstrumented\nfun:__sanitizer_cov_trace_cmp4=custom\nfun:__sanitizer_cov_trace_cmp4=uninstrumented\nfun:__sanitizer_cov_trace_cmp8=custom\nfun:__sanitizer_cov_trace_cmp8=uninstrumented\nfun:__sanitizer_cov_trace_const_cmp1=custom\nfun:__sanitizer_cov_trace_const_cmp1=uninstrumented\nfun:__sanitizer_cov_trace_const_cmp2=custom\nfun:__sanitizer_cov_trace_const_cmp2=uninstrumented\nfun:__sanitizer_cov_trace_const_cmp4=custom\nfun:__sanitizer_cov_trace_const_cmp4=uninstrumented\nfun:__sanitizer_cov_trace_const_cmp8=custom\nfun:__sanitizer_cov_trace_const_cmp8=uninstrumented\n# Similar for __sanitizer_cov_trace_switch\nfun:__sanitizer_cov_trace_switch=custom\nfun:__sanitizer_cov_trace_switch=uninstrumented\n\n# Ignores all other __sanitizer callbacks.\nfun:__sanitizer_cov=uninstrumented\nfun:__sanitizer_cov=discard\nfun:__sanitizer_cov_module_init=uninstrumented\nfun:__sanitizer_cov_module_init=discard\nfun:__sanitizer_cov_with_check=uninstrumented\nfun:__sanitizer_cov_with_check=discard\nfun:__sanitizer_set_death_callback=uninstrumented\nfun:__sanitizer_set_death_callback=discard\nfun:__sanitizer_update_counter_bitset_and_clear_counters=uninstrumented\nfun:__sanitizer_update_counter_bitset_and_clear_counters=discard\nfun:__sanitizer_cov_trace_pc*=uninstrumented\nfun:__sanitizer_cov_trace_pc*=discard\nfun:__sanitizer_cov_pcs_init=uninstrumented\nfun:__sanitizer_cov_pcs_init=discard\n\nfun:__sanitizer_get_current_allocated_bytes=uninstrumented\nfun:__sanitizer_get_current_allocated_bytes=discard\nfun:__sanitizer_get_heap_size=uninstrumented\nfun:__sanitizer_get_heap_size=discard\nfun:__sanitizer_get_free_bytes=uninstrumented\nfun:__sanitizer_get_free_bytes=discard\nfun:__sanitizer_get_unmapped_bytes=uninstrumented\nfun:__sanitizer_get_unmapped_bytes=discard\nfun:__sanitizer_get_estim
ated_allocated_size=uninstrumented\nfun:__sanitizer_get_estimated_allocated_size=discard\nfun:__sanitizer_get_ownership=uninstrumented\nfun:__sanitizer_get_ownership=discard\nfun:__sanitizer_get_allocated_size=uninstrumented\nfun:__sanitizer_get_allocated_size=discard\nfun:__sanitizer_print_stack_trace=uninstrumented\nfun:__sanitizer_print_stack_trace=discard\n\nfun:TcmallocSlab_Internal_PushBatch_FixedShift=uninstrumented\nfun:TcmallocSlab_Internal_PushBatch_FixedShift=discard\nfun:TcmallocSlab_Internal_PushBatch_FixedShift_VCPU=uninstrumented\nfun:TcmallocSlab_Internal_PushBatch_FixedShift_VCPU=discard\nfun:TcmallocSlab_Internal_PerCpuCmpxchg64=uninstrumented\nfun:TcmallocSlab_Internal_PerCpuCmpxchg64=discard\nfun:TcmallocSlab_Internal_PerCpuCmpxchg64_VCPU=uninstrumented\nfun:TcmallocSlab_Internal_PerCpuCmpxchg64_VCPU=discard\nfun:TcmallocSlab_Internal_PopBatch_FixedShift=uninstrumented\nfun:TcmallocSlab_Internal_PopBatch_FixedShift=discard\nfun:TcmallocSlab_Internal_PopBatch_FixedShift_VCPU=uninstrumented\nfun:TcmallocSlab_Internal_PopBatch_FixedShift_VCPU=discard\n\n# Ignores the dfsan wrappers.\nfun:__dfsw_*=uninstrumented\nfun:__dfsw_*=discard\n\n# Ignores taint wrappers.\nfun:__taint_*=uninstrumented\nfun:__taint_*=discard\n\n# Don't add extra parameters to the Fuzzer callback.\nfun:LLVMFuzzerTestOneInput=uninstrumented\nfun:__afl_manual_init=uninstrumented\nfun:__afl_manual_init=discard\n"
  },
  {
    "path": "runtime/dfsan/libc++_abilist.txt",
    "content": "fun:_ZNKSt10bad_typeid4whatEv=uninstrumented\nfun:_ZNKSt11logic_error4whatEv=uninstrumented\nfun:_ZNKSt12bad_any_cast4whatEv=uninstrumented\nfun:_ZNKSt12experimental15fundamentals_v112bad_any_cast4whatEv=uninstrumented\nfun:_ZNKSt13bad_exception4whatEv=uninstrumented\nfun:_ZNKSt13runtime_error4whatEv=uninstrumented\nfun:_ZNKSt16nested_exception14rethrow_nestedEv=uninstrumented\nfun:_ZNKSt18bad_variant_access4whatEv=uninstrumented\nfun:_ZNKSt19bad_optional_access4whatEv=uninstrumented\nfun:_ZNKSt20bad_array_new_length4whatEv=uninstrumented\nfun:_ZNKSt3__110__time_put8__do_putEPcRS1_PK2tmcc=uninstrumented\nfun:_ZNKSt3__110__time_put8__do_putEPwRS1_PK2tmcc=uninstrumented\nfun:_ZNKSt3__110error_code7messageEv=uninstrumented\nfun:_ZNKSt3__110moneypunctIcLb0EE11do_groupingEv=uninstrumented\nfun:_ZNKSt3__110moneypunctIcLb0EE13do_neg_formatEv=uninstrumented\nfun:_ZNKSt3__110moneypunctIcLb0EE13do_pos_formatEv=uninstrumented\nfun:_ZNKSt3__110moneypunctIcLb0EE14do_curr_symbolEv=uninstrumented\nfun:_ZNKSt3__110moneypunctIcLb0EE14do_frac_digitsEv=uninstrumented\nfun:_ZNKSt3__110moneypunctIcLb0EE16do_decimal_pointEv=uninstrumented\nfun:_ZNKSt3__110moneypunctIcLb0EE16do_negative_signEv=uninstrumented\nfun:_ZNKSt3__110moneypunctIcLb0EE16do_positive_signEv=uninstrumented\nfun:_ZNKSt3__110moneypunctIcLb0EE16do_thousands_sepEv=uninstrumented\nfun:_ZNKSt3__110moneypunctIcLb1EE11do_groupingEv=uninstrumented\nfun:_ZNKSt3__110moneypunctIcLb1EE13do_neg_formatEv=uninstrumented\nfun:_ZNKSt3__110moneypunctIcLb1EE13do_pos_formatEv=uninstrumented\nfun:_ZNKSt3__110moneypunctIcLb1EE14do_curr_symbolEv=uninstrumented\nfun:_ZNKSt3__110moneypunctIcLb1EE14do_frac_digitsEv=uninstrumented\nfun:_ZNKSt3__110moneypunctIcLb1EE16do_decimal_pointEv=uninstrumented\nfun:_ZNKSt3__110moneypunctIcLb1EE16do_negative_signEv=uninstrumented\nfun:_ZNKSt3__110moneypunctIcLb1EE16do_positive_signEv=uninstrumented\nfun:_ZNKSt3__110moneypunctIcLb1EE16do_thousands_sepEv=uninstrumented\nfun:_ZNKSt3__110moneypu
nctIwLb0EE11do_groupingEv=uninstrumented\nfun:_ZNKSt3__110moneypunctIwLb0EE13do_neg_formatEv=uninstrumented\nfun:_ZNKSt3__110moneypunctIwLb0EE13do_pos_formatEv=uninstrumented\nfun:_ZNKSt3__110moneypunctIwLb0EE14do_curr_symbolEv=uninstrumented\nfun:_ZNKSt3__110moneypunctIwLb0EE14do_frac_digitsEv=uninstrumented\nfun:_ZNKSt3__110moneypunctIwLb0EE16do_decimal_pointEv=uninstrumented\nfun:_ZNKSt3__110moneypunctIwLb0EE16do_negative_signEv=uninstrumented\nfun:_ZNKSt3__110moneypunctIwLb0EE16do_positive_signEv=uninstrumented\nfun:_ZNKSt3__110moneypunctIwLb0EE16do_thousands_sepEv=uninstrumented\nfun:_ZNKSt3__110moneypunctIwLb1EE11do_groupingEv=uninstrumented\nfun:_ZNKSt3__110moneypunctIwLb1EE13do_neg_formatEv=uninstrumented\nfun:_ZNKSt3__110moneypunctIwLb1EE13do_pos_formatEv=uninstrumented\nfun:_ZNKSt3__110moneypunctIwLb1EE14do_curr_symbolEv=uninstrumented\nfun:_ZNKSt3__110moneypunctIwLb1EE14do_frac_digitsEv=uninstrumented\nfun:_ZNKSt3__110moneypunctIwLb1EE16do_decimal_pointEv=uninstrumented\nfun:_ZNKSt3__110moneypunctIwLb1EE16do_negative_signEv=uninstrumented\nfun:_ZNKSt3__110moneypunctIwLb1EE16do_positive_signEv=uninstrumented\nfun:_ZNKSt3__110moneypunctIwLb1EE16do_thousands_sepEv=uninstrumented\nfun:_ZNKSt3__111__libcpp_db15__decrementableEPKv=uninstrumented\nfun:_ZNKSt3__111__libcpp_db15__find_c_from_iEPv=uninstrumented\nfun:_ZNKSt3__111__libcpp_db15__subscriptableEPKvl=uninstrumented\nfun:_ZNKSt3__111__libcpp_db17__dereferenceableEPKv=uninstrumented\nfun:_ZNKSt3__111__libcpp_db17__find_c_and_lockEPv=uninstrumented\nfun:_ZNKSt3__111__libcpp_db22__less_than_comparableEPKvS2_=uninstrumented\nfun:_ZNKSt3__111__libcpp_db6unlockEv=uninstrumented\nfun:_ZNKSt3__111__libcpp_db8__find_cEPv=uninstrumented\nfun:_ZNKSt3__111__libcpp_db9__addableEPKvl=uninstrumented\nfun:_ZNKSt3__112bad_weak_ptr4whatEv=uninstrumented\nfun:_ZNKSt3__112basic_stringIcNS_11char_traitsIcEENS_9allocatorIcEEE12find_last_ofEPKcmm=uninstrumented\nfun:_ZNKSt3__112basic_stringIcNS_11char_traitsIcEENS_9allocatorIc
EEE13find_first_ofEPKcmm=uninstrumented\nfun:_ZNKSt3__112basic_stringIcNS_11char_traitsIcEENS_9allocatorIcEEE16find_last_not_ofEPKcmm=uninstrumented\nfun:_ZNKSt3__112basic_stringIcNS_11char_traitsIcEENS_9allocatorIcEEE17find_first_not_ofEPKcmm=uninstrumented\nfun:_ZNKSt3__112basic_stringIcNS_11char_traitsIcEENS_9allocatorIcEEE2atEm=uninstrumented\nfun:_ZNKSt3__112basic_stringIcNS_11char_traitsIcEENS_9allocatorIcEEE4copyEPcmm=uninstrumented\nfun:_ZNKSt3__112basic_stringIcNS_11char_traitsIcEENS_9allocatorIcEEE4findEPKcmm=uninstrumented\nfun:_ZNKSt3__112basic_stringIcNS_11char_traitsIcEENS_9allocatorIcEEE4findEcm=uninstrumented\nfun:_ZNKSt3__112basic_stringIcNS_11char_traitsIcEENS_9allocatorIcEEE5rfindEPKcmm=uninstrumented\nfun:_ZNKSt3__112basic_stringIcNS_11char_traitsIcEENS_9allocatorIcEEE5rfindEcm=uninstrumented\nfun:_ZNKSt3__112basic_stringIcNS_11char_traitsIcEENS_9allocatorIcEEE7compareEPKc=uninstrumented\nfun:_ZNKSt3__112basic_stringIcNS_11char_traitsIcEENS_9allocatorIcEEE7compareEmmPKc=uninstrumented\nfun:_ZNKSt3__112basic_stringIcNS_11char_traitsIcEENS_9allocatorIcEEE7compareEmmPKcm=uninstrumented\nfun:_ZNKSt3__112basic_stringIcNS_11char_traitsIcEENS_9allocatorIcEEE7compareEmmRKS5_mm=uninstrumented\nfun:_ZNKSt3__112basic_stringIwNS_11char_traitsIwEENS_9allocatorIwEEE12find_last_ofEPKwmm=uninstrumented\nfun:_ZNKSt3__112basic_stringIwNS_11char_traitsIwEENS_9allocatorIwEEE13find_first_ofEPKwmm=uninstrumented\nfun:_ZNKSt3__112basic_stringIwNS_11char_traitsIwEENS_9allocatorIwEEE16find_last_not_ofEPKwmm=uninstrumented\nfun:_ZNKSt3__112basic_stringIwNS_11char_traitsIwEENS_9allocatorIwEEE17find_first_not_ofEPKwmm=uninstrumented\nfun:_ZNKSt3__112basic_stringIwNS_11char_traitsIwEENS_9allocatorIwEEE2atEm=uninstrumented\nfun:_ZNKSt3__112basic_stringIwNS_11char_traitsIwEENS_9allocatorIwEEE4copyEPwmm=uninstrumented\nfun:_ZNKSt3__112basic_stringIwNS_11char_traitsIwEENS_9allocatorIwEEE4findEPKwmm=uninstrumented\nfun:_ZNKSt3__112basic_stringIwNS_11char_traitsIwEENS_9allocatorIw
EEE4findEwm=uninstrumented\nfun:_ZNKSt3__112basic_stringIwNS_11char_traitsIwEENS_9allocatorIwEEE5rfindEPKwmm=uninstrumented\nfun:_ZNKSt3__112basic_stringIwNS_11char_traitsIwEENS_9allocatorIwEEE5rfindEwm=uninstrumented\nfun:_ZNKSt3__112basic_stringIwNS_11char_traitsIwEENS_9allocatorIwEEE7compareEPKw=uninstrumented\nfun:_ZNKSt3__112basic_stringIwNS_11char_traitsIwEENS_9allocatorIwEEE7compareEmmPKw=uninstrumented\nfun:_ZNKSt3__112basic_stringIwNS_11char_traitsIwEENS_9allocatorIwEEE7compareEmmPKwm=uninstrumented\nfun:_ZNKSt3__112basic_stringIwNS_11char_traitsIwEENS_9allocatorIwEEE7compareEmmRKS5_mm=uninstrumented\nfun:_ZNKSt3__112ctype_bynameIcE10do_tolowerEPcPKc=uninstrumented\nfun:_ZNKSt3__112ctype_bynameIcE10do_tolowerEc=uninstrumented\nfun:_ZNKSt3__112ctype_bynameIcE10do_toupperEPcPKc=uninstrumented\nfun:_ZNKSt3__112ctype_bynameIcE10do_toupperEc=uninstrumented\nfun:_ZNKSt3__112ctype_bynameIwE10do_scan_isEtPKwS3_=uninstrumented\nfun:_ZNKSt3__112ctype_bynameIwE10do_tolowerEPwPKw=uninstrumented\nfun:_ZNKSt3__112ctype_bynameIwE10do_tolowerEw=uninstrumented\nfun:_ZNKSt3__112ctype_bynameIwE10do_toupperEPwPKw=uninstrumented\nfun:_ZNKSt3__112ctype_bynameIwE10do_toupperEw=uninstrumented\nfun:_ZNKSt3__112ctype_bynameIwE11do_scan_notEtPKwS3_=uninstrumented\nfun:_ZNKSt3__112ctype_bynameIwE5do_isEPKwS3_Pt=uninstrumented\nfun:_ZNKSt3__112ctype_bynameIwE5do_isEtw=uninstrumented\nfun:_ZNKSt3__112ctype_bynameIwE8do_widenEPKcS3_Pw=uninstrumented\nfun:_ZNKSt3__112ctype_bynameIwE8do_widenEc=uninstrumented\nfun:_ZNKSt3__112ctype_bynameIwE9do_narrowEPKwS3_cPc=uninstrumented\nfun:_ZNKSt3__112ctype_bynameIwE9do_narrowEwc=uninstrumented\nfun:_ZNKSt3__112strstreambuf6pcountEv=uninstrumented\nfun:_ZNKSt3__113random_device7entropyEv=uninstrumented\nfun:_ZNKSt3__114__codecvt_utf8IDiE10do_unshiftER11__mbstate_tPcS4_RS4_=uninstrumented\nfun:_ZNKSt3__114__codecvt_utf8IDiE11do_encodingEv=uninstrumented\nfun:_ZNKSt3__114__codecvt_utf8IDiE13do_max_lengthEv=uninstrumented\nfun:_ZNKSt3__114__codecvt_ut
f8IDiE16do_always_noconvEv=uninstrumented\nfun:_ZNKSt3__114__codecvt_utf8IDiE5do_inER11__mbstate_tPKcS5_RS5_PDiS7_RS7_=uninstrumented\nfun:_ZNKSt3__114__codecvt_utf8IDiE6do_outER11__mbstate_tPKDiS5_RS5_PcS7_RS7_=uninstrumented\nfun:_ZNKSt3__114__codecvt_utf8IDiE9do_lengthER11__mbstate_tPKcS5_m=uninstrumented\nfun:_ZNKSt3__114__codecvt_utf8IDsE10do_unshiftER11__mbstate_tPcS4_RS4_=uninstrumented\nfun:_ZNKSt3__114__codecvt_utf8IDsE11do_encodingEv=uninstrumented\nfun:_ZNKSt3__114__codecvt_utf8IDsE13do_max_lengthEv=uninstrumented\nfun:_ZNKSt3__114__codecvt_utf8IDsE16do_always_noconvEv=uninstrumented\nfun:_ZNKSt3__114__codecvt_utf8IDsE5do_inER11__mbstate_tPKcS5_RS5_PDsS7_RS7_=uninstrumented\nfun:_ZNKSt3__114__codecvt_utf8IDsE6do_outER11__mbstate_tPKDsS5_RS5_PcS7_RS7_=uninstrumented\nfun:_ZNKSt3__114__codecvt_utf8IDsE9do_lengthER11__mbstate_tPKcS5_m=uninstrumented\nfun:_ZNKSt3__114__codecvt_utf8IwE10do_unshiftER11__mbstate_tPcS4_RS4_=uninstrumented\nfun:_ZNKSt3__114__codecvt_utf8IwE11do_encodingEv=uninstrumented\nfun:_ZNKSt3__114__codecvt_utf8IwE13do_max_lengthEv=uninstrumented\nfun:_ZNKSt3__114__codecvt_utf8IwE16do_always_noconvEv=uninstrumented\nfun:_ZNKSt3__114__codecvt_utf8IwE5do_inER11__mbstate_tPKcS5_RS5_PwS7_RS7_=uninstrumented\nfun:_ZNKSt3__114__codecvt_utf8IwE6do_outER11__mbstate_tPKwS5_RS5_PcS7_RS7_=uninstrumented\nfun:_ZNKSt3__114__codecvt_utf8IwE9do_lengthER11__mbstate_tPKcS5_m=uninstrumented\nfun:_ZNKSt3__114collate_bynameIcE10do_compareEPKcS3_S3_S3_=uninstrumented\nfun:_ZNKSt3__114collate_bynameIcE12do_transformEPKcS3_=uninstrumented\nfun:_ZNKSt3__114collate_bynameIwE10do_compareEPKwS3_S3_S3_=uninstrumented\nfun:_ZNKSt3__114collate_bynameIwE12do_transformEPKwS3_=uninstrumented\nfun:_ZNKSt3__114error_category10equivalentERKNS_10error_codeEi=uninstrumented\nfun:_ZNKSt3__114error_category10equivalentEiRKNS_15error_conditionE=uninstrumented\nfun:_ZNKSt3__114error_category23default_error_conditionEi=uninstrumented\nfun:_ZNKSt3__115__codecvt_utf16IDiLb0EE10do_unshi
ftER11__mbstate_tPcS4_RS4_=uninstrumented\nfun:_ZNKSt3__115__codecvt_utf16IDiLb0EE11do_encodingEv=uninstrumented\nfun:_ZNKSt3__115__codecvt_utf16IDiLb0EE13do_max_lengthEv=uninstrumented\nfun:_ZNKSt3__115__codecvt_utf16IDiLb0EE16do_always_noconvEv=uninstrumented\nfun:_ZNKSt3__115__codecvt_utf16IDiLb0EE5do_inER11__mbstate_tPKcS5_RS5_PDiS7_RS7_=uninstrumented\nfun:_ZNKSt3__115__codecvt_utf16IDiLb0EE6do_outER11__mbstate_tPKDiS5_RS5_PcS7_RS7_=uninstrumented\nfun:_ZNKSt3__115__codecvt_utf16IDiLb0EE9do_lengthER11__mbstate_tPKcS5_m=uninstrumented\nfun:_ZNKSt3__115__codecvt_utf16IDiLb1EE10do_unshiftER11__mbstate_tPcS4_RS4_=uninstrumented\nfun:_ZNKSt3__115__codecvt_utf16IDiLb1EE11do_encodingEv=uninstrumented\nfun:_ZNKSt3__115__codecvt_utf16IDiLb1EE13do_max_lengthEv=uninstrumented\nfun:_ZNKSt3__115__codecvt_utf16IDiLb1EE16do_always_noconvEv=uninstrumented\nfun:_ZNKSt3__115__codecvt_utf16IDiLb1EE5do_inER11__mbstate_tPKcS5_RS5_PDiS7_RS7_=uninstrumented\nfun:_ZNKSt3__115__codecvt_utf16IDiLb1EE6do_outER11__mbstate_tPKDiS5_RS5_PcS7_RS7_=uninstrumented\nfun:_ZNKSt3__115__codecvt_utf16IDiLb1EE9do_lengthER11__mbstate_tPKcS5_m=uninstrumented\nfun:_ZNKSt3__115__codecvt_utf16IDsLb0EE10do_unshiftER11__mbstate_tPcS4_RS4_=uninstrumented\nfun:_ZNKSt3__115__codecvt_utf16IDsLb0EE11do_encodingEv=uninstrumented\nfun:_ZNKSt3__115__codecvt_utf16IDsLb0EE13do_max_lengthEv=uninstrumented\nfun:_ZNKSt3__115__codecvt_utf16IDsLb0EE16do_always_noconvEv=uninstrumented\nfun:_ZNKSt3__115__codecvt_utf16IDsLb0EE5do_inER11__mbstate_tPKcS5_RS5_PDsS7_RS7_=uninstrumented\nfun:_ZNKSt3__115__codecvt_utf16IDsLb0EE6do_outER11__mbstate_tPKDsS5_RS5_PcS7_RS7_=uninstrumented\nfun:_ZNKSt3__115__codecvt_utf16IDsLb0EE9do_lengthER11__mbstate_tPKcS5_m=uninstrumented\nfun:_ZNKSt3__115__codecvt_utf16IDsLb1EE10do_unshiftER11__mbstate_tPcS4_RS4_=uninstrumented\nfun:_ZNKSt3__115__codecvt_utf16IDsLb1EE11do_encodingEv=uninstrumented\nfun:_ZNKSt3__115__codecvt_utf16IDsLb1EE13do_max_lengthEv=uninstrumented\nfun:_ZNKSt3__115__codecvt_ut
f16IDsLb1EE16do_always_noconvEv=uninstrumented\nfun:_ZNKSt3__115__codecvt_utf16IDsLb1EE5do_inER11__mbstate_tPKcS5_RS5_PDsS7_RS7_=uninstrumented\nfun:_ZNKSt3__115__codecvt_utf16IDsLb1EE6do_outER11__mbstate_tPKDsS5_RS5_PcS7_RS7_=uninstrumented\nfun:_ZNKSt3__115__codecvt_utf16IDsLb1EE9do_lengthER11__mbstate_tPKcS5_m=uninstrumented\nfun:_ZNKSt3__115__codecvt_utf16IwLb0EE10do_unshiftER11__mbstate_tPcS4_RS4_=uninstrumented\nfun:_ZNKSt3__115__codecvt_utf16IwLb0EE11do_encodingEv=uninstrumented\nfun:_ZNKSt3__115__codecvt_utf16IwLb0EE13do_max_lengthEv=uninstrumented\nfun:_ZNKSt3__115__codecvt_utf16IwLb0EE16do_always_noconvEv=uninstrumented\nfun:_ZNKSt3__115__codecvt_utf16IwLb0EE5do_inER11__mbstate_tPKcS5_RS5_PwS7_RS7_=uninstrumented\nfun:_ZNKSt3__115__codecvt_utf16IwLb0EE6do_outER11__mbstate_tPKwS5_RS5_PcS7_RS7_=uninstrumented\nfun:_ZNKSt3__115__codecvt_utf16IwLb0EE9do_lengthER11__mbstate_tPKcS5_m=uninstrumented\nfun:_ZNKSt3__115__codecvt_utf16IwLb1EE10do_unshiftER11__mbstate_tPcS4_RS4_=uninstrumented\nfun:_ZNKSt3__115__codecvt_utf16IwLb1EE11do_encodingEv=uninstrumented\nfun:_ZNKSt3__115__codecvt_utf16IwLb1EE13do_max_lengthEv=uninstrumented\nfun:_ZNKSt3__115__codecvt_utf16IwLb1EE16do_always_noconvEv=uninstrumented\nfun:_ZNKSt3__115__codecvt_utf16IwLb1EE5do_inER11__mbstate_tPKcS5_RS5_PwS7_RS7_=uninstrumented\nfun:_ZNKSt3__115__codecvt_utf16IwLb1EE6do_outER11__mbstate_tPKwS5_RS5_PcS7_RS7_=uninstrumented\nfun:_ZNKSt3__115__codecvt_utf16IwLb1EE9do_lengthER11__mbstate_tPKcS5_m=uninstrumented\nfun:_ZNKSt3__115basic_streambufIcNS_11char_traitsIcEEE6getlocEv=uninstrumented\nfun:_ZNKSt3__115basic_streambufIwNS_11char_traitsIwEEE6getlocEv=uninstrumented\nfun:_ZNKSt3__115basic_stringbufIcNS_11char_traitsIcEENS_9allocatorIcEEE3strEv=uninstrumented\nfun:_ZNKSt3__115error_condition7messageEv=uninstrumented\nfun:_ZNKSt3__117moneypunct_bynameIcLb0EE11do_groupingEv=uninstrumented\nfun:_ZNKSt3__117moneypunct_bynameIcLb0EE13do_neg_formatEv=uninstrumented\nfun:_ZNKSt3__117moneypunct_bynameIcLb0E
E13do_pos_formatEv=uninstrumented\nfun:_ZNKSt3__117moneypunct_bynameIcLb0EE14do_curr_symbolEv=uninstrumented\nfun:_ZNKSt3__117moneypunct_bynameIcLb0EE14do_frac_digitsEv=uninstrumented\nfun:_ZNKSt3__117moneypunct_bynameIcLb0EE16do_decimal_pointEv=uninstrumented\nfun:_ZNKSt3__117moneypunct_bynameIcLb0EE16do_negative_signEv=uninstrumented\nfun:_ZNKSt3__117moneypunct_bynameIcLb0EE16do_positive_signEv=uninstrumented\nfun:_ZNKSt3__117moneypunct_bynameIcLb0EE16do_thousands_sepEv=uninstrumented\nfun:_ZNKSt3__117moneypunct_bynameIcLb1EE11do_groupingEv=uninstrumented\nfun:_ZNKSt3__117moneypunct_bynameIcLb1EE13do_neg_formatEv=uninstrumented\nfun:_ZNKSt3__117moneypunct_bynameIcLb1EE13do_pos_formatEv=uninstrumented\nfun:_ZNKSt3__117moneypunct_bynameIcLb1EE14do_curr_symbolEv=uninstrumented\nfun:_ZNKSt3__117moneypunct_bynameIcLb1EE14do_frac_digitsEv=uninstrumented\nfun:_ZNKSt3__117moneypunct_bynameIcLb1EE16do_decimal_pointEv=uninstrumented\nfun:_ZNKSt3__117moneypunct_bynameIcLb1EE16do_negative_signEv=uninstrumented\nfun:_ZNKSt3__117moneypunct_bynameIcLb1EE16do_positive_signEv=uninstrumented\nfun:_ZNKSt3__117moneypunct_bynameIcLb1EE16do_thousands_sepEv=uninstrumented\nfun:_ZNKSt3__117moneypunct_bynameIwLb0EE11do_groupingEv=uninstrumented\nfun:_ZNKSt3__117moneypunct_bynameIwLb0EE13do_neg_formatEv=uninstrumented\nfun:_ZNKSt3__117moneypunct_bynameIwLb0EE13do_pos_formatEv=uninstrumented\nfun:_ZNKSt3__117moneypunct_bynameIwLb0EE14do_curr_symbolEv=uninstrumented\nfun:_ZNKSt3__117moneypunct_bynameIwLb0EE14do_frac_digitsEv=uninstrumented\nfun:_ZNKSt3__117moneypunct_bynameIwLb0EE16do_decimal_pointEv=uninstrumented\nfun:_ZNKSt3__117moneypunct_bynameIwLb0EE16do_negative_signEv=uninstrumented\nfun:_ZNKSt3__117moneypunct_bynameIwLb0EE16do_positive_signEv=uninstrumented\nfun:_ZNKSt3__117moneypunct_bynameIwLb0EE16do_thousands_sepEv=uninstrumented\nfun:_ZNKSt3__117moneypunct_bynameIwLb1EE11do_groupingEv=uninstrumented\nfun:_ZNKSt3__117moneypunct_bynameIwLb1EE13do_neg_formatEv=uninstrumented\nfun:_
ZNKSt3__117moneypunct_bynameIwLb1EE13do_pos_formatEv=uninstrumented\nfun:_ZNKSt3__117moneypunct_bynameIwLb1EE14do_curr_symbolEv=uninstrumented\nfun:_ZNKSt3__117moneypunct_bynameIwLb1EE14do_frac_digitsEv=uninstrumented\nfun:_ZNKSt3__117moneypunct_bynameIwLb1EE16do_decimal_pointEv=uninstrumented\nfun:_ZNKSt3__117moneypunct_bynameIwLb1EE16do_negative_signEv=uninstrumented\nfun:_ZNKSt3__117moneypunct_bynameIwLb1EE16do_positive_signEv=uninstrumented\nfun:_ZNKSt3__117moneypunct_bynameIwLb1EE16do_thousands_sepEv=uninstrumented\nfun:_ZNKSt3__118__time_get_storageIcE15__do_date_orderEv=uninstrumented\nfun:_ZNKSt3__118__time_get_storageIwE15__do_date_orderEv=uninstrumented\nfun:_ZNKSt3__119__libcpp_debug_info4whatEv=uninstrumented\nfun:_ZNKSt3__119__shared_weak_count13__get_deleterERKSt9type_info=uninstrumented\nfun:_ZNKSt3__120__codecvt_utf8_utf16IDiE10do_unshiftER11__mbstate_tPcS4_RS4_=uninstrumented\nfun:_ZNKSt3__120__codecvt_utf8_utf16IDiE11do_encodingEv=uninstrumented\nfun:_ZNKSt3__120__codecvt_utf8_utf16IDiE13do_max_lengthEv=uninstrumented\nfun:_ZNKSt3__120__codecvt_utf8_utf16IDiE16do_always_noconvEv=uninstrumented\nfun:_ZNKSt3__120__codecvt_utf8_utf16IDiE5do_inER11__mbstate_tPKcS5_RS5_PDiS7_RS7_=uninstrumented\nfun:_ZNKSt3__120__codecvt_utf8_utf16IDiE6do_outER11__mbstate_tPKDiS5_RS5_PcS7_RS7_=uninstrumented\nfun:_ZNKSt3__120__codecvt_utf8_utf16IDiE9do_lengthER11__mbstate_tPKcS5_m=uninstrumented\nfun:_ZNKSt3__120__codecvt_utf8_utf16IDsE10do_unshiftER11__mbstate_tPcS4_RS4_=uninstrumented\nfun:_ZNKSt3__120__codecvt_utf8_utf16IDsE11do_encodingEv=uninstrumented\nfun:_ZNKSt3__120__codecvt_utf8_utf16IDsE13do_max_lengthEv=uninstrumented\nfun:_ZNKSt3__120__codecvt_utf8_utf16IDsE16do_always_noconvEv=uninstrumented\nfun:_ZNKSt3__120__codecvt_utf8_utf16IDsE5do_inER11__mbstate_tPKcS5_RS5_PDsS7_RS7_=uninstrumented\nfun:_ZNKSt3__120__codecvt_utf8_utf16IDsE6do_outER11__mbstate_tPKDsS5_RS5_PcS7_RS7_=uninstrumented\nfun:_ZNKSt3__120__codecvt_utf8_utf16IDsE9do_lengthER11__mbstate_tPKcS5_
m=uninstrumented\nfun:_ZNKSt3__120__codecvt_utf8_utf16IwE10do_unshiftER11__mbstate_tPcS4_RS4_=uninstrumented\nfun:_ZNKSt3__120__codecvt_utf8_utf16IwE11do_encodingEv=uninstrumented\nfun:_ZNKSt3__120__codecvt_utf8_utf16IwE13do_max_lengthEv=uninstrumented\nfun:_ZNKSt3__120__codecvt_utf8_utf16IwE16do_always_noconvEv=uninstrumented\nfun:_ZNKSt3__120__codecvt_utf8_utf16IwE5do_inER11__mbstate_tPKcS5_RS5_PwS7_RS7_=uninstrumented\nfun:_ZNKSt3__120__codecvt_utf8_utf16IwE6do_outER11__mbstate_tPKwS5_RS5_PcS7_RS7_=uninstrumented\nfun:_ZNKSt3__120__codecvt_utf8_utf16IwE9do_lengthER11__mbstate_tPKcS5_m=uninstrumented\nfun:_ZNKSt3__120__time_get_c_storageIcE3__XEv=uninstrumented\nfun:_ZNKSt3__120__time_get_c_storageIcE3__cEv=uninstrumented\nfun:_ZNKSt3__120__time_get_c_storageIcE3__rEv=uninstrumented\nfun:_ZNKSt3__120__time_get_c_storageIcE3__xEv=uninstrumented\nfun:_ZNKSt3__120__time_get_c_storageIcE7__am_pmEv=uninstrumented\nfun:_ZNKSt3__120__time_get_c_storageIcE7__weeksEv=uninstrumented\nfun:_ZNKSt3__120__time_get_c_storageIcE8__monthsEv=uninstrumented\nfun:_ZNKSt3__120__time_get_c_storageIwE3__XEv=uninstrumented\nfun:_ZNKSt3__120__time_get_c_storageIwE3__cEv=uninstrumented\nfun:_ZNKSt3__120__time_get_c_storageIwE3__rEv=uninstrumented\nfun:_ZNKSt3__120__time_get_c_storageIwE3__xEv=uninstrumented\nfun:_ZNKSt3__120__time_get_c_storageIwE7__am_pmEv=uninstrumented\nfun:_ZNKSt3__120__time_get_c_storageIwE7__weeksEv=uninstrumented\nfun:_ZNKSt3__120__time_get_c_storageIwE8__monthsEv=uninstrumented\nfun:_ZNKSt3__120__vector_base_commonILb1EE20__throw_length_errorEv=uninstrumented\nfun:_ZNKSt3__120__vector_base_commonILb1EE20__throw_out_of_rangeEv=uninstrumented\nfun:_ZNKSt3__121__basic_string_commonILb1EE20__throw_length_errorEv=uninstrumented\nfun:_ZNKSt3__121__basic_string_commonILb1EE20__throw_out_of_rangeEv=uninstrumented\nfun:_ZNKSt3__123__match_any_but_newlineIcE6__execERNS_7__stateIcEE=uninstrumented\nfun:_ZNKSt3__123__match_any_but_newlineIwE6__execERNS_7__stateIwEE=uninstrumen
ted\nfun:_ZNKSt3__14__fs10filesystem18directory_iterator13__dereferenceEv=uninstrumented\nfun:_ZNKSt3__14__fs10filesystem28recursive_directory_iterator13__dereferenceEv=uninstrumented\nfun:_ZNKSt3__14__fs10filesystem28recursive_directory_iterator5depthEv=uninstrumented\nfun:_ZNKSt3__14__fs10filesystem28recursive_directory_iterator7optionsEv=uninstrumented\nfun:_ZNKSt3__14__fs10filesystem4path10__filenameEv=uninstrumented\nfun:_ZNKSt3__14__fs10filesystem4path11__extensionEv=uninstrumented\nfun:_ZNKSt3__14__fs10filesystem4path11__root_nameEv=uninstrumented\nfun:_ZNKSt3__14__fs10filesystem4path13__parent_pathEv=uninstrumented\nfun:_ZNKSt3__14__fs10filesystem4path15__relative_pathEv=uninstrumented\nfun:_ZNKSt3__14__fs10filesystem4path15__root_path_rawEv=uninstrumented\nfun:_ZNKSt3__14__fs10filesystem4path16__root_directoryEv=uninstrumented\nfun:_ZNKSt3__14__fs10filesystem4path16lexically_normalEv=uninstrumented\nfun:_ZNKSt3__14__fs10filesystem4path18lexically_relativeERKS2_=uninstrumented\nfun:_ZNKSt3__14__fs10filesystem4path3endEv=uninstrumented\nfun:_ZNKSt3__14__fs10filesystem4path5beginEv=uninstrumented\nfun:_ZNKSt3__14__fs10filesystem4path6__stemEv=uninstrumented\nfun:_ZNKSt3__14__fs10filesystem4path9__compareENS_17basic_string_viewIcNS_11char_traitsIcEEEE=uninstrumented\nfun:_ZNKSt3__15ctypeIcE10do_tolowerEPcPKc=uninstrumented\nfun:_ZNKSt3__15ctypeIcE10do_tolowerEc=uninstrumented\nfun:_ZNKSt3__15ctypeIcE10do_toupperEPcPKc=uninstrumented\nfun:_ZNKSt3__15ctypeIcE10do_toupperEc=uninstrumented\nfun:_ZNKSt3__15ctypeIcE8do_widenEPKcS3_Pc=uninstrumented\nfun:_ZNKSt3__15ctypeIcE8do_widenEc=uninstrumented\nfun:_ZNKSt3__15ctypeIcE9do_narrowEPKcS3_cPc=uninstrumented\nfun:_ZNKSt3__15ctypeIcE9do_narrowEcc=uninstrumented\nfun:_ZNKSt3__15ctypeIwE10do_scan_isEtPKwS3_=uninstrumented\nfun:_ZNKSt3__15ctypeIwE10do_tolowerEPwPKw=uninstrumented\nfun:_ZNKSt3__15ctypeIwE10do_tolowerEw=uninstrumented\nfun:_ZNKSt3__15ctypeIwE10do_toupperEPwPKw=uninstrumented\nfun:_ZNKSt3__15ctypeIwE10do_tou
pperEw=uninstrumented\nfun:_ZNKSt3__15ctypeIwE11do_scan_notEtPKwS3_=uninstrumented\nfun:_ZNKSt3__15ctypeIwE5do_isEPKwS3_Pt=uninstrumented\nfun:_ZNKSt3__15ctypeIwE5do_isEtw=uninstrumented\nfun:_ZNKSt3__15ctypeIwE8do_widenEPKcS3_Pw=uninstrumented\nfun:_ZNKSt3__15ctypeIwE8do_widenEc=uninstrumented\nfun:_ZNKSt3__15ctypeIwE9do_narrowEPKwS3_cPc=uninstrumented\nfun:_ZNKSt3__15ctypeIwE9do_narrowEwc=uninstrumented\nfun:_ZNKSt3__16locale4nameEv=uninstrumented\nfun:_ZNKSt3__16locale9has_facetERNS0_2idE=uninstrumented\nfun:_ZNKSt3__16locale9use_facetERNS0_2idE=uninstrumented\nfun:_ZNKSt3__16localeeqERKS0_=uninstrumented\nfun:_ZNKSt3__17codecvtIDiDu11__mbstate_tE10do_unshiftERS1_PDuS4_RS4_=uninstrumented\nfun:_ZNKSt3__17codecvtIDiDu11__mbstate_tE11do_encodingEv=uninstrumented\nfun:_ZNKSt3__17codecvtIDiDu11__mbstate_tE13do_max_lengthEv=uninstrumented\nfun:_ZNKSt3__17codecvtIDiDu11__mbstate_tE16do_always_noconvEv=uninstrumented\nfun:_ZNKSt3__17codecvtIDiDu11__mbstate_tE5do_inERS1_PKDuS5_RS5_PDiS7_RS7_=uninstrumented\nfun:_ZNKSt3__17codecvtIDiDu11__mbstate_tE6do_outERS1_PKDiS5_RS5_PDuS7_RS7_=uninstrumented\nfun:_ZNKSt3__17codecvtIDiDu11__mbstate_tE9do_lengthERS1_PKDuS5_m=uninstrumented\nfun:_ZNKSt3__17codecvtIDic11__mbstate_tE10do_unshiftERS1_PcS4_RS4_=uninstrumented\nfun:_ZNKSt3__17codecvtIDic11__mbstate_tE11do_encodingEv=uninstrumented\nfun:_ZNKSt3__17codecvtIDic11__mbstate_tE13do_max_lengthEv=uninstrumented\nfun:_ZNKSt3__17codecvtIDic11__mbstate_tE16do_always_noconvEv=uninstrumented\nfun:_ZNKSt3__17codecvtIDic11__mbstate_tE5do_inERS1_PKcS5_RS5_PDiS7_RS7_=uninstrumented\nfun:_ZNKSt3__17codecvtIDic11__mbstate_tE6do_outERS1_PKDiS5_RS5_PcS7_RS7_=uninstrumented\nfun:_ZNKSt3__17codecvtIDic11__mbstate_tE9do_lengthERS1_PKcS5_m=uninstrumented\nfun:_ZNKSt3__17codecvtIDsDu11__mbstate_tE10do_unshiftERS1_PDuS4_RS4_=uninstrumented\nfun:_ZNKSt3__17codecvtIDsDu11__mbstate_tE11do_encodingEv=uninstrumented\nfun:_ZNKSt3__17codecvtIDsDu11__mbstate_tE13do_max_lengthEv=uninstrumented\nfun:_ZNKSt3__17
codecvtIDsDu11__mbstate_tE16do_always_noconvEv=uninstrumented\nfun:_ZNKSt3__17codecvtIDsDu11__mbstate_tE5do_inERS1_PKDuS5_RS5_PDsS7_RS7_=uninstrumented\nfun:_ZNKSt3__17codecvtIDsDu11__mbstate_tE6do_outERS1_PKDsS5_RS5_PDuS7_RS7_=uninstrumented\nfun:_ZNKSt3__17codecvtIDsDu11__mbstate_tE9do_lengthERS1_PKDuS5_m=uninstrumented\nfun:_ZNKSt3__17codecvtIDsc11__mbstate_tE10do_unshiftERS1_PcS4_RS4_=uninstrumented\nfun:_ZNKSt3__17codecvtIDsc11__mbstate_tE11do_encodingEv=uninstrumented\nfun:_ZNKSt3__17codecvtIDsc11__mbstate_tE13do_max_lengthEv=uninstrumented\nfun:_ZNKSt3__17codecvtIDsc11__mbstate_tE16do_always_noconvEv=uninstrumented\nfun:_ZNKSt3__17codecvtIDsc11__mbstate_tE5do_inERS1_PKcS5_RS5_PDsS7_RS7_=uninstrumented\nfun:_ZNKSt3__17codecvtIDsc11__mbstate_tE6do_outERS1_PKDsS5_RS5_PcS7_RS7_=uninstrumented\nfun:_ZNKSt3__17codecvtIDsc11__mbstate_tE9do_lengthERS1_PKcS5_m=uninstrumented\nfun:_ZNKSt3__17codecvtIcc11__mbstate_tE10do_unshiftERS1_PcS4_RS4_=uninstrumented\nfun:_ZNKSt3__17codecvtIcc11__mbstate_tE11do_encodingEv=uninstrumented\nfun:_ZNKSt3__17codecvtIcc11__mbstate_tE13do_max_lengthEv=uninstrumented\nfun:_ZNKSt3__17codecvtIcc11__mbstate_tE16do_always_noconvEv=uninstrumented\nfun:_ZNKSt3__17codecvtIcc11__mbstate_tE5do_inERS1_PKcS5_RS5_PcS7_RS7_=uninstrumented\nfun:_ZNKSt3__17codecvtIcc11__mbstate_tE6do_outERS1_PKcS5_RS5_PcS7_RS7_=uninstrumented\nfun:_ZNKSt3__17codecvtIcc11__mbstate_tE9do_lengthERS1_PKcS5_m=uninstrumented\nfun:_ZNKSt3__17codecvtIwc11__mbstate_tE10do_unshiftERS1_PcS4_RS4_=uninstrumented\nfun:_ZNKSt3__17codecvtIwc11__mbstate_tE11do_encodingEv=uninstrumented\nfun:_ZNKSt3__17codecvtIwc11__mbstate_tE13do_max_lengthEv=uninstrumented\nfun:_ZNKSt3__17codecvtIwc11__mbstate_tE16do_always_noconvEv=uninstrumented\nfun:_ZNKSt3__17codecvtIwc11__mbstate_tE5do_inERS1_PKcS5_RS5_PwS7_RS7_=uninstrumented\nfun:_ZNKSt3__17codecvtIwc11__mbstate_tE6do_outERS1_PKwS5_RS5_PcS7_RS7_=uninstrumented\nfun:_ZNKSt3__17codecvtIwc11__mbstate_tE9do_lengthERS1_PKcS5_m=uninstrumented\nfun:_ZN
KSt3__17collateIcE10do_compareEPKcS3_S3_S3_=uninstrumented\nfun:_ZNKSt3__17collateIcE12do_transformEPKcS3_=uninstrumented\nfun:_ZNKSt3__17collateIcE7do_hashEPKcS3_=uninstrumented\nfun:_ZNKSt3__17collateIwE10do_compareEPKwS3_S3_S3_=uninstrumented\nfun:_ZNKSt3__17collateIwE12do_transformEPKwS3_=uninstrumented\nfun:_ZNKSt3__17collateIwE7do_hashEPKwS3_=uninstrumented\nfun:_ZNKSt3__17num_getIcNS_19istreambuf_iteratorIcNS_11char_traitsIcEEEEE6do_getES4_S4_RNS_8ios_baseERjRPv=uninstrumented\nfun:_ZNKSt3__17num_getIcNS_19istreambuf_iteratorIcNS_11char_traitsIcEEEEE6do_getES4_S4_RNS_8ios_baseERjRb=uninstrumented\nfun:_ZNKSt3__17num_getIcNS_19istreambuf_iteratorIcNS_11char_traitsIcEEEEE6do_getES4_S4_RNS_8ios_baseERjRd=uninstrumented\nfun:_ZNKSt3__17num_getIcNS_19istreambuf_iteratorIcNS_11char_traitsIcEEEEE6do_getES4_S4_RNS_8ios_baseERjRe=uninstrumented\nfun:_ZNKSt3__17num_getIcNS_19istreambuf_iteratorIcNS_11char_traitsIcEEEEE6do_getES4_S4_RNS_8ios_baseERjRf=uninstrumented\nfun:_ZNKSt3__17num_getIcNS_19istreambuf_iteratorIcNS_11char_traitsIcEEEEE6do_getES4_S4_RNS_8ios_baseERjRl=uninstrumented\nfun:_ZNKSt3__17num_getIcNS_19istreambuf_iteratorIcNS_11char_traitsIcEEEEE6do_getES4_S4_RNS_8ios_baseERjRm=uninstrumented\nfun:_ZNKSt3__17num_getIcNS_19istreambuf_iteratorIcNS_11char_traitsIcEEEEE6do_getES4_S4_RNS_8ios_baseERjRt=uninstrumented\nfun:_ZNKSt3__17num_getIcNS_19istreambuf_iteratorIcNS_11char_traitsIcEEEEE6do_getES4_S4_RNS_8ios_baseERjRx=uninstrumented\nfun:_ZNKSt3__17num_getIcNS_19istreambuf_iteratorIcNS_11char_traitsIcEEEEE6do_getES4_S4_RNS_8ios_baseERjRy=uninstrumented\nfun:_ZNKSt3__17num_getIcNS_19istreambuf_iteratorIcNS_11char_traitsIcEEEEE6do_getES4_S4_RNS_8ios_baseERjS8_=uninstrumented\nfun:_ZNKSt3__17num_getIwNS_19istreambuf_iteratorIwNS_11char_traitsIwEEEEE6do_getES4_S4_RNS_8ios_baseERjRPv=uninstrumented\nfun:_ZNKSt3__17num_getIwNS_19istreambuf_iteratorIwNS_11char_traitsIwEEEEE6do_getES4_S4_RNS_8ios_baseERjRb=uninstrumented\nfun:_ZNKSt3__17num_getIwNS_19istreambuf_iter
atorIwNS_11char_traitsIwEEEEE6do_getES4_S4_RNS_8ios_baseERjRd=uninstrumented\nfun:_ZNKSt3__17num_getIwNS_19istreambuf_iteratorIwNS_11char_traitsIwEEEEE6do_getES4_S4_RNS_8ios_baseERjRe=uninstrumented\nfun:_ZNKSt3__17num_getIwNS_19istreambuf_iteratorIwNS_11char_traitsIwEEEEE6do_getES4_S4_RNS_8ios_baseERjRf=uninstrumented\nfun:_ZNKSt3__17num_getIwNS_19istreambuf_iteratorIwNS_11char_traitsIwEEEEE6do_getES4_S4_RNS_8ios_baseERjRl=uninstrumented\nfun:_ZNKSt3__17num_getIwNS_19istreambuf_iteratorIwNS_11char_traitsIwEEEEE6do_getES4_S4_RNS_8ios_baseERjRm=uninstrumented\nfun:_ZNKSt3__17num_getIwNS_19istreambuf_iteratorIwNS_11char_traitsIwEEEEE6do_getES4_S4_RNS_8ios_baseERjRt=uninstrumented\nfun:_ZNKSt3__17num_getIwNS_19istreambuf_iteratorIwNS_11char_traitsIwEEEEE6do_getES4_S4_RNS_8ios_baseERjRx=uninstrumented\nfun:_ZNKSt3__17num_getIwNS_19istreambuf_iteratorIwNS_11char_traitsIwEEEEE6do_getES4_S4_RNS_8ios_baseERjRy=uninstrumented\nfun:_ZNKSt3__17num_getIwNS_19istreambuf_iteratorIwNS_11char_traitsIwEEEEE6do_getES4_S4_RNS_8ios_baseERjS8_=uninstrumented\nfun:_ZNKSt3__17num_putIcNS_19ostreambuf_iteratorIcNS_11char_traitsIcEEEEE6do_putES4_RNS_8ios_baseEcPKv=uninstrumented\nfun:_ZNKSt3__17num_putIcNS_19ostreambuf_iteratorIcNS_11char_traitsIcEEEEE6do_putES4_RNS_8ios_baseEcb=uninstrumented\nfun:_ZNKSt3__17num_putIcNS_19ostreambuf_iteratorIcNS_11char_traitsIcEEEEE6do_putES4_RNS_8ios_baseEcd=uninstrumented\nfun:_ZNKSt3__17num_putIcNS_19ostreambuf_iteratorIcNS_11char_traitsIcEEEEE6do_putES4_RNS_8ios_baseEce=uninstrumented\nfun:_ZNKSt3__17num_putIcNS_19ostreambuf_iteratorIcNS_11char_traitsIcEEEEE6do_putES4_RNS_8ios_baseEcl=uninstrumented\nfun:_ZNKSt3__17num_putIcNS_19ostreambuf_iteratorIcNS_11char_traitsIcEEEEE6do_putES4_RNS_8ios_baseEcm=uninstrumented\nfun:_ZNKSt3__17num_putIcNS_19ostreambuf_iteratorIcNS_11char_traitsIcEEEEE6do_putES4_RNS_8ios_baseEcx=uninstrumented\nfun:_ZNKSt3__17num_putIcNS_19ostreambuf_iteratorIcNS_11char_traitsIcEEEEE6do_putES4_RNS_8ios_baseEcy=uninstrumented\nfun:_ZN
KSt3__17num_putIwNS_19ostreambuf_iteratorIwNS_11char_traitsIwEEEEE6do_putES4_RNS_8ios_baseEwPKv=uninstrumented\nfun:_ZNKSt3__17num_putIwNS_19ostreambuf_iteratorIwNS_11char_traitsIwEEEEE6do_putES4_RNS_8ios_baseEwb=uninstrumented\nfun:_ZNKSt3__17num_putIwNS_19ostreambuf_iteratorIwNS_11char_traitsIwEEEEE6do_putES4_RNS_8ios_baseEwd=uninstrumented\nfun:_ZNKSt3__17num_putIwNS_19ostreambuf_iteratorIwNS_11char_traitsIwEEEEE6do_putES4_RNS_8ios_baseEwe=uninstrumented\nfun:_ZNKSt3__17num_putIwNS_19ostreambuf_iteratorIwNS_11char_traitsIwEEEEE6do_putES4_RNS_8ios_baseEwl=uninstrumented\nfun:_ZNKSt3__17num_putIwNS_19ostreambuf_iteratorIwNS_11char_traitsIwEEEEE6do_putES4_RNS_8ios_baseEwm=uninstrumented\nfun:_ZNKSt3__17num_putIwNS_19ostreambuf_iteratorIwNS_11char_traitsIwEEEEE6do_putES4_RNS_8ios_baseEwx=uninstrumented\nfun:_ZNKSt3__17num_putIwNS_19ostreambuf_iteratorIwNS_11char_traitsIwEEEEE6do_putES4_RNS_8ios_baseEwy=uninstrumented\nfun:_ZNKSt3__18ios_base6getlocEv=uninstrumented\nfun:_ZNKSt3__18messagesIcE6do_getEliiRKNS_12basic_stringIcNS_11char_traitsIcEENS_9allocatorIcEEEE=uninstrumented\nfun:_ZNKSt3__18messagesIcE7do_openERKNS_12basic_stringIcNS_11char_traitsIcEENS_9allocatorIcEEEERKNS_6localeE=uninstrumented\nfun:_ZNKSt3__18messagesIcE8do_closeEl=uninstrumented\nfun:_ZNKSt3__18messagesIwE6do_getEliiRKNS_12basic_stringIwNS_11char_traitsIwEENS_9allocatorIwEEEE=uninstrumented\nfun:_ZNKSt3__18messagesIwE7do_openERKNS_12basic_stringIcNS_11char_traitsIcEENS_9allocatorIcEEEERKNS_6localeE=uninstrumented\nfun:_ZNKSt3__18messagesIwE8do_closeEl=uninstrumented\nfun:_ZNKSt3__18numpunctIcE11do_groupingEv=uninstrumented\nfun:_ZNKSt3__18numpunctIcE11do_truenameEv=uninstrumented\nfun:_ZNKSt3__18numpunctIcE12do_falsenameEv=uninstrumented\nfun:_ZNKSt3__18numpunctIcE16do_decimal_pointEv=uninstrumented\nfun:_ZNKSt3__18numpunctIcE16do_thousands_sepEv=uninstrumented\nfun:_ZNKSt3__18numpunctIwE11do_groupingEv=uninstrumented\nfun:_ZNKSt3__18numpunctIwE11do_truenameEv=uninstrumented\nfun:_ZNKSt3__18nu
mpunctIwE12do_falsenameEv=uninstrumented\nfun:_ZNKSt3__18numpunctIwE16do_decimal_pointEv=uninstrumented\nfun:_ZNKSt3__18numpunctIwE16do_thousands_sepEv=uninstrumented\nfun:_ZNKSt3__18time_getIcNS_19istreambuf_iteratorIcNS_11char_traitsIcEEEEE10__get_hourERiRS4_S4_RjRKNS_5ctypeIcEE=uninstrumented\nfun:_ZNKSt3__18time_getIcNS_19istreambuf_iteratorIcNS_11char_traitsIcEEEEE10__get_yearERiRS4_S4_RjRKNS_5ctypeIcEE=uninstrumented\nfun:_ZNKSt3__18time_getIcNS_19istreambuf_iteratorIcNS_11char_traitsIcEEEEE11__get_am_pmERiRS4_S4_RjRKNS_5ctypeIcEE=uninstrumented\nfun:_ZNKSt3__18time_getIcNS_19istreambuf_iteratorIcNS_11char_traitsIcEEEEE11__get_monthERiRS4_S4_RjRKNS_5ctypeIcEE=uninstrumented\nfun:_ZNKSt3__18time_getIcNS_19istreambuf_iteratorIcNS_11char_traitsIcEEEEE11__get_year4ERiRS4_S4_RjRKNS_5ctypeIcEE=uninstrumented\nfun:_ZNKSt3__18time_getIcNS_19istreambuf_iteratorIcNS_11char_traitsIcEEEEE11do_get_dateES4_S4_RNS_8ios_baseERjP2tm=uninstrumented\nfun:_ZNKSt3__18time_getIcNS_19istreambuf_iteratorIcNS_11char_traitsIcEEEEE11do_get_timeES4_S4_RNS_8ios_baseERjP2tm=uninstrumented\nfun:_ZNKSt3__18time_getIcNS_19istreambuf_iteratorIcNS_11char_traitsIcEEEEE11do_get_yearES4_S4_RNS_8ios_baseERjP2tm=uninstrumented\nfun:_ZNKSt3__18time_getIcNS_19istreambuf_iteratorIcNS_11char_traitsIcEEEEE12__get_minuteERiRS4_S4_RjRKNS_5ctypeIcEE=uninstrumented\nfun:_ZNKSt3__18time_getIcNS_19istreambuf_iteratorIcNS_11char_traitsIcEEEEE12__get_secondERiRS4_S4_RjRKNS_5ctypeIcEE=uninstrumented\nfun:_ZNKSt3__18time_getIcNS_19istreambuf_iteratorIcNS_11char_traitsIcEEEEE13__get_12_hourERiRS4_S4_RjRKNS_5ctypeIcEE=uninstrumented\nfun:_ZNKSt3__18time_getIcNS_19istreambuf_iteratorIcNS_11char_traitsIcEEEEE13__get_percentERS4_S4_RjRKNS_5ctypeIcEE=uninstrumented\nfun:_ZNKSt3__18time_getIcNS_19istreambuf_iteratorIcNS_11char_traitsIcEEEEE13__get_weekdayERiRS4_S4_RjRKNS_5ctypeIcEE=uninstrumented\nfun:_ZNKSt3__18time_getIcNS_19istreambuf_iteratorIcNS_11char_traitsIcEEEEE13do_date_orderEv=uninstrumented\nfun:_ZNKSt3__18ti
me_getIcNS_19istreambuf_iteratorIcNS_11char_traitsIcEEEEE14do_get_weekdayES4_S4_RNS_8ios_baseERjP2tm=uninstrumented\nfun:_ZNKSt3__18time_getIcNS_19istreambuf_iteratorIcNS_11char_traitsIcEEEEE15__get_monthnameERiRS4_S4_RjRKNS_5ctypeIcEE=uninstrumented\nfun:_ZNKSt3__18time_getIcNS_19istreambuf_iteratorIcNS_11char_traitsIcEEEEE16do_get_monthnameES4_S4_RNS_8ios_baseERjP2tm=uninstrumented\nfun:_ZNKSt3__18time_getIcNS_19istreambuf_iteratorIcNS_11char_traitsIcEEEEE17__get_weekdaynameERiRS4_S4_RjRKNS_5ctypeIcEE=uninstrumented\nfun:_ZNKSt3__18time_getIcNS_19istreambuf_iteratorIcNS_11char_traitsIcEEEEE17__get_white_spaceERS4_S4_RjRKNS_5ctypeIcEE=uninstrumented\nfun:_ZNKSt3__18time_getIcNS_19istreambuf_iteratorIcNS_11char_traitsIcEEEEE18__get_day_year_numERiRS4_S4_RjRKNS_5ctypeIcEE=uninstrumented\nfun:_ZNKSt3__18time_getIcNS_19istreambuf_iteratorIcNS_11char_traitsIcEEEEE3getES4_S4_RNS_8ios_baseERjP2tmPKcSC_=uninstrumented\nfun:_ZNKSt3__18time_getIcNS_19istreambuf_iteratorIcNS_11char_traitsIcEEEEE6do_getES4_S4_RNS_8ios_baseERjP2tmcc=uninstrumented\nfun:_ZNKSt3__18time_getIcNS_19istreambuf_iteratorIcNS_11char_traitsIcEEEEE9__get_dayERiRS4_S4_RjRKNS_5ctypeIcEE=uninstrumented\nfun:_ZNKSt3__18time_getIwNS_19istreambuf_iteratorIwNS_11char_traitsIwEEEEE10__get_hourERiRS4_S4_RjRKNS_5ctypeIwEE=uninstrumented\nfun:_ZNKSt3__18time_getIwNS_19istreambuf_iteratorIwNS_11char_traitsIwEEEEE10__get_yearERiRS4_S4_RjRKNS_5ctypeIwEE=uninstrumented\nfun:_ZNKSt3__18time_getIwNS_19istreambuf_iteratorIwNS_11char_traitsIwEEEEE11__get_am_pmERiRS4_S4_RjRKNS_5ctypeIwEE=uninstrumented\nfun:_ZNKSt3__18time_getIwNS_19istreambuf_iteratorIwNS_11char_traitsIwEEEEE11__get_monthERiRS4_S4_RjRKNS_5ctypeIwEE=uninstrumented\nfun:_ZNKSt3__18time_getIwNS_19istreambuf_iteratorIwNS_11char_traitsIwEEEEE11__get_year4ERiRS4_S4_RjRKNS_5ctypeIwEE=uninstrumented\nfun:_ZNKSt3__18time_getIwNS_19istreambuf_iteratorIwNS_11char_traitsIwEEEEE11do_get_dateES4_S4_RNS_8ios_baseERjP2tm=uninstrumented\nfun:_ZNKSt3__18time_getIwNS_19istre
ambuf_iteratorIwNS_11char_traitsIwEEEEE11do_get_timeES4_S4_RNS_8ios_baseERjP2tm=uninstrumented\nfun:_ZNKSt3__18time_getIwNS_19istreambuf_iteratorIwNS_11char_traitsIwEEEEE11do_get_yearES4_S4_RNS_8ios_baseERjP2tm=uninstrumented\nfun:_ZNKSt3__18time_getIwNS_19istreambuf_iteratorIwNS_11char_traitsIwEEEEE12__get_minuteERiRS4_S4_RjRKNS_5ctypeIwEE=uninstrumented\nfun:_ZNKSt3__18time_getIwNS_19istreambuf_iteratorIwNS_11char_traitsIwEEEEE12__get_secondERiRS4_S4_RjRKNS_5ctypeIwEE=uninstrumented\nfun:_ZNKSt3__18time_getIwNS_19istreambuf_iteratorIwNS_11char_traitsIwEEEEE13__get_12_hourERiRS4_S4_RjRKNS_5ctypeIwEE=uninstrumented\nfun:_ZNKSt3__18time_getIwNS_19istreambuf_iteratorIwNS_11char_traitsIwEEEEE13__get_percentERS4_S4_RjRKNS_5ctypeIwEE=uninstrumented\nfun:_ZNKSt3__18time_getIwNS_19istreambuf_iteratorIwNS_11char_traitsIwEEEEE13__get_weekdayERiRS4_S4_RjRKNS_5ctypeIwEE=uninstrumented\nfun:_ZNKSt3__18time_getIwNS_19istreambuf_iteratorIwNS_11char_traitsIwEEEEE13do_date_orderEv=uninstrumented\nfun:_ZNKSt3__18time_getIwNS_19istreambuf_iteratorIwNS_11char_traitsIwEEEEE14do_get_weekdayES4_S4_RNS_8ios_baseERjP2tm=uninstrumented\nfun:_ZNKSt3__18time_getIwNS_19istreambuf_iteratorIwNS_11char_traitsIwEEEEE15__get_monthnameERiRS4_S4_RjRKNS_5ctypeIwEE=uninstrumented\nfun:_ZNKSt3__18time_getIwNS_19istreambuf_iteratorIwNS_11char_traitsIwEEEEE16do_get_monthnameES4_S4_RNS_8ios_baseERjP2tm=uninstrumented\nfun:_ZNKSt3__18time_getIwNS_19istreambuf_iteratorIwNS_11char_traitsIwEEEEE17__get_weekdaynameERiRS4_S4_RjRKNS_5ctypeIwEE=uninstrumented\nfun:_ZNKSt3__18time_getIwNS_19istreambuf_iteratorIwNS_11char_traitsIwEEEEE17__get_white_spaceERS4_S4_RjRKNS_5ctypeIwEE=uninstrumented\nfun:_ZNKSt3__18time_getIwNS_19istreambuf_iteratorIwNS_11char_traitsIwEEEEE18__get_day_year_numERiRS4_S4_RjRKNS_5ctypeIwEE=uninstrumented\nfun:_ZNKSt3__18time_getIwNS_19istreambuf_iteratorIwNS_11char_traitsIwEEEEE3getES4_S4_RNS_8ios_baseERjP2tmPKwSC_=uninstrumented\nfun:_ZNKSt3__18time_getIwNS_19istreambuf_iteratorIwNS_11char_
traitsIwEEEEE6do_getES4_S4_RNS_8ios_baseERjP2tmcc=uninstrumented\nfun:_ZNKSt3__18time_getIwNS_19istreambuf_iteratorIwNS_11char_traitsIwEEEEE9__get_dayERiRS4_S4_RjRKNS_5ctypeIwEE=uninstrumented\nfun:_ZNKSt3__18time_putIcNS_19ostreambuf_iteratorIcNS_11char_traitsIcEEEEE3putES4_RNS_8ios_baseEcPK2tmPKcSC_=uninstrumented\nfun:_ZNKSt3__18time_putIcNS_19ostreambuf_iteratorIcNS_11char_traitsIcEEEEE6do_putES4_RNS_8ios_baseEcPK2tmcc=uninstrumented\nfun:_ZNKSt3__18time_putIwNS_19ostreambuf_iteratorIwNS_11char_traitsIwEEEEE3putES4_RNS_8ios_baseEwPK2tmPKwSC_=uninstrumented\nfun:_ZNKSt3__18time_putIwNS_19ostreambuf_iteratorIwNS_11char_traitsIwEEEEE6do_putES4_RNS_8ios_baseEwPK2tmcc=uninstrumented\nfun:_ZNKSt3__19money_getIcNS_19istreambuf_iteratorIcNS_11char_traitsIcEEEEE6do_getES4_S4_bRNS_8ios_baseERjRNS_12basic_stringIcS3_NS_9allocatorIcEEEE=uninstrumented\nfun:_ZNKSt3__19money_getIcNS_19istreambuf_iteratorIcNS_11char_traitsIcEEEEE6do_getES4_S4_bRNS_8ios_baseERjRe=uninstrumented\nfun:_ZNKSt3__19money_getIwNS_19istreambuf_iteratorIwNS_11char_traitsIwEEEEE6do_getES4_S4_bRNS_8ios_baseERjRNS_12basic_stringIwS3_NS_9allocatorIwEEEE=uninstrumented\nfun:_ZNKSt3__19money_getIwNS_19istreambuf_iteratorIwNS_11char_traitsIwEEEEE6do_getES4_S4_bRNS_8ios_baseERjRe=uninstrumented\nfun:_ZNKSt3__19money_putIcNS_19ostreambuf_iteratorIcNS_11char_traitsIcEEEEE6do_putES4_bRNS_8ios_baseEcRKNS_12basic_stringIcS3_NS_9allocatorIcEEEE=uninstrumented\nfun:_ZNKSt3__19money_putIcNS_19ostreambuf_iteratorIcNS_11char_traitsIcEEEEE6do_putES4_bRNS_8ios_baseEce=uninstrumented\nfun:_ZNKSt3__19money_putIwNS_19ostreambuf_iteratorIwNS_11char_traitsIwEEEEE6do_putES4_bRNS_8ios_baseEwRKNS_12basic_stringIwS3_NS_9allocatorIwEEEE=uninstrumented\nfun:_ZNKSt3__19money_putIwNS_19ostreambuf_iteratorIwNS_11char_traitsIwEEEEE6do_putES4_bRNS_8ios_baseEwe=uninstrumented\nfun:_ZNKSt8bad_cast4whatEv=uninstrumented\nfun:_ZNKSt9bad_alloc4whatEv=uninstrumented\nfun:_ZNKSt9exception4whatEv=uninstrumented\nfun:_ZNSt10bad_typeidC1Ev=uninstr
umented\nfun:_ZNSt10bad_typeidC2Ev=uninstrumented\nfun:_ZNSt10bad_typeidD0Ev=uninstrumented\nfun:_ZNSt10bad_typeidD1Ev=uninstrumented\nfun:_ZNSt10bad_typeidD2Ev=uninstrumented\nfun:_ZNSt11logic_errorC1EPKc=uninstrumented\nfun:_ZNSt11logic_errorC1ERKNSt3__112basic_stringIcNS0_11char_traitsIcEENS0_9allocatorIcEEEE=uninstrumented\nfun:_ZNSt11logic_errorC1ERKS_=uninstrumented\nfun:_ZNSt11logic_errorC2EPKc=uninstrumented\nfun:_ZNSt11logic_errorC2ERKNSt3__112basic_stringIcNS0_11char_traitsIcEENS0_9allocatorIcEEEE=uninstrumented\nfun:_ZNSt11logic_errorC2ERKS_=uninstrumented\nfun:_ZNSt11logic_errorD0Ev=uninstrumented\nfun:_ZNSt11logic_errorD1Ev=uninstrumented\nfun:_ZNSt11logic_errorD2Ev=uninstrumented\nfun:_ZNSt11logic_erroraSERKS_=uninstrumented\nfun:_ZNSt11range_errorD0Ev=uninstrumented\nfun:_ZNSt11range_errorD1Ev=uninstrumented\nfun:_ZNSt11range_errorD2Ev=uninstrumented\nfun:_ZNSt12domain_errorD0Ev=uninstrumented\nfun:_ZNSt12domain_errorD1Ev=uninstrumented\nfun:_ZNSt12domain_errorD2Ev=uninstrumented\nfun:_ZNSt12experimental19bad_optional_accessD0Ev=uninstrumented\nfun:_ZNSt12experimental19bad_optional_accessD1Ev=uninstrumented\nfun:_ZNSt12experimental19bad_optional_accessD2Ev=uninstrumented\nfun:_ZNSt12length_errorD0Ev=uninstrumented\nfun:_ZNSt12length_errorD1Ev=uninstrumented\nfun:_ZNSt12length_errorD2Ev=uninstrumented\nfun:_ZNSt12out_of_rangeD0Ev=uninstrumented\nfun:_ZNSt12out_of_rangeD1Ev=uninstrumented\nfun:_ZNSt12out_of_rangeD2Ev=uninstrumented\nfun:_ZNSt13bad_exceptionD0Ev=uninstrumented\nfun:_ZNSt13bad_exceptionD1Ev=uninstrumented\nfun:_ZNSt13bad_exceptionD2Ev=uninstrumented\nfun:_ZNSt13exception_ptrC1ERKS_=uninstrumented\nfun:_ZNSt13exception_ptrC2ERKS_=uninstrumented\nfun:_ZNSt13exception_ptrD1Ev=uninstrumented\nfun:_ZNSt13exception_ptrD2Ev=uninstrumented\nfun:_ZNSt13exception_ptraSERKS_=uninstrumented\nfun:_ZNSt13runtime_errorC1EPKc=uninstrumented\nfun:_ZNSt13runtime_errorC1ERKNSt3__112basic_stringIcNS0_11char_traitsIcEENS0_9allocatorIcEEEE=uninstrumented\nfun:
_ZNSt13runtime_errorC1ERKS_=uninstrumented\nfun:_ZNSt13runtime_errorC2EPKc=uninstrumented\nfun:_ZNSt13runtime_errorC2ERKNSt3__112basic_stringIcNS0_11char_traitsIcEENS0_9allocatorIcEEEE=uninstrumented\nfun:_ZNSt13runtime_errorC2ERKS_=uninstrumented\nfun:_ZNSt13runtime_errorD0Ev=uninstrumented\nfun:_ZNSt13runtime_errorD1Ev=uninstrumented\nfun:_ZNSt13runtime_errorD2Ev=uninstrumented\nfun:_ZNSt13runtime_erroraSERKS_=uninstrumented\nfun:_ZNSt14overflow_errorD0Ev=uninstrumented\nfun:_ZNSt14overflow_errorD1Ev=uninstrumented\nfun:_ZNSt14overflow_errorD2Ev=uninstrumented\nfun:_ZNSt15underflow_errorD0Ev=uninstrumented\nfun:_ZNSt15underflow_errorD1Ev=uninstrumented\nfun:_ZNSt15underflow_errorD2Ev=uninstrumented\nfun:_ZNSt16invalid_argumentD0Ev=uninstrumented\nfun:_ZNSt16invalid_argumentD1Ev=uninstrumented\nfun:_ZNSt16invalid_argumentD2Ev=uninstrumented\nfun:_ZNSt16nested_exceptionC1Ev=uninstrumented\nfun:_ZNSt16nested_exceptionC2Ev=uninstrumented\nfun:_ZNSt16nested_exceptionD0Ev=uninstrumented\nfun:_ZNSt16nested_exceptionD1Ev=uninstrumented\nfun:_ZNSt16nested_exceptionD2Ev=uninstrumented\nfun:_ZNSt19bad_optional_accessD0Ev=uninstrumented\nfun:_ZNSt19bad_optional_accessD1Ev=uninstrumented\nfun:_ZNSt19bad_optional_accessD2Ev=uninstrumented\nfun:_ZNSt20bad_array_new_lengthC1Ev=uninstrumented\nfun:_ZNSt20bad_array_new_lengthC2Ev=uninstrumented\nfun:_ZNSt20bad_array_new_lengthD0Ev=uninstrumented\nfun:_ZNSt20bad_array_new_lengthD1Ev=uninstrumented\nfun:_ZNSt20bad_array_new_lengthD2Ev=uninstrumented\nfun:_ZNSt3__110__time_getC1EPKc=uninstrumented\nfun:_ZNSt3__110__time_getC1ERKNS_12basic_stringIcNS_11char_traitsIcEENS_9allocatorIcEEEE=uninstrumented\nfun:_ZNSt3__110__time_getC2EPKc=uninstrumented\nfun:_ZNSt3__110__time_getC2ERKNS_12basic_stringIcNS_11char_traitsIcEENS_9allocatorIcEEEE=uninstrumented\nfun:_ZNSt3__110__time_getD1Ev=uninstrumented\nfun:_ZNSt3__110__time_getD2Ev=uninstrumented\nfun:_ZNSt3__110__time_putC1EPKc=uninstrumented\nfun:_ZNSt3__110__time_putC1ERKNS_12basic_strin
gIcNS_11char_traitsIcEENS_9allocatorIcEEEE=uninstrumented\nfun:_ZNSt3__110__time_putC2EPKc=uninstrumented\nfun:_ZNSt3__110__time_putC2ERKNS_12basic_stringIcNS_11char_traitsIcEENS_9allocatorIcEEEE=uninstrumented\nfun:_ZNSt3__110__time_putD1Ev=uninstrumented\nfun:_ZNSt3__110__time_putD2Ev=uninstrumented\nfun:_ZNSt3__110istrstreamD0Ev=uninstrumented\nfun:_ZNSt3__110istrstreamD1Ev=uninstrumented\nfun:_ZNSt3__110istrstreamD2Ev=uninstrumented\nfun:_ZNSt3__110ostrstreamD0Ev=uninstrumented\nfun:_ZNSt3__110ostrstreamD1Ev=uninstrumented\nfun:_ZNSt3__110ostrstreamD2Ev=uninstrumented\nfun:_ZNSt3__110to_wstringEd=uninstrumented\nfun:_ZNSt3__110to_wstringEe=uninstrumented\nfun:_ZNSt3__110to_wstringEf=uninstrumented\nfun:_ZNSt3__110to_wstringEi=uninstrumented\nfun:_ZNSt3__110to_wstringEj=uninstrumented\nfun:_ZNSt3__110to_wstringEl=uninstrumented\nfun:_ZNSt3__110to_wstringEm=uninstrumented\nfun:_ZNSt3__110to_wstringEx=uninstrumented\nfun:_ZNSt3__110to_wstringEy=uninstrumented\nfun:_ZNSt3__111__call_onceERVmPvPFvS2_E=uninstrumented\nfun:_ZNSt3__111__libcpp_db10__insert_cEPvPFPNS_8__c_nodeES1_S1_S3_E=uninstrumented\nfun:_ZNSt3__111__libcpp_db10__insert_iEPv=uninstrumented\nfun:_ZNSt3__111__libcpp_db11__insert_icEPvPKv=uninstrumented\nfun:_ZNSt3__111__libcpp_db15__iterator_copyEPvPKv=uninstrumented\nfun:_ZNSt3__111__libcpp_db16__invalidate_allEPv=uninstrumented\nfun:_ZNSt3__111__libcpp_db4swapEPvS1_=uninstrumented\nfun:_ZNSt3__111__libcpp_db9__erase_cEPv=uninstrumented\nfun:_ZNSt3__111__libcpp_db9__erase_iEPv=uninstrumented\nfun:_ZNSt3__111__libcpp_dbC1Ev=uninstrumented\nfun:_ZNSt3__111__libcpp_dbC2Ev=uninstrumented\nfun:_ZNSt3__111__libcpp_dbD1Ev=uninstrumented\nfun:_ZNSt3__111__libcpp_dbD2Ev=uninstrumented\nfun:_ZNSt3__111__money_getIcE13__gather_infoEbRKNS_6localeERNS_10money_base7patternERcS8_RNS_12basic_stringIcNS_11char_traitsIcEENS_9allocatorIcEEEESF_SF_SF_Ri=uninstrumented\nfun:_ZNSt3__111__money_getIwE13__gather_infoEbRKNS_6localeERNS_10money_base7patternERwS8_RNS_12basic_str
ingIcNS_11char_traitsIcEENS_9allocatorIcEEEERNS9_IwNSA_IwEENSC_IwEEEESJ_SJ_Ri=uninstrumented\nfun:_ZNSt3__111__money_putIcE13__gather_infoEbbRKNS_6localeERNS_10money_base7patternERcS8_RNS_12basic_stringIcNS_11char_traitsIcEENS_9allocatorIcEEEESF_SF_Ri=uninstrumented\nfun:_ZNSt3__111__money_putIcE8__formatEPcRS2_S3_jPKcS5_RKNS_5ctypeIcEEbRKNS_10money_base7patternEccRKNS_12basic_stringIcNS_11char_traitsIcEENS_9allocatorIcEEEESL_SL_i=uninstrumented\nfun:_ZNSt3__111__money_putIwE13__gather_infoEbbRKNS_6localeERNS_10money_base7patternERwS8_RNS_12basic_stringIcNS_11char_traitsIcEENS_9allocatorIcEEEERNS9_IwNSA_IwEENSC_IwEEEESJ_Ri=uninstrumented\nfun:_ZNSt3__111__money_putIwE8__formatEPwRS2_S3_jPKwS5_RKNS_5ctypeIwEEbRKNS_10money_base7patternEwwRKNS_12basic_stringIcNS_11char_traitsIcEENS_9allocatorIcEEEERKNSE_IwNSF_IwEENSH_IwEEEESQ_i=uninstrumented\nfun:_ZNSt3__111regex_errorC1ENS_15regex_constants10error_typeE=uninstrumented\nfun:_ZNSt3__111regex_errorC2ENS_15regex_constants10error_typeE=uninstrumented\nfun:_ZNSt3__111regex_errorD0Ev=uninstrumented\nfun:_ZNSt3__111regex_errorD1Ev=uninstrumented\nfun:_ZNSt3__111regex_errorD2Ev=uninstrumented\nfun:_ZNSt3__111this_thread9sleep_forERKNS_6chrono8durationIxNS_5ratioILl1ELl1000000000EEEEE=uninstrumented\nfun:_ZNSt3__111timed_mutex4lockEv=uninstrumented\nfun:_ZNSt3__111timed_mutex6unlockEv=uninstrumented\nfun:_ZNSt3__111timed_mutex8try_lockEv=uninstrumented\nfun:_ZNSt3__111timed_mutexC1Ev=uninstrumented\nfun:_ZNSt3__111timed_mutexC2Ev=uninstrumented\nfun:_ZNSt3__111timed_mutexD1Ev=uninstrumented\nfun:_ZNSt3__111timed_mutexD2Ev=uninstrumented\nfun:_ZNSt3__112__do_nothingEPv=uninstrumented\nfun:_ZNSt3__112__get_sp_mutEPKv=uninstrumented\nfun:_ZNSt3__112__next_primeEm=uninstrumented\nfun:_ZNSt3__112__rs_defaultC1ERKS0_=uninstrumented\nfun:_ZNSt3__112__rs_defaultC1Ev=uninstrumented\nfun:_ZNSt3__112__rs_defaultC2ERKS0_=uninstrumented\nfun:_ZNSt3__112__rs_defaultC2Ev=uninstrumented\nfun:_ZNSt3__112__rs_defaultD1Ev=uninstrumented\nfun:_ZN
St3__112__rs_defaultD2Ev=uninstrumented\nfun:_ZNSt3__112__rs_defaultclEv=uninstrumented\nfun:_ZNSt3__112bad_weak_ptrD0Ev=uninstrumented\nfun:_ZNSt3__112bad_weak_ptrD1Ev=uninstrumented\nfun:_ZNSt3__112bad_weak_ptrD2Ev=uninstrumented\nfun:_ZNSt3__112basic_stringIcNS_11char_traitsIcEENS_9allocatorIcEEE21__grow_by_and_replaceEmmmmmmPKc=uninstrumented\nfun:_ZNSt3__112basic_stringIcNS_11char_traitsIcEENS_9allocatorIcEEE2atEm=uninstrumented\nfun:_ZNSt3__112basic_stringIcNS_11char_traitsIcEENS_9allocatorIcEEE5eraseEmm=uninstrumented\nfun:_ZNSt3__112basic_stringIcNS_11char_traitsIcEENS_9allocatorIcEEE6__initEPKcm=uninstrumented\nfun:_ZNSt3__112basic_stringIcNS_11char_traitsIcEENS_9allocatorIcEEE6__initEPKcmm=uninstrumented\nfun:_ZNSt3__112basic_stringIcNS_11char_traitsIcEENS_9allocatorIcEEE6__initEmc=uninstrumented\nfun:_ZNSt3__112basic_stringIcNS_11char_traitsIcEENS_9allocatorIcEEE6appendEPKc=uninstrumented\nfun:_ZNSt3__112basic_stringIcNS_11char_traitsIcEENS_9allocatorIcEEE6appendEPKcm=uninstrumented\nfun:_ZNSt3__112basic_stringIcNS_11char_traitsIcEENS_9allocatorIcEEE6appendERKS5_mm=uninstrumented\nfun:_ZNSt3__112basic_stringIcNS_11char_traitsIcEENS_9allocatorIcEEE6appendEmc=uninstrumented\nfun:_ZNSt3__112basic_stringIcNS_11char_traitsIcEENS_9allocatorIcEEE6assignEPKc=uninstrumented\nfun:_ZNSt3__112basic_stringIcNS_11char_traitsIcEENS_9allocatorIcEEE6assignEPKcm=uninstrumented\nfun:_ZNSt3__112basic_stringIcNS_11char_traitsIcEENS_9allocatorIcEEE6assignERKS5_mm=uninstrumented\nfun:_ZNSt3__112basic_stringIcNS_11char_traitsIcEENS_9allocatorIcEEE6assignEmc=uninstrumented\nfun:_ZNSt3__112basic_stringIcNS_11char_traitsIcEENS_9allocatorIcEEE6insertENS_11__wrap_iterIPKcEEc=uninstrumented\nfun:_ZNSt3__112basic_stringIcNS_11char_traitsIcEENS_9allocatorIcEEE6insertEmPKc=uninstrumented\nfun:_ZNSt3__112basic_stringIcNS_11char_traitsIcEENS_9allocatorIcEEE6insertEmPKcm=uninstrumented\nfun:_ZNSt3__112basic_stringIcNS_11char_traitsIcEENS_9allocatorIcEEE6insertEmRKS5_mm=uninstrumented\nfun:_
ZNSt3__112basic_stringIcNS_11char_traitsIcEENS_9allocatorIcEEE6insertEmmc=uninstrumented\nfun:_ZNSt3__112basic_stringIcNS_11char_traitsIcEENS_9allocatorIcEEE6resizeEmc=uninstrumented\nfun:_ZNSt3__112basic_stringIcNS_11char_traitsIcEENS_9allocatorIcEEE7replaceEmmPKc=uninstrumented\nfun:_ZNSt3__112basic_stringIcNS_11char_traitsIcEENS_9allocatorIcEEE7replaceEmmPKcm=uninstrumented\nfun:_ZNSt3__112basic_stringIcNS_11char_traitsIcEENS_9allocatorIcEEE7replaceEmmRKS5_mm=uninstrumented\nfun:_ZNSt3__112basic_stringIcNS_11char_traitsIcEENS_9allocatorIcEEE7replaceEmmmc=uninstrumented\nfun:_ZNSt3__112basic_stringIcNS_11char_traitsIcEENS_9allocatorIcEEE7reserveEm=uninstrumented\nfun:_ZNSt3__112basic_stringIcNS_11char_traitsIcEENS_9allocatorIcEEE9__grow_byEmmmmmm=uninstrumented\nfun:_ZNSt3__112basic_stringIcNS_11char_traitsIcEENS_9allocatorIcEEE9push_backEc=uninstrumented\nfun:_ZNSt3__112basic_stringIcNS_11char_traitsIcEENS_9allocatorIcEEEC1ERKS5_=uninstrumented\nfun:_ZNSt3__112basic_stringIcNS_11char_traitsIcEENS_9allocatorIcEEEC1ERKS5_RKS4_=uninstrumented\nfun:_ZNSt3__112basic_stringIcNS_11char_traitsIcEENS_9allocatorIcEEEC1ERKS5_mmRKS4_=uninstrumented\nfun:_ZNSt3__112basic_stringIcNS_11char_traitsIcEENS_9allocatorIcEEEC2ERKS5_=uninstrumented\nfun:_ZNSt3__112basic_stringIcNS_11char_traitsIcEENS_9allocatorIcEEEC2ERKS5_RKS4_=uninstrumented\nfun:_ZNSt3__112basic_stringIcNS_11char_traitsIcEENS_9allocatorIcEEEC2ERKS5_mmRKS4_=uninstrumented\nfun:_ZNSt3__112basic_stringIcNS_11char_traitsIcEENS_9allocatorIcEEED1Ev=uninstrumented\nfun:_ZNSt3__112basic_stringIcNS_11char_traitsIcEENS_9allocatorIcEEED2Ev=uninstrumented\nfun:_ZNSt3__112basic_stringIcNS_11char_traitsIcEENS_9allocatorIcEEEaSERKS5_=uninstrumented\nfun:_ZNSt3__112basic_stringIcNS_11char_traitsIcEENS_9allocatorIcEEEaSEc=uninstrumented\nfun:_ZNSt3__112basic_stringIwNS_11char_traitsIwEENS_9allocatorIwEEE21__grow_by_and_replaceEmmmmmmPKw=uninstrumented\nfun:_ZNSt3__112basic_stringIwNS_11char_traitsIwEENS_9allocatorIwEEE2atEm=uninstr
umented\nfun:_ZNSt3__112basic_stringIwNS_11char_traitsIwEENS_9allocatorIwEEE5eraseEmm=uninstrumented\nfun:_ZNSt3__112basic_stringIwNS_11char_traitsIwEENS_9allocatorIwEEE6__initEPKwm=uninstrumented\nfun:_ZNSt3__112basic_stringIwNS_11char_traitsIwEENS_9allocatorIwEEE6__initEPKwmm=uninstrumented\nfun:_ZNSt3__112basic_stringIwNS_11char_traitsIwEENS_9allocatorIwEEE6__initEmw=uninstrumented\nfun:_ZNSt3__112basic_stringIwNS_11char_traitsIwEENS_9allocatorIwEEE6appendEPKw=uninstrumented\nfun:_ZNSt3__112basic_stringIwNS_11char_traitsIwEENS_9allocatorIwEEE6appendEPKwm=uninstrumented\nfun:_ZNSt3__112basic_stringIwNS_11char_traitsIwEENS_9allocatorIwEEE6appendERKS5_mm=uninstrumented\nfun:_ZNSt3__112basic_stringIwNS_11char_traitsIwEENS_9allocatorIwEEE6appendEmw=uninstrumented\nfun:_ZNSt3__112basic_stringIwNS_11char_traitsIwEENS_9allocatorIwEEE6assignEPKw=uninstrumented\nfun:_ZNSt3__112basic_stringIwNS_11char_traitsIwEENS_9allocatorIwEEE6assignEPKwm=uninstrumented\nfun:_ZNSt3__112basic_stringIwNS_11char_traitsIwEENS_9allocatorIwEEE6assignERKS5_mm=uninstrumented\nfun:_ZNSt3__112basic_stringIwNS_11char_traitsIwEENS_9allocatorIwEEE6assignEmw=uninstrumented\nfun:_ZNSt3__112basic_stringIwNS_11char_traitsIwEENS_9allocatorIwEEE6insertENS_11__wrap_iterIPKwEEw=uninstrumented\nfun:_ZNSt3__112basic_stringIwNS_11char_traitsIwEENS_9allocatorIwEEE6insertEmPKw=uninstrumented\nfun:_ZNSt3__112basic_stringIwNS_11char_traitsIwEENS_9allocatorIwEEE6insertEmPKwm=uninstrumented\nfun:_ZNSt3__112basic_stringIwNS_11char_traitsIwEENS_9allocatorIwEEE6insertEmRKS5_mm=uninstrumented\nfun:_ZNSt3__112basic_stringIwNS_11char_traitsIwEENS_9allocatorIwEEE6insertEmmw=uninstrumented\nfun:_ZNSt3__112basic_stringIwNS_11char_traitsIwEENS_9allocatorIwEEE6resizeEmw=uninstrumented\nfun:_ZNSt3__112basic_stringIwNS_11char_traitsIwEENS_9allocatorIwEEE7replaceEmmPKw=uninstrumented\nfun:_ZNSt3__112basic_stringIwNS_11char_traitsIwEENS_9allocatorIwEEE7replaceEmmPKwm=uninstrumented\nfun:_ZNSt3__112basic_stringIwNS_11char_traitsIwEE
NS_9allocatorIwEEE7replaceEmmRKS5_mm=uninstrumented\nfun:_ZNSt3__112basic_stringIwNS_11char_traitsIwEENS_9allocatorIwEEE7replaceEmmmw=uninstrumented\nfun:_ZNSt3__112basic_stringIwNS_11char_traitsIwEENS_9allocatorIwEEE7reserveEm=uninstrumented\nfun:_ZNSt3__112basic_stringIwNS_11char_traitsIwEENS_9allocatorIwEEE9__grow_byEmmmmmm=uninstrumented\nfun:_ZNSt3__112basic_stringIwNS_11char_traitsIwEENS_9allocatorIwEEE9push_backEw=uninstrumented\nfun:_ZNSt3__112basic_stringIwNS_11char_traitsIwEENS_9allocatorIwEEEC1ERKS5_=uninstrumented\nfun:_ZNSt3__112basic_stringIwNS_11char_traitsIwEENS_9allocatorIwEEEC1ERKS5_RKS4_=uninstrumented\nfun:_ZNSt3__112basic_stringIwNS_11char_traitsIwEENS_9allocatorIwEEEC1ERKS5_mmRKS4_=uninstrumented\nfun:_ZNSt3__112basic_stringIwNS_11char_traitsIwEENS_9allocatorIwEEEC2ERKS5_=uninstrumented\nfun:_ZNSt3__112basic_stringIwNS_11char_traitsIwEENS_9allocatorIwEEEC2ERKS5_RKS4_=uninstrumented\nfun:_ZNSt3__112basic_stringIwNS_11char_traitsIwEENS_9allocatorIwEEEC2ERKS5_mmRKS4_=uninstrumented\nfun:_ZNSt3__112basic_stringIwNS_11char_traitsIwEENS_9allocatorIwEEED1Ev=uninstrumented\nfun:_ZNSt3__112basic_stringIwNS_11char_traitsIwEENS_9allocatorIwEEED2Ev=uninstrumented\nfun:_ZNSt3__112basic_stringIwNS_11char_traitsIwEENS_9allocatorIwEEEaSERKS5_=uninstrumented\nfun:_ZNSt3__112basic_stringIwNS_11char_traitsIwEENS_9allocatorIwEEEaSEw=uninstrumented\nfun:_ZNSt3__112ctype_bynameIcEC1EPKcm=uninstrumented\nfun:_ZNSt3__112ctype_bynameIcEC1ERKNS_12basic_stringIcNS_11char_traitsIcEENS_9allocatorIcEEEEm=uninstrumented\nfun:_ZNSt3__112ctype_bynameIcEC2EPKcm=uninstrumented\nfun:_ZNSt3__112ctype_bynameIcEC2ERKNS_12basic_stringIcNS_11char_traitsIcEENS_9allocatorIcEEEEm=uninstrumented\nfun:_ZNSt3__112ctype_bynameIcED0Ev=uninstrumented\nfun:_ZNSt3__112ctype_bynameIcED1Ev=uninstrumented\nfun:_ZNSt3__112ctype_bynameIcED2Ev=uninstrumented\nfun:_ZNSt3__112ctype_bynameIwEC1EPKcm=uninstrumented\nfun:_ZNSt3__112ctype_bynameIwEC1ERKNS_12basic_stringIcNS_11char_traitsIcEENS_9allocatorIcE
EEEm=uninstrumented\nfun:_ZNSt3__112ctype_bynameIwEC2EPKcm=uninstrumented\nfun:_ZNSt3__112ctype_bynameIwEC2ERKNS_12basic_stringIcNS_11char_traitsIcEENS_9allocatorIcEEEEm=uninstrumented\nfun:_ZNSt3__112ctype_bynameIwED0Ev=uninstrumented\nfun:_ZNSt3__112ctype_bynameIwED1Ev=uninstrumented\nfun:_ZNSt3__112ctype_bynameIwED2Ev=uninstrumented\nfun:_ZNSt3__112future_errorC1ENS_10error_codeE=uninstrumented\nfun:_ZNSt3__112future_errorC2ENS_10error_codeE=uninstrumented\nfun:_ZNSt3__112future_errorD0Ev=uninstrumented\nfun:_ZNSt3__112future_errorD1Ev=uninstrumented\nfun:_ZNSt3__112future_errorD2Ev=uninstrumented\nfun:_ZNSt3__112strstreambuf3strEv=uninstrumented\nfun:_ZNSt3__112strstreambuf4swapERS0_=uninstrumented\nfun:_ZNSt3__112strstreambuf6__initEPclS1_=uninstrumented\nfun:_ZNSt3__112strstreambuf6freezeEb=uninstrumented\nfun:_ZNSt3__112strstreambuf7seekoffExNS_8ios_base7seekdirEj=uninstrumented\nfun:_ZNSt3__112strstreambuf7seekposENS_4fposI11__mbstate_tEEj=uninstrumented\nfun:_ZNSt3__112strstreambuf8overflowEi=uninstrumented\nfun:_ZNSt3__112strstreambuf9pbackfailEi=uninstrumented\nfun:_ZNSt3__112strstreambuf9underflowEv=uninstrumented\nfun:_ZNSt3__112strstreambufC1EPFPvmEPFvS1_E=uninstrumented\nfun:_ZNSt3__112strstreambufC1EPKal=uninstrumented\nfun:_ZNSt3__112strstreambufC1EPKcl=uninstrumented\nfun:_ZNSt3__112strstreambufC1EPKhl=uninstrumented\nfun:_ZNSt3__112strstreambufC1EPalS1_=uninstrumented\nfun:_ZNSt3__112strstreambufC1EPclS1_=uninstrumented\nfun:_ZNSt3__112strstreambufC1EPhlS1_=uninstrumented\nfun:_ZNSt3__112strstreambufC1El=uninstrumented\nfun:_ZNSt3__112strstreambufC2EPFPvmEPFvS1_E=uninstrumented\nfun:_ZNSt3__112strstreambufC2EPKal=uninstrumented\nfun:_ZNSt3__112strstreambufC2EPKcl=uninstrumented\nfun:_ZNSt3__112strstreambufC2EPKhl=uninstrumented\nfun:_ZNSt3__112strstreambufC2EPalS1_=uninstrumented\nfun:_ZNSt3__112strstreambufC2EPclS1_=uninstrumented\nfun:_ZNSt3__112strstreambufC2EPhlS1_=uninstrumented\nfun:_ZNSt3__112strstreambufC2El=uninstrumented\nfun:_ZNSt3__112
strstreambufD0Ev=uninstrumented\nfun:_ZNSt3__112strstreambufD1Ev=uninstrumented\nfun:_ZNSt3__112strstreambufD2Ev=uninstrumented\nfun:_ZNSt3__112system_error6__initERKNS_10error_codeENS_12basic_stringIcNS_11char_traitsIcEENS_9allocatorIcEEEE=uninstrumented\nfun:_ZNSt3__112system_errorC1ENS_10error_codeE=uninstrumented\nfun:_ZNSt3__112system_errorC1ENS_10error_codeEPKc=uninstrumented\nfun:_ZNSt3__112system_errorC1ENS_10error_codeERKNS_12basic_stringIcNS_11char_traitsIcEENS_9allocatorIcEEEE=uninstrumented\nfun:_ZNSt3__112system_errorC1EiRKNS_14error_categoryE=uninstrumented\nfun:_ZNSt3__112system_errorC1EiRKNS_14error_categoryEPKc=uninstrumented\nfun:_ZNSt3__112system_errorC1EiRKNS_14error_categoryERKNS_12basic_stringIcNS_11char_traitsIcEENS_9allocatorIcEEEE=uninstrumented\nfun:_ZNSt3__112system_errorC2ENS_10error_codeE=uninstrumented\nfun:_ZNSt3__112system_errorC2ENS_10error_codeEPKc=uninstrumented\nfun:_ZNSt3__112system_errorC2ENS_10error_codeERKNS_12basic_stringIcNS_11char_traitsIcEENS_9allocatorIcEEEE=uninstrumented\nfun:_ZNSt3__112system_errorC2EiRKNS_14error_categoryE=uninstrumented\nfun:_ZNSt3__112system_errorC2EiRKNS_14error_categoryEPKc=uninstrumented\nfun:_ZNSt3__112system_errorC2EiRKNS_14error_categoryERKNS_12basic_stringIcNS_11char_traitsIcEENS_9allocatorIcEEEE=uninstrumented\nfun:_ZNSt3__112system_errorD0Ev=uninstrumented\nfun:_ZNSt3__112system_errorD1Ev=uninstrumented\nfun:_ZNSt3__112system_errorD2Ev=uninstrumented\nfun:_ZNSt3__113basic_filebufIcNS_11char_traitsIcEEE11__read_modeEv=uninstrumented\nfun:_ZNSt3__113basic_filebufIcNS_11char_traitsIcEEE12__write_modeEv=uninstrumented\nfun:_ZNSt3__113basic_filebufIcNS_11char_traitsIcEEE4openEPKcj=uninstrumented\nfun:_ZNSt3__113basic_filebufIcNS_11char_traitsIcEEE4swapERS3_=uninstrumented\nfun:_ZNSt3__113basic_filebufIcNS_11char_traitsIcEEE4syncEv=uninstrumented\nfun:_ZNSt3__113basic_filebufIcNS_11char_traitsIcEEE5closeEv=uninstrumented\nfun:_ZNSt3__113basic_filebufIcNS_11char_traitsIcEEE5imbueERKNS_6localeE=uni
nstrumented\nfun:_ZNSt3__113basic_filebufIcNS_11char_traitsIcEEE6setbufEPcl=uninstrumented\nfun:_ZNSt3__113basic_filebufIcNS_11char_traitsIcEEE7seekoffExNS_8ios_base7seekdirEj=uninstrumented\nfun:_ZNSt3__113basic_filebufIcNS_11char_traitsIcEEE7seekposENS_4fposI11__mbstate_tEEj=uninstrumented\nfun:_ZNSt3__113basic_filebufIcNS_11char_traitsIcEEE8overflowEi=uninstrumented\nfun:_ZNSt3__113basic_filebufIcNS_11char_traitsIcEEE9pbackfailEi=uninstrumented\nfun:_ZNSt3__113basic_filebufIcNS_11char_traitsIcEEE9underflowEv=uninstrumented\nfun:_ZNSt3__113basic_filebufIcNS_11char_traitsIcEEEC1EOS3_=uninstrumented\nfun:_ZNSt3__113basic_filebufIcNS_11char_traitsIcEEEC1Ev=uninstrumented\nfun:_ZNSt3__113basic_filebufIcNS_11char_traitsIcEEEC2EOS3_=uninstrumented\nfun:_ZNSt3__113basic_filebufIcNS_11char_traitsIcEEEC2Ev=uninstrumented\nfun:_ZNSt3__113basic_filebufIcNS_11char_traitsIcEEED0Ev=uninstrumented\nfun:_ZNSt3__113basic_filebufIcNS_11char_traitsIcEEED1Ev=uninstrumented\nfun:_ZNSt3__113basic_filebufIcNS_11char_traitsIcEEED2Ev=uninstrumented\nfun:_ZNSt3__113basic_istreamIcNS_11char_traitsIcEEE3getEPcl=uninstrumented\nfun:_ZNSt3__113basic_istreamIcNS_11char_traitsIcEEE3getEPclc=uninstrumented\nfun:_ZNSt3__113basic_istreamIcNS_11char_traitsIcEEE3getERNS_15basic_streambufIcS2_EE=uninstrumented\nfun:_ZNSt3__113basic_istreamIcNS_11char_traitsIcEEE3getERNS_15basic_streambufIcS2_EEc=uninstrumented\nfun:_ZNSt3__113basic_istreamIcNS_11char_traitsIcEEE3getERc=uninstrumented\nfun:_ZNSt3__113basic_istreamIcNS_11char_traitsIcEEE3getEv=uninstrumented\nfun:_ZNSt3__113basic_istreamIcNS_11char_traitsIcEEE4peekEv=uninstrumented\nfun:_ZNSt3__113basic_istreamIcNS_11char_traitsIcEEE4readEPcl=uninstrumented\nfun:_ZNSt3__113basic_istreamIcNS_11char_traitsIcEEE4swapERS3_=uninstrumented\nfun:_ZNSt3__113basic_istreamIcNS_11char_traitsIcEEE4syncEv=uninstrumented\nfun:_ZNSt3__113basic_istreamIcNS_11char_traitsIcEEE5seekgENS_4fposI11__mbstate_tEE=uninstrumented\nfun:_ZNSt3__113basic_istreamIcNS_11char_traitsIc
EEE5seekgExNS_8ios_base7seekdirE=uninstrumented\nfun:_ZNSt3__113basic_istreamIcNS_11char_traitsIcEEE5tellgEv=uninstrumented\nfun:_ZNSt3__113basic_istreamIcNS_11char_traitsIcEEE5ungetEv=uninstrumented\nfun:_ZNSt3__113basic_istreamIcNS_11char_traitsIcEEE6ignoreEli=uninstrumented\nfun:_ZNSt3__113basic_istreamIcNS_11char_traitsIcEEE6sentryC1ERS3_b=uninstrumented\nfun:_ZNSt3__113basic_istreamIcNS_11char_traitsIcEEE6sentryC2ERS3_b=uninstrumented\nfun:_ZNSt3__113basic_istreamIcNS_11char_traitsIcEEE7getlineEPcl=uninstrumented\nfun:_ZNSt3__113basic_istreamIcNS_11char_traitsIcEEE7getlineEPclc=uninstrumented\nfun:_ZNSt3__113basic_istreamIcNS_11char_traitsIcEEE7putbackEc=uninstrumented\nfun:_ZNSt3__113basic_istreamIcNS_11char_traitsIcEEE8readsomeEPcl=uninstrumented\nfun:_ZNSt3__113basic_istreamIcNS_11char_traitsIcEEEC1EPNS_15basic_streambufIcS2_EE=uninstrumented\nfun:_ZNSt3__113basic_istreamIcNS_11char_traitsIcEEEC2EPNS_15basic_streambufIcS2_EE=uninstrumented\nfun:_ZNSt3__113basic_istreamIcNS_11char_traitsIcEEED0Ev=uninstrumented\nfun:_ZNSt3__113basic_istreamIcNS_11char_traitsIcEEED1Ev=uninstrumented\nfun:_ZNSt3__113basic_istreamIcNS_11char_traitsIcEEED2Ev=uninstrumented\nfun:_ZNSt3__113basic_istreamIcNS_11char_traitsIcEEErsEPFRNS_8ios_baseES5_E=uninstrumented\nfun:_ZNSt3__113basic_istreamIcNS_11char_traitsIcEEErsEPFRNS_9basic_iosIcS2_EES6_E=uninstrumented\nfun:_ZNSt3__113basic_istreamIcNS_11char_traitsIcEEErsEPFRS3_S4_E=uninstrumented\nfun:_ZNSt3__113basic_istreamIcNS_11char_traitsIcEEErsEPNS_15basic_streambufIcS2_EE=uninstrumented\nfun:_ZNSt3__113basic_istreamIcNS_11char_traitsIcEEErsERPv=uninstrumented\nfun:_ZNSt3__113basic_istreamIcNS_11char_traitsIcEEErsERb=uninstrumented\nfun:_ZNSt3__113basic_istreamIcNS_11char_traitsIcEEErsERd=uninstrumented\nfun:_ZNSt3__113basic_istreamIcNS_11char_traitsIcEEErsERe=uninstrumented\nfun:_ZNSt3__113basic_istreamIcNS_11char_traitsIcEEErsERf=uninstrumented\nfun:_ZNSt3__113basic_istreamIcNS_11char_traitsIcEEErsERi=uninstrumented\nfun:_ZNSt3__1
13basic_istreamIcNS_11char_traitsIcEEErsERj=uninstrumented\nfun:_ZNSt3__113basic_istreamIcNS_11char_traitsIcEEErsERl=uninstrumented\nfun:_ZNSt3__113basic_istreamIcNS_11char_traitsIcEEErsERm=uninstrumented\nfun:_ZNSt3__113basic_istreamIcNS_11char_traitsIcEEErsERs=uninstrumented\nfun:_ZNSt3__113basic_istreamIcNS_11char_traitsIcEEErsERt=uninstrumented\nfun:_ZNSt3__113basic_istreamIcNS_11char_traitsIcEEErsERx=uninstrumented\nfun:_ZNSt3__113basic_istreamIcNS_11char_traitsIcEEErsERy=uninstrumented\nfun:_ZNSt3__113basic_istreamIwNS_11char_traitsIwEEE3getEPwl=uninstrumented\nfun:_ZNSt3__113basic_istreamIwNS_11char_traitsIwEEE3getEPwlw=uninstrumented\nfun:_ZNSt3__113basic_istreamIwNS_11char_traitsIwEEE3getERNS_15basic_streambufIwS2_EE=uninstrumented\nfun:_ZNSt3__113basic_istreamIwNS_11char_traitsIwEEE3getERNS_15basic_streambufIwS2_EEw=uninstrumented\nfun:_ZNSt3__113basic_istreamIwNS_11char_traitsIwEEE3getERw=uninstrumented\nfun:_ZNSt3__113basic_istreamIwNS_11char_traitsIwEEE3getEv=uninstrumented\nfun:_ZNSt3__113basic_istreamIwNS_11char_traitsIwEEE4peekEv=uninstrumented\nfun:_ZNSt3__113basic_istreamIwNS_11char_traitsIwEEE4readEPwl=uninstrumented\nfun:_ZNSt3__113basic_istreamIwNS_11char_traitsIwEEE4swapERS3_=uninstrumented\nfun:_ZNSt3__113basic_istreamIwNS_11char_traitsIwEEE4syncEv=uninstrumented\nfun:_ZNSt3__113basic_istreamIwNS_11char_traitsIwEEE5seekgENS_4fposI11__mbstate_tEE=uninstrumented\nfun:_ZNSt3__113basic_istreamIwNS_11char_traitsIwEEE5seekgExNS_8ios_base7seekdirE=uninstrumented\nfun:_ZNSt3__113basic_istreamIwNS_11char_traitsIwEEE5tellgEv=uninstrumented\nfun:_ZNSt3__113basic_istreamIwNS_11char_traitsIwEEE5ungetEv=uninstrumented\nfun:_ZNSt3__113basic_istreamIwNS_11char_traitsIwEEE6ignoreElj=uninstrumented\nfun:_ZNSt3__113basic_istreamIwNS_11char_traitsIwEEE6sentryC1ERS3_b=uninstrumented\nfun:_ZNSt3__113basic_istreamIwNS_11char_traitsIwEEE6sentryC2ERS3_b=uninstrumented\nfun:_ZNSt3__113basic_istreamIwNS_11char_traitsIwEEE7getlineEPwl=uninstrumented\nfun:_ZNSt3__113basic
_istreamIwNS_11char_traitsIwEEE7getlineEPwlw=uninstrumented\nfun:_ZNSt3__113basic_istreamIwNS_11char_traitsIwEEE7putbackEw=uninstrumented\nfun:_ZNSt3__113basic_istreamIwNS_11char_traitsIwEEE8readsomeEPwl=uninstrumented\nfun:_ZNSt3__113basic_istreamIwNS_11char_traitsIwEEEC1EPNS_15basic_streambufIwS2_EE=uninstrumented\nfun:_ZNSt3__113basic_istreamIwNS_11char_traitsIwEEEC2EPNS_15basic_streambufIwS2_EE=uninstrumented\nfun:_ZNSt3__113basic_istreamIwNS_11char_traitsIwEEED0Ev=uninstrumented\nfun:_ZNSt3__113basic_istreamIwNS_11char_traitsIwEEED1Ev=uninstrumented\nfun:_ZNSt3__113basic_istreamIwNS_11char_traitsIwEEED2Ev=uninstrumented\nfun:_ZNSt3__113basic_istreamIwNS_11char_traitsIwEEErsEPFRNS_8ios_baseES5_E=uninstrumented\nfun:_ZNSt3__113basic_istreamIwNS_11char_traitsIwEEErsEPFRNS_9basic_iosIwS2_EES6_E=uninstrumented\nfun:_ZNSt3__113basic_istreamIwNS_11char_traitsIwEEErsEPFRS3_S4_E=uninstrumented\nfun:_ZNSt3__113basic_istreamIwNS_11char_traitsIwEEErsEPNS_15basic_streambufIwS2_EE=uninstrumented\nfun:_ZNSt3__113basic_istreamIwNS_11char_traitsIwEEErsERPv=uninstrumented\nfun:_ZNSt3__113basic_istreamIwNS_11char_traitsIwEEErsERb=uninstrumented\nfun:_ZNSt3__113basic_istreamIwNS_11char_traitsIwEEErsERd=uninstrumented\nfun:_ZNSt3__113basic_istreamIwNS_11char_traitsIwEEErsERe=uninstrumented\nfun:_ZNSt3__113basic_istreamIwNS_11char_traitsIwEEErsERf=uninstrumented\nfun:_ZNSt3__113basic_istreamIwNS_11char_traitsIwEEErsERi=uninstrumented\nfun:_ZNSt3__113basic_istreamIwNS_11char_traitsIwEEErsERj=uninstrumented\nfun:_ZNSt3__113basic_istreamIwNS_11char_traitsIwEEErsERl=uninstrumented\nfun:_ZNSt3__113basic_istreamIwNS_11char_traitsIwEEErsERm=uninstrumented\nfun:_ZNSt3__113basic_istreamIwNS_11char_traitsIwEEErsERs=uninstrumented\nfun:_ZNSt3__113basic_istreamIwNS_11char_traitsIwEEErsERt=uninstrumented\nfun:_ZNSt3__113basic_istreamIwNS_11char_traitsIwEEErsERx=uninstrumented\nfun:_ZNSt3__113basic_istreamIwNS_11char_traitsIwEEErsERy=uninstrumented\nfun:_ZNSt3__113basic_ostreamIcNS_11char_traitsI
cEEE3putEc=uninstrumented\nfun:_ZNSt3__113basic_ostreamIcNS_11char_traitsIcEEE4swapERS3_=uninstrumented\nfun:_ZNSt3__113basic_ostreamIcNS_11char_traitsIcEEE5flushEv=uninstrumented\nfun:_ZNSt3__113basic_ostreamIcNS_11char_traitsIcEEE5seekpENS_4fposI11__mbstate_tEE=uninstrumented\nfun:_ZNSt3__113basic_ostreamIcNS_11char_traitsIcEEE5seekpExNS_8ios_base7seekdirE=uninstrumented\nfun:_ZNSt3__113basic_ostreamIcNS_11char_traitsIcEEE5tellpEv=uninstrumented\nfun:_ZNSt3__113basic_ostreamIcNS_11char_traitsIcEEE5writeEPKcl=uninstrumented\nfun:_ZNSt3__113basic_ostreamIcNS_11char_traitsIcEEE6sentryC1ERS3_=uninstrumented\nfun:_ZNSt3__113basic_ostreamIcNS_11char_traitsIcEEE6sentryC2ERS3_=uninstrumented\nfun:_ZNSt3__113basic_ostreamIcNS_11char_traitsIcEEE6sentryD1Ev=uninstrumented\nfun:_ZNSt3__113basic_ostreamIcNS_11char_traitsIcEEE6sentryD2Ev=uninstrumented\nfun:_ZNSt3__113basic_ostreamIcNS_11char_traitsIcEEEC1EPNS_15basic_streambufIcS2_EE=uninstrumented\nfun:_ZNSt3__113basic_ostreamIcNS_11char_traitsIcEEEC2EPNS_15basic_streambufIcS2_EE=uninstrumented\nfun:_ZNSt3__113basic_ostreamIcNS_11char_traitsIcEEED0Ev=uninstrumented\nfun:_ZNSt3__113basic_ostreamIcNS_11char_traitsIcEEED1Ev=uninstrumented\nfun:_ZNSt3__113basic_ostreamIcNS_11char_traitsIcEEED2Ev=uninstrumented\nfun:_ZNSt3__113basic_ostreamIcNS_11char_traitsIcEEElsEPFRNS_8ios_baseES5_E=uninstrumented\nfun:_ZNSt3__113basic_ostreamIcNS_11char_traitsIcEEElsEPFRNS_9basic_iosIcS2_EES6_E=uninstrumented\nfun:_ZNSt3__113basic_ostreamIcNS_11char_traitsIcEEElsEPFRS3_S4_E=uninstrumented\nfun:_ZNSt3__113basic_ostreamIcNS_11char_traitsIcEEElsEPKv=uninstrumented\nfun:_ZNSt3__113basic_ostreamIcNS_11char_traitsIcEEElsEPNS_15basic_streambufIcS2_EE=uninstrumented\nfun:_ZNSt3__113basic_ostreamIcNS_11char_traitsIcEEElsEb=uninstrumented\nfun:_ZNSt3__113basic_ostreamIcNS_11char_traitsIcEEElsEd=uninstrumented\nfun:_ZNSt3__113basic_ostreamIcNS_11char_traitsIcEEElsEe=uninstrumented\nfun:_ZNSt3__113basic_ostreamIcNS_11char_traitsIcEEElsEf=uninstrumented\nf
un:_ZNSt3__113basic_ostreamIcNS_11char_traitsIcEEElsEi=uninstrumented\nfun:_ZNSt3__113basic_ostreamIcNS_11char_traitsIcEEElsEj=uninstrumented\nfun:_ZNSt3__113basic_ostreamIcNS_11char_traitsIcEEElsEl=uninstrumented\nfun:_ZNSt3__113basic_ostreamIcNS_11char_traitsIcEEElsEm=uninstrumented\nfun:_ZNSt3__113basic_ostreamIcNS_11char_traitsIcEEElsEs=uninstrumented\nfun:_ZNSt3__113basic_ostreamIcNS_11char_traitsIcEEElsEt=uninstrumented\nfun:_ZNSt3__113basic_ostreamIcNS_11char_traitsIcEEElsEx=uninstrumented\nfun:_ZNSt3__113basic_ostreamIcNS_11char_traitsIcEEElsEy=uninstrumented\nfun:_ZNSt3__113basic_ostreamIwNS_11char_traitsIwEEE3putEw=uninstrumented\nfun:_ZNSt3__113basic_ostreamIwNS_11char_traitsIwEEE4swapERS3_=uninstrumented\nfun:_ZNSt3__113basic_ostreamIwNS_11char_traitsIwEEE5flushEv=uninstrumented\nfun:_ZNSt3__113basic_ostreamIwNS_11char_traitsIwEEE5seekpENS_4fposI11__mbstate_tEE=uninstrumented\nfun:_ZNSt3__113basic_ostreamIwNS_11char_traitsIwEEE5seekpExNS_8ios_base7seekdirE=uninstrumented\nfun:_ZNSt3__113basic_ostreamIwNS_11char_traitsIwEEE5tellpEv=uninstrumented\nfun:_ZNSt3__113basic_ostreamIwNS_11char_traitsIwEEE5writeEPKwl=uninstrumented\nfun:_ZNSt3__113basic_ostreamIwNS_11char_traitsIwEEE6sentryC1ERS3_=uninstrumented\nfun:_ZNSt3__113basic_ostreamIwNS_11char_traitsIwEEE6sentryC2ERS3_=uninstrumented\nfun:_ZNSt3__113basic_ostreamIwNS_11char_traitsIwEEE6sentryD1Ev=uninstrumented\nfun:_ZNSt3__113basic_ostreamIwNS_11char_traitsIwEEE6sentryD2Ev=uninstrumented\nfun:_ZNSt3__113basic_ostreamIwNS_11char_traitsIwEEEC1EPNS_15basic_streambufIwS2_EE=uninstrumented\nfun:_ZNSt3__113basic_ostreamIwNS_11char_traitsIwEEEC2EPNS_15basic_streambufIwS2_EE=uninstrumented\nfun:_ZNSt3__113basic_ostreamIwNS_11char_traitsIwEEED0Ev=uninstrumented\nfun:_ZNSt3__113basic_ostreamIwNS_11char_traitsIwEEED1Ev=uninstrumented\nfun:_ZNSt3__113basic_ostreamIwNS_11char_traitsIwEEED2Ev=uninstrumented\nfun:_ZNSt3__113basic_ostreamIwNS_11char_traitsIwEEElsEPFRNS_8ios_baseES5_E=uninstrumented\nfun:_ZNSt3__113basi
c_ostreamIwNS_11char_traitsIwEEElsEPFRNS_9basic_iosIwS2_EES6_E=uninstrumented\nfun:_ZNSt3__113basic_ostreamIwNS_11char_traitsIwEEElsEPFRS3_S4_E=uninstrumented\nfun:_ZNSt3__113basic_ostreamIwNS_11char_traitsIwEEElsEPKv=uninstrumented\nfun:_ZNSt3__113basic_ostreamIwNS_11char_traitsIwEEElsEPNS_15basic_streambufIwS2_EE=uninstrumented\nfun:_ZNSt3__113basic_ostreamIwNS_11char_traitsIwEEElsEb=uninstrumented\nfun:_ZNSt3__113basic_ostreamIwNS_11char_traitsIwEEElsEd=uninstrumented\nfun:_ZNSt3__113basic_ostreamIwNS_11char_traitsIwEEElsEe=uninstrumented\nfun:_ZNSt3__113basic_ostreamIwNS_11char_traitsIwEEElsEf=uninstrumented\nfun:_ZNSt3__113basic_ostreamIwNS_11char_traitsIwEEElsEi=uninstrumented\nfun:_ZNSt3__113basic_ostreamIwNS_11char_traitsIwEEElsEj=uninstrumented\nfun:_ZNSt3__113basic_ostreamIwNS_11char_traitsIwEEElsEl=uninstrumented\nfun:_ZNSt3__113basic_ostreamIwNS_11char_traitsIwEEElsEm=uninstrumented\nfun:_ZNSt3__113basic_ostreamIwNS_11char_traitsIwEEElsEs=uninstrumented\nfun:_ZNSt3__113basic_ostreamIwNS_11char_traitsIwEEElsEt=uninstrumented\nfun:_ZNSt3__113basic_ostreamIwNS_11char_traitsIwEEElsEx=uninstrumented\nfun:_ZNSt3__113basic_ostreamIwNS_11char_traitsIwEEElsEy=uninstrumented\nfun:_ZNSt3__113random_deviceC1ERKNS_12basic_stringIcNS_11char_traitsIcEENS_9allocatorIcEEEE=uninstrumented\nfun:_ZNSt3__113random_deviceC2ERKNS_12basic_stringIcNS_11char_traitsIcEENS_9allocatorIcEEEE=uninstrumented\nfun:_ZNSt3__113random_deviceD1Ev=uninstrumented\nfun:_ZNSt3__113random_deviceD2Ev=uninstrumented\nfun:_ZNSt3__113random_deviceclEv=uninstrumented\nfun:_ZNSt3__113shared_futureIvED1Ev=uninstrumented\nfun:_ZNSt3__113shared_futureIvED2Ev=uninstrumented\nfun:_ZNSt3__113shared_futureIvEaSERKS1_=uninstrumented\nfun:_ZNSt3__114__get_const_dbEv=uninstrumented\nfun:_ZNSt3__114__num_get_base10__get_baseERNS_8ios_baseE=uninstrumented\nfun:_ZNSt3__114__num_put_base12__format_intEPcPKcbj=uninstrumented\nfun:_ZNSt3__114__num_put_base14__format_floatEPcPKcj=uninstrumented\nfun:_ZNSt3__114__num_p
ut_base18__identify_paddingEPcS1_RKNS_8ios_baseE=uninstrumented\nfun:_ZNSt3__114__shared_count12__add_sharedEv=uninstrumented\nfun:_ZNSt3__114__shared_count16__release_sharedEv=uninstrumented\nfun:_ZNSt3__114__shared_countD0Ev=uninstrumented\nfun:_ZNSt3__114__shared_countD1Ev=uninstrumented\nfun:_ZNSt3__114__shared_countD2Ev=uninstrumented\nfun:_ZNSt3__114basic_ifstreamIcNS_11char_traitsIcEEE4openEPKcj=uninstrumented\nfun:_ZNSt3__114basic_ifstreamIcNS_11char_traitsIcEEE4openERKNS_12basic_stringIcS2_NS_9allocatorIcEEEEj=uninstrumented\nfun:_ZNSt3__114basic_iostreamIcNS_11char_traitsIcEEE4swapERS3_=uninstrumented\nfun:_ZNSt3__114basic_iostreamIcNS_11char_traitsIcEEEC1EPNS_15basic_streambufIcS2_EE=uninstrumented\nfun:_ZNSt3__114basic_iostreamIcNS_11char_traitsIcEEEC2EPNS_15basic_streambufIcS2_EE=uninstrumented\nfun:_ZNSt3__114basic_iostreamIcNS_11char_traitsIcEEED0Ev=uninstrumented\nfun:_ZNSt3__114basic_iostreamIcNS_11char_traitsIcEEED1Ev=uninstrumented\nfun:_ZNSt3__114basic_iostreamIcNS_11char_traitsIcEEED2Ev=uninstrumented\nfun:_ZNSt3__114basic_ofstreamIcNS_11char_traitsIcEEE4openEPKcj=uninstrumented\nfun:_ZNSt3__114basic_ofstreamIcNS_11char_traitsIcEEE4openERKNS_12basic_stringIcS2_NS_9allocatorIcEEEEj=uninstrumented\nfun:_ZNSt3__114codecvt_bynameIDiDu11__mbstate_tED0Ev=uninstrumented\nfun:_ZNSt3__114codecvt_bynameIDiDu11__mbstate_tED1Ev=uninstrumented\nfun:_ZNSt3__114codecvt_bynameIDiDu11__mbstate_tED2Ev=uninstrumented\nfun:_ZNSt3__114codecvt_bynameIDic11__mbstate_tED0Ev=uninstrumented\nfun:_ZNSt3__114codecvt_bynameIDic11__mbstate_tED1Ev=uninstrumented\nfun:_ZNSt3__114codecvt_bynameIDic11__mbstate_tED2Ev=uninstrumented\nfun:_ZNSt3__114codecvt_bynameIDsDu11__mbstate_tED0Ev=uninstrumented\nfun:_ZNSt3__114codecvt_bynameIDsDu11__mbstate_tED1Ev=uninstrumented\nfun:_ZNSt3__114codecvt_bynameIDsDu11__mbstate_tED2Ev=uninstrumented\nfun:_ZNSt3__114codecvt_bynameIDsc11__mbstate_tED0Ev=uninstrumented\nfun:_ZNSt3__114codecvt_bynameIDsc11__mbstate_tED1Ev=uninstrumented\nfun:_ZNSt
3__114codecvt_bynameIDsc11__mbstate_tED2Ev=uninstrumented\nfun:_ZNSt3__114codecvt_bynameIcc11__mbstate_tED0Ev=uninstrumented\nfun:_ZNSt3__114codecvt_bynameIcc11__mbstate_tED1Ev=uninstrumented\nfun:_ZNSt3__114codecvt_bynameIcc11__mbstate_tED2Ev=uninstrumented\nfun:_ZNSt3__114codecvt_bynameIwc11__mbstate_tED0Ev=uninstrumented\nfun:_ZNSt3__114codecvt_bynameIwc11__mbstate_tED1Ev=uninstrumented\nfun:_ZNSt3__114codecvt_bynameIwc11__mbstate_tED2Ev=uninstrumented\nfun:_ZNSt3__114collate_bynameIcEC1EPKcm=uninstrumented\nfun:_ZNSt3__114collate_bynameIcEC1ERKNS_12basic_stringIcNS_11char_traitsIcEENS_9allocatorIcEEEEm=uninstrumented\nfun:_ZNSt3__114collate_bynameIcEC2EPKcm=uninstrumented\nfun:_ZNSt3__114collate_bynameIcEC2ERKNS_12basic_stringIcNS_11char_traitsIcEENS_9allocatorIcEEEEm=uninstrumented\nfun:_ZNSt3__114collate_bynameIcED0Ev=uninstrumented\nfun:_ZNSt3__114collate_bynameIcED1Ev=uninstrumented\nfun:_ZNSt3__114collate_bynameIcED2Ev=uninstrumented\nfun:_ZNSt3__114collate_bynameIwEC1EPKcm=uninstrumented\nfun:_ZNSt3__114collate_bynameIwEC1ERKNS_12basic_stringIcNS_11char_traitsIcEENS_9allocatorIcEEEEm=uninstrumented\nfun:_ZNSt3__114collate_bynameIwEC2EPKcm=uninstrumented\nfun:_ZNSt3__114collate_bynameIwEC2ERKNS_12basic_stringIcNS_11char_traitsIcEENS_9allocatorIcEEEEm=uninstrumented\nfun:_ZNSt3__114collate_bynameIwED0Ev=uninstrumented\nfun:_ZNSt3__114collate_bynameIwED1Ev=uninstrumented\nfun:_ZNSt3__114collate_bynameIwED2Ev=uninstrumented\nfun:_ZNSt3__114error_categoryC2Ev=uninstrumented\nfun:_ZNSt3__114error_categoryD0Ev=uninstrumented\nfun:_ZNSt3__114error_categoryD1Ev=uninstrumented\nfun:_ZNSt3__114error_categoryD2Ev=uninstrumented\nfun:_ZNSt3__115__get_classnameEPKcb=uninstrumented\nfun:_ZNSt3__115__thread_struct25notify_all_at_thread_exitEPNS_18condition_variableEPNS_5mutexE=uninstrumented\nfun:_ZNSt3__115__thread_struct27__make_ready_at_thread_exitEPNS_17__assoc_sub_stateE=uninstrumented\nfun:_ZNSt3__115__thread_structC1Ev=uninstrumented\nfun:_ZNSt3__115__thread_struct
C2Ev=uninstrumented\nfun:_ZNSt3__115__thread_structD1Ev=uninstrumented\nfun:_ZNSt3__115__thread_structD2Ev=uninstrumented\nfun:_ZNSt3__115basic_streambufIcNS_11char_traitsIcEEE10pubseekoffExNS_8ios_base7seekdirEj=uninstrumented\nfun:_ZNSt3__115basic_streambufIcNS_11char_traitsIcEEE10pubseekposENS_4fposI11__mbstate_tEEj=uninstrumented\nfun:_ZNSt3__115basic_streambufIcNS_11char_traitsIcEEE4setgEPcS4_S4_=uninstrumented\nfun:_ZNSt3__115basic_streambufIcNS_11char_traitsIcEEE4setpEPcS4_=uninstrumented\nfun:_ZNSt3__115basic_streambufIcNS_11char_traitsIcEEE4swapERS3_=uninstrumented\nfun:_ZNSt3__115basic_streambufIcNS_11char_traitsIcEEE4syncEv=uninstrumented\nfun:_ZNSt3__115basic_streambufIcNS_11char_traitsIcEEE5gbumpEi=uninstrumented\nfun:_ZNSt3__115basic_streambufIcNS_11char_traitsIcEEE5imbueERKNS_6localeE=uninstrumented\nfun:_ZNSt3__115basic_streambufIcNS_11char_traitsIcEEE5pbumpEi=uninstrumented\nfun:_ZNSt3__115basic_streambufIcNS_11char_traitsIcEEE5sgetcEv=uninstrumented\nfun:_ZNSt3__115basic_streambufIcNS_11char_traitsIcEEE5sgetnEPcl=uninstrumented\nfun:_ZNSt3__115basic_streambufIcNS_11char_traitsIcEEE5sputcEc=uninstrumented\nfun:_ZNSt3__115basic_streambufIcNS_11char_traitsIcEEE5sputnEPKcl=uninstrumented\nfun:_ZNSt3__115basic_streambufIcNS_11char_traitsIcEEE5uflowEv=uninstrumented\nfun:_ZNSt3__115basic_streambufIcNS_11char_traitsIcEEE6sbumpcEv=uninstrumented\nfun:_ZNSt3__115basic_streambufIcNS_11char_traitsIcEEE6setbufEPcl=uninstrumented\nfun:_ZNSt3__115basic_streambufIcNS_11char_traitsIcEEE6snextcEv=uninstrumented\nfun:_ZNSt3__115basic_streambufIcNS_11char_traitsIcEEE6xsgetnEPcl=uninstrumented\nfun:_ZNSt3__115basic_streambufIcNS_11char_traitsIcEEE6xsputnEPKcl=uninstrumented\nfun:_ZNSt3__115basic_streambufIcNS_11char_traitsIcEEE7pubsyncEv=uninstrumented\nfun:_ZNSt3__115basic_streambufIcNS_11char_traitsIcEEE7seekoffExNS_8ios_base7seekdirEj=uninstrumented\nfun:_ZNSt3__115basic_streambufIcNS_11char_traitsIcEEE7seekposENS_4fposI11__mbstate_tEEj=uninstrumented\nfun:_ZNSt3__
115basic_streambufIcNS_11char_traitsIcEEE7sungetcEv=uninstrumented\nfun:_ZNSt3__115basic_streambufIcNS_11char_traitsIcEEE8in_availEv=uninstrumented\nfun:_ZNSt3__115basic_streambufIcNS_11char_traitsIcEEE8overflowEi=uninstrumented\nfun:_ZNSt3__115basic_streambufIcNS_11char_traitsIcEEE8pubimbueERKNS_6localeE=uninstrumented\nfun:_ZNSt3__115basic_streambufIcNS_11char_traitsIcEEE9pbackfailEi=uninstrumented\nfun:_ZNSt3__115basic_streambufIcNS_11char_traitsIcEEE9pubsetbufEPcl=uninstrumented\nfun:_ZNSt3__115basic_streambufIcNS_11char_traitsIcEEE9showmanycEv=uninstrumented\nfun:_ZNSt3__115basic_streambufIcNS_11char_traitsIcEEE9sputbackcEc=uninstrumented\nfun:_ZNSt3__115basic_streambufIcNS_11char_traitsIcEEE9underflowEv=uninstrumented\nfun:_ZNSt3__115basic_streambufIcNS_11char_traitsIcEEEC1ERKS3_=uninstrumented\nfun:_ZNSt3__115basic_streambufIcNS_11char_traitsIcEEEC1Ev=uninstrumented\nfun:_ZNSt3__115basic_streambufIcNS_11char_traitsIcEEEC2ERKS3_=uninstrumented\nfun:_ZNSt3__115basic_streambufIcNS_11char_traitsIcEEEC2Ev=uninstrumented\nfun:_ZNSt3__115basic_streambufIcNS_11char_traitsIcEEED0Ev=uninstrumented\nfun:_ZNSt3__115basic_streambufIcNS_11char_traitsIcEEED1Ev=uninstrumented\nfun:_ZNSt3__115basic_streambufIcNS_11char_traitsIcEEED2Ev=uninstrumented\nfun:_ZNSt3__115basic_streambufIcNS_11char_traitsIcEEEaSERKS3_=uninstrumented\nfun:_ZNSt3__115basic_streambufIwNS_11char_traitsIwEEE10pubseekoffExNS_8ios_base7seekdirEj=uninstrumented\nfun:_ZNSt3__115basic_streambufIwNS_11char_traitsIwEEE10pubseekposENS_4fposI11__mbstate_tEEj=uninstrumented\nfun:_ZNSt3__115basic_streambufIwNS_11char_traitsIwEEE4setgEPwS4_S4_=uninstrumented\nfun:_ZNSt3__115basic_streambufIwNS_11char_traitsIwEEE4setpEPwS4_=uninstrumented\nfun:_ZNSt3__115basic_streambufIwNS_11char_traitsIwEEE4swapERS3_=uninstrumented\nfun:_ZNSt3__115basic_streambufIwNS_11char_traitsIwEEE4syncEv=uninstrumented\nfun:_ZNSt3__115basic_streambufIwNS_11char_traitsIwEEE5gbumpEi=uninstrumented\nfun:_ZNSt3__115basic_streambufIwNS_11char_trait
sIwEEE5imbueERKNS_6localeE=uninstrumented\nfun:_ZNSt3__115basic_streambufIwNS_11char_traitsIwEEE5pbumpEi=uninstrumented\nfun:_ZNSt3__115basic_streambufIwNS_11char_traitsIwEEE5sgetcEv=uninstrumented\nfun:_ZNSt3__115basic_streambufIwNS_11char_traitsIwEEE5sgetnEPwl=uninstrumented\nfun:_ZNSt3__115basic_streambufIwNS_11char_traitsIwEEE5sputcEw=uninstrumented\nfun:_ZNSt3__115basic_streambufIwNS_11char_traitsIwEEE5sputnEPKwl=uninstrumented\nfun:_ZNSt3__115basic_streambufIwNS_11char_traitsIwEEE5uflowEv=uninstrumented\nfun:_ZNSt3__115basic_streambufIwNS_11char_traitsIwEEE6sbumpcEv=uninstrumented\nfun:_ZNSt3__115basic_streambufIwNS_11char_traitsIwEEE6setbufEPwl=uninstrumented\nfun:_ZNSt3__115basic_streambufIwNS_11char_traitsIwEEE6snextcEv=uninstrumented\nfun:_ZNSt3__115basic_streambufIwNS_11char_traitsIwEEE6xsgetnEPwl=uninstrumented\nfun:_ZNSt3__115basic_streambufIwNS_11char_traitsIwEEE6xsputnEPKwl=uninstrumented\nfun:_ZNSt3__115basic_streambufIwNS_11char_traitsIwEEE7pubsyncEv=uninstrumented\nfun:_ZNSt3__115basic_streambufIwNS_11char_traitsIwEEE7seekoffExNS_8ios_base7seekdirEj=uninstrumented\nfun:_ZNSt3__115basic_streambufIwNS_11char_traitsIwEEE7seekposENS_4fposI11__mbstate_tEEj=uninstrumented\nfun:_ZNSt3__115basic_streambufIwNS_11char_traitsIwEEE7sungetcEv=uninstrumented\nfun:_ZNSt3__115basic_streambufIwNS_11char_traitsIwEEE8in_availEv=uninstrumented\nfun:_ZNSt3__115basic_streambufIwNS_11char_traitsIwEEE8overflowEj=uninstrumented\nfun:_ZNSt3__115basic_streambufIwNS_11char_traitsIwEEE8pubimbueERKNS_6localeE=uninstrumented\nfun:_ZNSt3__115basic_streambufIwNS_11char_traitsIwEEE9pbackfailEj=uninstrumented\nfun:_ZNSt3__115basic_streambufIwNS_11char_traitsIwEEE9pubsetbufEPwl=uninstrumented\nfun:_ZNSt3__115basic_streambufIwNS_11char_traitsIwEEE9showmanycEv=uninstrumented\nfun:_ZNSt3__115basic_streambufIwNS_11char_traitsIwEEE9sputbackcEw=uninstrumented\nfun:_ZNSt3__115basic_streambufIwNS_11char_traitsIwEEE9underflowEv=uninstrumented\nfun:_ZNSt3__115basic_streambufIwNS_11char_traitsI
wEEEC1ERKS3_=uninstrumented\nfun:_ZNSt3__115basic_streambufIwNS_11char_traitsIwEEEC1Ev=uninstrumented\nfun:_ZNSt3__115basic_streambufIwNS_11char_traitsIwEEEC2ERKS3_=uninstrumented\nfun:_ZNSt3__115basic_streambufIwNS_11char_traitsIwEEEC2Ev=uninstrumented\nfun:_ZNSt3__115basic_streambufIwNS_11char_traitsIwEEED0Ev=uninstrumented\nfun:_ZNSt3__115basic_streambufIwNS_11char_traitsIwEEED1Ev=uninstrumented\nfun:_ZNSt3__115basic_streambufIwNS_11char_traitsIwEEED2Ev=uninstrumented\nfun:_ZNSt3__115basic_streambufIwNS_11char_traitsIwEEEaSERKS3_=uninstrumented\nfun:_ZNSt3__115basic_stringbufIcNS_11char_traitsIcEENS_9allocatorIcEEE3strERKNS_12basic_stringIcS2_S4_EE=uninstrumented\nfun:_ZNSt3__115basic_stringbufIcNS_11char_traitsIcEENS_9allocatorIcEEE4swapERS5_=uninstrumented\nfun:_ZNSt3__115basic_stringbufIcNS_11char_traitsIcEENS_9allocatorIcEEE7seekoffExNS_8ios_base7seekdirEj=uninstrumented\nfun:_ZNSt3__115basic_stringbufIcNS_11char_traitsIcEENS_9allocatorIcEEE8overflowEi=uninstrumented\nfun:_ZNSt3__115basic_stringbufIcNS_11char_traitsIcEENS_9allocatorIcEEE9pbackfailEi=uninstrumented\nfun:_ZNSt3__115basic_stringbufIcNS_11char_traitsIcEENS_9allocatorIcEEE9underflowEv=uninstrumented\nfun:_ZNSt3__115basic_stringbufIcNS_11char_traitsIcEENS_9allocatorIcEEEC1EOS5_=uninstrumented\nfun:_ZNSt3__115basic_stringbufIcNS_11char_traitsIcEENS_9allocatorIcEEEC2EOS5_=uninstrumented\nfun:_ZNSt3__115basic_stringbufIcNS_11char_traitsIcEENS_9allocatorIcEEEaSEOS5_=uninstrumented\nfun:_ZNSt3__115future_categoryEv=uninstrumented\nfun:_ZNSt3__115numpunct_bynameIcE6__initEPKc=uninstrumented\nfun:_ZNSt3__115numpunct_bynameIcEC1EPKcm=uninstrumented\nfun:_ZNSt3__115numpunct_bynameIcEC1ERKNS_12basic_stringIcNS_11char_traitsIcEENS_9allocatorIcEEEEm=uninstrumented\nfun:_ZNSt3__115numpunct_bynameIcEC2EPKcm=uninstrumented\nfun:_ZNSt3__115numpunct_bynameIcEC2ERKNS_12basic_stringIcNS_11char_traitsIcEENS_9allocatorIcEEEEm=uninstrumented\nfun:_ZNSt3__115numpunct_bynameIcED0Ev=uninstrumented\nfun:_ZNSt3__115numpunct_
bynameIcED1Ev=uninstrumented\nfun:_ZNSt3__115numpunct_bynameIcED2Ev=uninstrumented\nfun:_ZNSt3__115numpunct_bynameIwE6__initEPKc=uninstrumented\nfun:_ZNSt3__115numpunct_bynameIwEC1EPKcm=uninstrumented\nfun:_ZNSt3__115numpunct_bynameIwEC1ERKNS_12basic_stringIcNS_11char_traitsIcEENS_9allocatorIcEEEEm=uninstrumented\nfun:_ZNSt3__115numpunct_bynameIwEC2EPKcm=uninstrumented\nfun:_ZNSt3__115numpunct_bynameIwEC2ERKNS_12basic_stringIcNS_11char_traitsIcEENS_9allocatorIcEEEEm=uninstrumented\nfun:_ZNSt3__115numpunct_bynameIwED0Ev=uninstrumented\nfun:_ZNSt3__115numpunct_bynameIwED1Ev=uninstrumented\nfun:_ZNSt3__115numpunct_bynameIwED2Ev=uninstrumented\nfun:_ZNSt3__115recursive_mutex4lockEv=uninstrumented\nfun:_ZNSt3__115recursive_mutex6unlockEv=uninstrumented\nfun:_ZNSt3__115recursive_mutex8try_lockEv=uninstrumented\nfun:_ZNSt3__115recursive_mutexC1Ev=uninstrumented\nfun:_ZNSt3__115recursive_mutexC2Ev=uninstrumented\nfun:_ZNSt3__115recursive_mutexD1Ev=uninstrumented\nfun:_ZNSt3__115recursive_mutexD2Ev=uninstrumented\nfun:_ZNSt3__115system_categoryEv=uninstrumented\nfun:_ZNSt3__116__check_groupingERKNS_12basic_stringIcNS_11char_traitsIcEENS_9allocatorIcEEEEPjS8_Rj=uninstrumented\nfun:_ZNSt3__116__narrow_to_utf8ILm16EED0Ev=uninstrumented\nfun:_ZNSt3__116__narrow_to_utf8ILm16EED1Ev=uninstrumented\nfun:_ZNSt3__116__narrow_to_utf8ILm16EED2Ev=uninstrumented\nfun:_ZNSt3__116__narrow_to_utf8ILm32EED0Ev=uninstrumented\nfun:_ZNSt3__116__narrow_to_utf8ILm32EED1Ev=uninstrumented\nfun:_ZNSt3__116__narrow_to_utf8ILm32EED2Ev=uninstrumented\nfun:_ZNSt3__116generic_categoryEv=uninstrumented\nfun:_ZNSt3__117__assoc_sub_state10__sub_waitERNS_11unique_lockINS_5mutexEEE=uninstrumented\nfun:_ZNSt3__117__assoc_sub_state12__make_readyEv=uninstrumented\nfun:_ZNSt3__117__assoc_sub_state13set_exceptionESt13exception_ptr=uninstrumented\nfun:_ZNSt3__117__assoc_sub_state16__on_zero_sharedEv=uninstrumented\nfun:_ZNSt3__117__assoc_sub_state24set_value_at_thread_exitEv=uninstrumented\nfun:_ZNSt3__117__assoc_su
b_state28set_exception_at_thread_exitESt13exception_ptr=uninstrumented\nfun:_ZNSt3__117__assoc_sub_state4copyEv=uninstrumented\nfun:_ZNSt3__117__assoc_sub_state4waitEv=uninstrumented\nfun:_ZNSt3__117__assoc_sub_state9__executeEv=uninstrumented\nfun:_ZNSt3__117__assoc_sub_state9set_valueEv=uninstrumented\nfun:_ZNSt3__117__widen_from_utf8ILm16EED0Ev=uninstrumented\nfun:_ZNSt3__117__widen_from_utf8ILm16EED1Ev=uninstrumented\nfun:_ZNSt3__117__widen_from_utf8ILm16EED2Ev=uninstrumented\nfun:_ZNSt3__117__widen_from_utf8ILm32EED0Ev=uninstrumented\nfun:_ZNSt3__117__widen_from_utf8ILm32EED1Ev=uninstrumented\nfun:_ZNSt3__117__widen_from_utf8ILm32EED2Ev=uninstrumented\nfun:_ZNSt3__117declare_reachableEPv=uninstrumented\nfun:_ZNSt3__117iostream_categoryEv=uninstrumented\nfun:_ZNSt3__117moneypunct_bynameIcLb0EE4initEPKc=uninstrumented\nfun:_ZNSt3__117moneypunct_bynameIcLb1EE4initEPKc=uninstrumented\nfun:_ZNSt3__117moneypunct_bynameIwLb0EE4initEPKc=uninstrumented\nfun:_ZNSt3__117moneypunct_bynameIwLb1EE4initEPKc=uninstrumented\nfun:_ZNSt3__118__time_get_storageIcE4initERKNS_5ctypeIcEE=uninstrumented\nfun:_ZNSt3__118__time_get_storageIcE9__analyzeEcRKNS_5ctypeIcEE=uninstrumented\nfun:_ZNSt3__118__time_get_storageIcEC1EPKc=uninstrumented\nfun:_ZNSt3__118__time_get_storageIcEC1ERKNS_12basic_stringIcNS_11char_traitsIcEENS_9allocatorIcEEEE=uninstrumented\nfun:_ZNSt3__118__time_get_storageIcEC2EPKc=uninstrumented\nfun:_ZNSt3__118__time_get_storageIcEC2ERKNS_12basic_stringIcNS_11char_traitsIcEENS_9allocatorIcEEEE=uninstrumented\nfun:_ZNSt3__118__time_get_storageIwE4initERKNS_5ctypeIwEE=uninstrumented\nfun:_ZNSt3__118__time_get_storageIwE9__analyzeEcRKNS_5ctypeIwEE=uninstrumented\nfun:_ZNSt3__118__time_get_storageIwEC1EPKc=uninstrumented\nfun:_ZNSt3__118__time_get_storageIwEC1ERKNS_12basic_stringIcNS_11char_traitsIcEENS_9allocatorIcEEEE=uninstrumented\nfun:_ZNSt3__118__time_get_storageIwEC2EPKc=uninstrumented\nfun:_ZNSt3__118__time_get_storageIwEC2ERKNS_12basic_stringIcNS_11char_traitsIcE
ENS_9allocatorIcEEEE=uninstrumented\nfun:_ZNSt3__118basic_stringstreamIcNS_11char_traitsIcEENS_9allocatorIcEEEaSEOS5_=uninstrumented\nfun:_ZNSt3__118condition_variable10notify_allEv=uninstrumented\nfun:_ZNSt3__118condition_variable10notify_oneEv=uninstrumented\nfun:_ZNSt3__118condition_variable15__do_timed_waitERNS_11unique_lockINS_5mutexEEENS_6chrono10time_pointINS5_12system_clockENS5_8durationIxNS_5ratioILl1ELl1000000000EEEEEEE=uninstrumented\nfun:_ZNSt3__118condition_variable4waitERNS_11unique_lockINS_5mutexEEE=uninstrumented\nfun:_ZNSt3__118condition_variableD1Ev=uninstrumented\nfun:_ZNSt3__118condition_variableD2Ev=uninstrumented\nfun:_ZNSt3__118get_pointer_safetyEv=uninstrumented\nfun:_ZNSt3__118shared_timed_mutex11lock_sharedEv=uninstrumented\nfun:_ZNSt3__118shared_timed_mutex13unlock_sharedEv=uninstrumented\nfun:_ZNSt3__118shared_timed_mutex15try_lock_sharedEv=uninstrumented\nfun:_ZNSt3__118shared_timed_mutex4lockEv=uninstrumented\nfun:_ZNSt3__118shared_timed_mutex6unlockEv=uninstrumented\nfun:_ZNSt3__118shared_timed_mutex8try_lockEv=uninstrumented\nfun:_ZNSt3__118shared_timed_mutexC1Ev=uninstrumented\nfun:_ZNSt3__118shared_timed_mutexC2Ev=uninstrumented\nfun:_ZNSt3__119__shared_mutex_base11lock_sharedEv=uninstrumented\nfun:_ZNSt3__119__shared_mutex_base13unlock_sharedEv=uninstrumented\nfun:_ZNSt3__119__shared_mutex_base15try_lock_sharedEv=uninstrumented\nfun:_ZNSt3__119__shared_mutex_base4lockEv=uninstrumented\nfun:_ZNSt3__119__shared_mutex_base6unlockEv=uninstrumented\nfun:_ZNSt3__119__shared_mutex_base8try_lockEv=uninstrumented\nfun:_ZNSt3__119__shared_mutex_baseC1Ev=uninstrumented\nfun:_ZNSt3__119__shared_mutex_baseC2Ev=uninstrumented\nfun:_ZNSt3__119__shared_weak_count10__add_weakEv=uninstrumented\nfun:_ZNSt3__119__shared_weak_count12__add_sharedEv=uninstrumented\nfun:_ZNSt3__119__shared_weak_count14__release_weakEv=uninstrumented\nfun:_ZNSt3__119__shared_weak_count16__release_sharedEv=uninstrumented\nfun:_ZNSt3__119__shared_weak_count4lockEv=uninstrume
nted\nfun:_ZNSt3__119__shared_weak_countD0Ev=uninstrumented\nfun:_ZNSt3__119__shared_weak_countD1Ev=uninstrumented\nfun:_ZNSt3__119__shared_weak_countD2Ev=uninstrumented\nfun:_ZNSt3__119__thread_local_dataEv=uninstrumented\nfun:_ZNSt3__119basic_istringstreamIcNS_11char_traitsIcEENS_9allocatorIcEEEaSEOS5_=uninstrumented\nfun:_ZNSt3__119basic_ostringstreamIcNS_11char_traitsIcEENS_9allocatorIcEEEaSEOS5_=uninstrumented\nfun:_ZNSt3__119declare_no_pointersEPcm=uninstrumented\nfun:_ZNSt3__120__get_collation_nameEPKc=uninstrumented\nfun:_ZNSt3__120__libcpp_atomic_waitEPVKNS_17__cxx_atomic_implIiNS_22__cxx_atomic_base_implIiEEEEi=uninstrumented\nfun:_ZNSt3__120__libcpp_atomic_waitEPVKvi=uninstrumented\nfun:_ZNSt3__120__throw_system_errorEiPKc=uninstrumented\nfun:_ZNSt3__121__throw_runtime_errorEPKc=uninstrumented\nfun:_ZNSt3__121__undeclare_reachableEPv=uninstrumented\nfun:_ZNSt3__121recursive_timed_mutex4lockEv=uninstrumented\nfun:_ZNSt3__121recursive_timed_mutex6unlockEv=uninstrumented\nfun:_ZNSt3__121recursive_timed_mutex8try_lockEv=uninstrumented\nfun:_ZNSt3__121recursive_timed_mutexC1Ev=uninstrumented\nfun:_ZNSt3__121recursive_timed_mutexC2Ev=uninstrumented\nfun:_ZNSt3__121recursive_timed_mutexD1Ev=uninstrumented\nfun:_ZNSt3__121recursive_timed_mutexD2Ev=uninstrumented\nfun:_ZNSt3__121undeclare_no_pointersEPcm=uninstrumented\nfun:_ZNSt3__123__cxx_atomic_notify_allEPVKNS_17__cxx_atomic_implIiNS_22__cxx_atomic_base_implIiEEEE=uninstrumented\nfun:_ZNSt3__123__cxx_atomic_notify_allEPVKv=uninstrumented\nfun:_ZNSt3__123__cxx_atomic_notify_oneEPVKNS_17__cxx_atomic_implIiNS_22__cxx_atomic_base_implIiEEEE=uninstrumented\nfun:_ZNSt3__123__cxx_atomic_notify_oneEPVKv=uninstrumented\nfun:_ZNSt3__123__libcpp_atomic_monitorEPVKNS_17__cxx_atomic_implIiNS_22__cxx_atomic_base_implIiEEEE=uninstrumented\nfun:_ZNSt3__123__libcpp_atomic_monitorEPVKv=uninstrumented\nfun:_ZNSt3__125notify_all_at_thread_exitERNS_18condition_variableENS_11unique_lockINS_5mutexEEE=uninstrumented\nfun:_ZNSt3__127_
_insertion_sort_incompleteIRNS_6__lessIaaEEPaEEbT0_S5_T_=uninstrumented\nfun:_ZNSt3__127__insertion_sort_incompleteIRNS_6__lessIccEEPcEEbT0_S5_T_=uninstrumented\nfun:_ZNSt3__127__insertion_sort_incompleteIRNS_6__lessIddEEPdEEbT0_S5_T_=uninstrumented\nfun:_ZNSt3__127__insertion_sort_incompleteIRNS_6__lessIeeEEPeEEbT0_S5_T_=uninstrumented\nfun:_ZNSt3__127__insertion_sort_incompleteIRNS_6__lessIffEEPfEEbT0_S5_T_=uninstrumented\nfun:_ZNSt3__127__insertion_sort_incompleteIRNS_6__lessIhhEEPhEEbT0_S5_T_=uninstrumented\nfun:_ZNSt3__127__insertion_sort_incompleteIRNS_6__lessIiiEEPiEEbT0_S5_T_=uninstrumented\nfun:_ZNSt3__127__insertion_sort_incompleteIRNS_6__lessIjjEEPjEEbT0_S5_T_=uninstrumented\nfun:_ZNSt3__127__insertion_sort_incompleteIRNS_6__lessIllEEPlEEbT0_S5_T_=uninstrumented\nfun:_ZNSt3__127__insertion_sort_incompleteIRNS_6__lessImmEEPmEEbT0_S5_T_=uninstrumented\nfun:_ZNSt3__127__insertion_sort_incompleteIRNS_6__lessIssEEPsEEbT0_S5_T_=uninstrumented\nfun:_ZNSt3__127__insertion_sort_incompleteIRNS_6__lessIttEEPtEEbT0_S5_T_=uninstrumented\nfun:_ZNSt3__127__insertion_sort_incompleteIRNS_6__lessIwwEEPwEEbT0_S5_T_=uninstrumented\nfun:_ZNSt3__127__insertion_sort_incompleteIRNS_6__lessIxxEEPxEEbT0_S5_T_=uninstrumented\nfun:_ZNSt3__127__insertion_sort_incompleteIRNS_6__lessIyyEEPyEEbT0_S5_T_=uninstrumented\nfun:_ZNSt3__127__libcpp_set_debug_functionEPFvRKNS_19__libcpp_debug_infoEE=uninstrumented\nfun:_ZNSt3__129__libcpp_abort_debug_functionERKNS_19__libcpp_debug_infoE=uninstrumented\nfun:_ZNSt3__131__arrive_barrier_algorithm_baseEPNS_24__barrier_algorithm_baseEh=uninstrumented\nfun:_ZNSt3__132__destroy_barrier_algorithm_baseEPNS_24__barrier_algorithm_baseE=uninstrumented\nfun:_ZNSt3__134__construct_barrier_algorithm_baseERl=uninstrumented\nfun:_ZNSt3__14__fs10filesystem10__absoluteERKNS1_4pathEPNS_10error_codeE=uninstrumented\nfun:_ZNSt3__14__fs10filesystem10hash_valueERKNS1_4pathE=uninstrumented\nfun:_ZNSt3__14__fs10filesystem11__canonicalERKNS1_4pathEPNS_10error_codeE=unins
trumented\nfun:_ZNSt3__14__fs10filesystem11__copy_fileERKNS1_4pathES4_NS1_12copy_optionsEPNS_10error_codeE=uninstrumented\nfun:_ZNSt3__14__fs10filesystem11__file_sizeERKNS1_4pathEPNS_10error_codeE=uninstrumented\nfun:_ZNSt3__14__fs10filesystem12__equivalentERKNS1_4pathES4_PNS_10error_codeE=uninstrumented\nfun:_ZNSt3__14__fs10filesystem12__remove_allERKNS1_4pathEPNS_10error_codeE=uninstrumented\nfun:_ZNSt3__14__fs10filesystem13__fs_is_emptyERKNS1_4pathEPNS_10error_codeE=uninstrumented\nfun:_ZNSt3__14__fs10filesystem13__permissionsERKNS1_4pathENS1_5permsENS1_12perm_optionsEPNS_10error_codeE=uninstrumented\nfun:_ZNSt3__14__fs10filesystem13__resize_fileERKNS1_4pathEmPNS_10error_codeE=uninstrumented\nfun:_ZNSt3__14__fs10filesystem14__copy_symlinkERKNS1_4pathES4_PNS_10error_codeE=uninstrumented\nfun:_ZNSt3__14__fs10filesystem14__current_pathEPNS_10error_codeE=uninstrumented\nfun:_ZNSt3__14__fs10filesystem14__current_pathERKNS1_4pathEPNS_10error_codeE=uninstrumented\nfun:_ZNSt3__14__fs10filesystem14__read_symlinkERKNS1_4pathEPNS_10error_codeE=uninstrumented\nfun:_ZNSt3__14__fs10filesystem15directory_entry12__do_refreshEv=uninstrumented\nfun:_ZNSt3__14__fs10filesystem16_FilesystemClock3nowEv=uninstrumented\nfun:_ZNSt3__14__fs10filesystem16__create_symlinkERKNS1_4pathES4_PNS_10error_codeE=uninstrumented\nfun:_ZNSt3__14__fs10filesystem16__symlink_statusERKNS1_4pathEPNS_10error_codeE=uninstrumented\nfun:_ZNSt3__14__fs10filesystem16filesystem_error13__create_whatEi=uninstrumented\nfun:_ZNSt3__14__fs10filesystem16filesystem_errorD0Ev=uninstrumented\nfun:_ZNSt3__14__fs10filesystem16filesystem_errorD1Ev=uninstrumented\nfun:_ZNSt3__14__fs10filesystem16filesystem_errorD2Ev=uninstrumented\nfun:_ZNSt3__14__fs10filesystem17__hard_link_countERKNS1_4pathEPNS_10error_codeE=uninstrumented\nfun:_ZNSt3__14__fs10filesystem17__last_write_timeERKNS1_4pathENS_6chrono10time_pointINS1_16_FilesystemClockENS5_8durationInNS_5ratioILl1ELl1000000000EEEEEEEPNS_10error_codeE=uninstrumented\nfun:_ZNSt3__1
4__fs10filesystem17__last_write_timeERKNS1_4pathEPNS_10error_codeE=uninstrumented\nfun:_ZNSt3__14__fs10filesystem18__create_directoryERKNS1_4pathEPNS_10error_codeE=uninstrumented\nfun:_ZNSt3__14__fs10filesystem18__create_directoryERKNS1_4pathES4_PNS_10error_codeE=uninstrumented\nfun:_ZNSt3__14__fs10filesystem18__create_hard_linkERKNS1_4pathES4_PNS_10error_codeE=uninstrumented\nfun:_ZNSt3__14__fs10filesystem18__weakly_canonicalERKNS1_4pathEPNS_10error_codeE=uninstrumented\nfun:_ZNSt3__14__fs10filesystem18directory_iterator11__incrementEPNS_10error_codeE=uninstrumented\nfun:_ZNSt3__14__fs10filesystem18directory_iteratorC1ERKNS1_4pathEPNS_10error_codeENS1_17directory_optionsE=uninstrumented\nfun:_ZNSt3__14__fs10filesystem18directory_iteratorC2ERKNS1_4pathEPNS_10error_codeENS1_17directory_optionsE=uninstrumented\nfun:_ZNSt3__14__fs10filesystem20__create_directoriesERKNS1_4pathEPNS_10error_codeE=uninstrumented\nfun:_ZNSt3__14__fs10filesystem21__temp_directory_pathEPNS_10error_codeE=uninstrumented\nfun:_ZNSt3__14__fs10filesystem26__create_directory_symlinkERKNS1_4pathES4_PNS_10error_codeE=uninstrumented\nfun:_ZNSt3__14__fs10filesystem28recursive_directory_iterator11__incrementEPNS_10error_codeE=uninstrumented\nfun:_ZNSt3__14__fs10filesystem28recursive_directory_iterator15__try_recursionEPNS_10error_codeE=uninstrumented\nfun:_ZNSt3__14__fs10filesystem28recursive_directory_iterator5__popEPNS_10error_codeE=uninstrumented\nfun:_ZNSt3__14__fs10filesystem28recursive_directory_iterator9__advanceEPNS_10error_codeE=uninstrumented\nfun:_ZNSt3__14__fs10filesystem28recursive_directory_iteratorC1ERKNS1_4pathENS1_17directory_optionsEPNS_10error_codeE=uninstrumented\nfun:_ZNSt3__14__fs10filesystem28recursive_directory_iteratorC2ERKNS1_4pathENS1_17directory_optionsEPNS_10error_codeE=uninstrumented\nfun:_ZNSt3__14__fs10filesystem4path17replace_extensionERKS2_=uninstrumented\nfun:_ZNSt3__14__fs10filesystem4path8iterator11__decrementEv=uninstrumented\nfun:_ZNSt3__14__fs10filesystem4path8ite
rator11__incrementEv=uninstrumented\nfun:_ZNSt3__14__fs10filesystem6__copyERKNS1_4pathES4_NS1_12copy_optionsEPNS_10error_codeE=uninstrumented\nfun:_ZNSt3__14__fs10filesystem7__spaceERKNS1_4pathEPNS_10error_codeE=uninstrumented\nfun:_ZNSt3__14__fs10filesystem8__removeERKNS1_4pathEPNS_10error_codeE=uninstrumented\nfun:_ZNSt3__14__fs10filesystem8__renameERKNS1_4pathES4_PNS_10error_codeE=uninstrumented\nfun:_ZNSt3__14__fs10filesystem8__statusERKNS1_4pathEPNS_10error_codeE=uninstrumented\nfun:_ZNSt3__14stodERKNS_12basic_stringIcNS_11char_traitsIcEENS_9allocatorIcEEEEPm=uninstrumented\nfun:_ZNSt3__14stodERKNS_12basic_stringIwNS_11char_traitsIwEENS_9allocatorIwEEEEPm=uninstrumented\nfun:_ZNSt3__14stofERKNS_12basic_stringIcNS_11char_traitsIcEENS_9allocatorIcEEEEPm=uninstrumented\nfun:_ZNSt3__14stofERKNS_12basic_stringIwNS_11char_traitsIwEENS_9allocatorIwEEEEPm=uninstrumented\nfun:_ZNSt3__14stoiERKNS_12basic_stringIcNS_11char_traitsIcEENS_9allocatorIcEEEEPmi=uninstrumented\nfun:_ZNSt3__14stoiERKNS_12basic_stringIwNS_11char_traitsIwEENS_9allocatorIwEEEEPmi=uninstrumented\nfun:_ZNSt3__14stolERKNS_12basic_stringIcNS_11char_traitsIcEENS_9allocatorIcEEEEPmi=uninstrumented\nfun:_ZNSt3__14stolERKNS_12basic_stringIwNS_11char_traitsIwEENS_9allocatorIwEEEEPmi=uninstrumented\nfun:_ZNSt3__15alignEmmRPvRm=uninstrumented\nfun:_ZNSt3__15ctypeIcE13classic_tableEv=uninstrumented\nfun:_ZNSt3__15ctypeIcE21__classic_lower_tableEv=uninstrumented\nfun:_ZNSt3__15ctypeIcE21__classic_upper_tableEv=uninstrumented\nfun:_ZNSt3__15ctypeIcEC1EPKtbm=uninstrumented\nfun:_ZNSt3__15ctypeIcEC2EPKtbm=uninstrumented\nfun:_ZNSt3__15ctypeIcED0Ev=uninstrumented\nfun:_ZNSt3__15ctypeIcED1Ev=uninstrumented\nfun:_ZNSt3__15ctypeIcED2Ev=uninstrumented\nfun:_ZNSt3__15ctypeIwED0Ev=uninstrumented\nfun:_ZNSt3__15ctypeIwED1Ev=uninstrumented\nfun:_ZNSt3__15ctypeIwED2Ev=uninstrumented\nfun:_ZNSt3__15mutex4lockEv=uninstrumented\nfun:_ZNSt3__15mutex6unlockEv=uninstrumented\nfun:_ZNSt3__15mutex8try_lockEv=uninstrumented\nfun:_ZNS
t3__15mutexD1Ev=uninstrumented\nfun:_ZNSt3__15mutexD2Ev=uninstrumented\nfun:_ZNSt3__15stoldERKNS_12basic_stringIcNS_11char_traitsIcEENS_9allocatorIcEEEEPm=uninstrumented\nfun:_ZNSt3__15stoldERKNS_12basic_stringIwNS_11char_traitsIwEENS_9allocatorIwEEEEPm=uninstrumented\nfun:_ZNSt3__15stollERKNS_12basic_stringIcNS_11char_traitsIcEENS_9allocatorIcEEEEPmi=uninstrumented\nfun:_ZNSt3__15stollERKNS_12basic_stringIwNS_11char_traitsIwEENS_9allocatorIwEEEEPmi=uninstrumented\nfun:_ZNSt3__15stoulERKNS_12basic_stringIcNS_11char_traitsIcEENS_9allocatorIcEEEEPmi=uninstrumented\nfun:_ZNSt3__15stoulERKNS_12basic_stringIwNS_11char_traitsIwEENS_9allocatorIwEEEEPmi=uninstrumented\nfun:_ZNSt3__16__clocEv=uninstrumented\nfun:_ZNSt3__16__itoa8__u32toaEjPc=uninstrumented\nfun:_ZNSt3__16__itoa8__u64toaEmPc=uninstrumented\nfun:_ZNSt3__16__sortIRNS_6__lessIaaEEPaEEvT0_S5_T_=uninstrumented\nfun:_ZNSt3__16__sortIRNS_6__lessIccEEPcEEvT0_S5_T_=uninstrumented\nfun:_ZNSt3__16__sortIRNS_6__lessIddEEPdEEvT0_S5_T_=uninstrumented\nfun:_ZNSt3__16__sortIRNS_6__lessIeeEEPeEEvT0_S5_T_=uninstrumented\nfun:_ZNSt3__16__sortIRNS_6__lessIffEEPfEEvT0_S5_T_=uninstrumented\nfun:_ZNSt3__16__sortIRNS_6__lessIhhEEPhEEvT0_S5_T_=uninstrumented\nfun:_ZNSt3__16__sortIRNS_6__lessIiiEEPiEEvT0_S5_T_=uninstrumented\nfun:_ZNSt3__16__sortIRNS_6__lessIjjEEPjEEvT0_S5_T_=uninstrumented\nfun:_ZNSt3__16__sortIRNS_6__lessIllEEPlEEvT0_S5_T_=uninstrumented\nfun:_ZNSt3__16__sortIRNS_6__lessImmEEPmEEvT0_S5_T_=uninstrumented\nfun:_ZNSt3__16__sortIRNS_6__lessIssEEPsEEvT0_S5_T_=uninstrumented\nfun:_ZNSt3__16__sortIRNS_6__lessIttEEPtEEvT0_S5_T_=uninstrumented\nfun:_ZNSt3__16__sortIRNS_6__lessIwwEEPwEEvT0_S5_T_=uninstrumented\nfun:_ZNSt3__16__sortIRNS_6__lessIxxEEPxEEvT0_S5_T_=uninstrumented\nfun:_ZNSt3__16__sortIRNS_6__lessIyyEEPyEEvT0_S5_T_=uninstrumented\nfun:_ZNSt3__16chrono12steady_clock3nowEv=uninstrumented\nfun:_ZNSt3__16chrono12system_clock11from_time_tEl=uninstrumented\nfun:_ZNSt3__16chrono12system_clock3nowEv=uninstrumented\nfun:_Z
NSt3__16chrono12system_clock9to_time_tERKNS0_10time_pointIS1_NS0_8durationIxNS_5ratioILl1ELl1000000EEEEEEE=uninstrumented\nfun:_ZNSt3__16futureIvE3getEv=uninstrumented\nfun:_ZNSt3__16futureIvEC1EPNS_17__assoc_sub_stateE=uninstrumented\nfun:_ZNSt3__16futureIvEC2EPNS_17__assoc_sub_stateE=uninstrumented\nfun:_ZNSt3__16futureIvED1Ev=uninstrumented\nfun:_ZNSt3__16futureIvED2Ev=uninstrumented\nfun:_ZNSt3__16gslice6__initEm=uninstrumented\nfun:_ZNSt3__16locale14__install_ctorERKS0_PNS0_5facetEl=uninstrumented\nfun:_ZNSt3__16locale2id5__getEv=uninstrumented\nfun:_ZNSt3__16locale2id6__initEv=uninstrumented\nfun:_ZNSt3__16locale5facet16__on_zero_sharedEv=uninstrumented\nfun:_ZNSt3__16locale5facetD0Ev=uninstrumented\nfun:_ZNSt3__16locale5facetD1Ev=uninstrumented\nfun:_ZNSt3__16locale5facetD2Ev=uninstrumented\nfun:_ZNSt3__16locale6globalERKS0_=uninstrumented\nfun:_ZNSt3__16locale7classicEv=uninstrumented\nfun:_ZNSt3__16locale8__globalEv=uninstrumented\nfun:_ZNSt3__16localeC1EPKc=uninstrumented\nfun:_ZNSt3__16localeC1ERKNS_12basic_stringIcNS_11char_traitsIcEENS_9allocatorIcEEEE=uninstrumented\nfun:_ZNSt3__16localeC1ERKS0_=uninstrumented\nfun:_ZNSt3__16localeC1ERKS0_PKci=uninstrumented\nfun:_ZNSt3__16localeC1ERKS0_RKNS_12basic_stringIcNS_11char_traitsIcEENS_9allocatorIcEEEEi=uninstrumented\nfun:_ZNSt3__16localeC1ERKS0_S2_i=uninstrumented\nfun:_ZNSt3__16localeC1Ev=uninstrumented\nfun:_ZNSt3__16localeC2EPKc=uninstrumented\nfun:_ZNSt3__16localeC2ERKNS_12basic_stringIcNS_11char_traitsIcEENS_9allocatorIcEEEE=uninstrumented\nfun:_ZNSt3__16localeC2ERKS0_=uninstrumented\nfun:_ZNSt3__16localeC2ERKS0_PKci=uninstrumented\nfun:_ZNSt3__16localeC2ERKS0_RKNS_12basic_stringIcNS_11char_traitsIcEENS_9allocatorIcEEEEi=uninstrumented\nfun:_ZNSt3__16localeC2ERKS0_S2_i=uninstrumented\nfun:_ZNSt3__16localeC2Ev=uninstrumented\nfun:_ZNSt3__16localeD1Ev=uninstrumented\nfun:_ZNSt3__16localeD2Ev=uninstrumented\nfun:_ZNSt3__16localeaSERKS0_=uninstrumented\nfun:_ZNSt3__16stoullERKNS_12basic_stringIcNS_11char_
traitsIcEENS_9allocatorIcEEEEPmi=uninstrumented\nfun:_ZNSt3__16stoullERKNS_12basic_stringIwNS_11char_traitsIwEENS_9allocatorIwEEEEPmi=uninstrumented\nfun:_ZNSt3__16thread20hardware_concurrencyEv=uninstrumented\nfun:_ZNSt3__16thread4joinEv=uninstrumented\nfun:_ZNSt3__16thread6detachEv=uninstrumented\nfun:_ZNSt3__16threadD1Ev=uninstrumented\nfun:_ZNSt3__16threadD2Ev=uninstrumented\nfun:_ZNSt3__17__sort5IRNS_6__lessIeeEEPeEEjT0_S5_S5_S5_S5_T_=uninstrumented\nfun:_ZNSt3__17codecvtIDiDu11__mbstate_tED0Ev=uninstrumented\nfun:_ZNSt3__17codecvtIDiDu11__mbstate_tED1Ev=uninstrumented\nfun:_ZNSt3__17codecvtIDiDu11__mbstate_tED2Ev=uninstrumented\nfun:_ZNSt3__17codecvtIDic11__mbstate_tED0Ev=uninstrumented\nfun:_ZNSt3__17codecvtIDic11__mbstate_tED1Ev=uninstrumented\nfun:_ZNSt3__17codecvtIDic11__mbstate_tED2Ev=uninstrumented\nfun:_ZNSt3__17codecvtIDsDu11__mbstate_tED0Ev=uninstrumented\nfun:_ZNSt3__17codecvtIDsDu11__mbstate_tED1Ev=uninstrumented\nfun:_ZNSt3__17codecvtIDsDu11__mbstate_tED2Ev=uninstrumented\nfun:_ZNSt3__17codecvtIDsc11__mbstate_tED0Ev=uninstrumented\nfun:_ZNSt3__17codecvtIDsc11__mbstate_tED1Ev=uninstrumented\nfun:_ZNSt3__17codecvtIDsc11__mbstate_tED2Ev=uninstrumented\nfun:_ZNSt3__17codecvtIcc11__mbstate_tED0Ev=uninstrumented\nfun:_ZNSt3__17codecvtIcc11__mbstate_tED1Ev=uninstrumented\nfun:_ZNSt3__17codecvtIcc11__mbstate_tED2Ev=uninstrumented\nfun:_ZNSt3__17codecvtIwc11__mbstate_tEC1EPKcm=uninstrumented\nfun:_ZNSt3__17codecvtIwc11__mbstate_tEC1Em=uninstrumented\nfun:_ZNSt3__17codecvtIwc11__mbstate_tEC2EPKcm=uninstrumented\nfun:_ZNSt3__17codecvtIwc11__mbstate_tEC2Em=uninstrumented\nfun:_ZNSt3__17codecvtIwc11__mbstate_tED0Ev=uninstrumented\nfun:_ZNSt3__17codecvtIwc11__mbstate_tED1Ev=uninstrumented\nfun:_ZNSt3__17codecvtIwc11__mbstate_tED2Ev=uninstrumented\nfun:_ZNSt3__17collateIcED0Ev=uninstrumented\nfun:_ZNSt3__17collateIcED1Ev=uninstrumented\nfun:_ZNSt3__17collateIcED2Ev=uninstrumented\nfun:_ZNSt3__17collateIwED0Ev=uninstrumented\nfun:_ZNSt3__17collateIwED1Ev=uninstrum
ented\nfun:_ZNSt3__17collateIwED2Ev=uninstrumented\nfun:_ZNSt3__17promiseIvE10get_futureEv=uninstrumented\nfun:_ZNSt3__17promiseIvE13set_exceptionESt13exception_ptr=uninstrumented\nfun:_ZNSt3__17promiseIvE24set_value_at_thread_exitEv=uninstrumented\nfun:_ZNSt3__17promiseIvE28set_exception_at_thread_exitESt13exception_ptr=uninstrumented\nfun:_ZNSt3__17promiseIvE9set_valueEv=uninstrumented\nfun:_ZNSt3__17promiseIvEC1Ev=uninstrumented\nfun:_ZNSt3__17promiseIvEC2Ev=uninstrumented\nfun:_ZNSt3__17promiseIvED1Ev=uninstrumented\nfun:_ZNSt3__17promiseIvED2Ev=uninstrumented\nfun:_ZNSt3__18__c_node5__addEPNS_8__i_nodeE=uninstrumented\nfun:_ZNSt3__18__c_nodeD0Ev=uninstrumented\nfun:_ZNSt3__18__c_nodeD1Ev=uninstrumented\nfun:_ZNSt3__18__c_nodeD2Ev=uninstrumented\nfun:_ZNSt3__18__get_dbEv=uninstrumented\nfun:_ZNSt3__18__i_nodeD1Ev=uninstrumented\nfun:_ZNSt3__18__i_nodeD2Ev=uninstrumented\nfun:_ZNSt3__18__rs_getEv=uninstrumented\nfun:_ZNSt3__18__sp_mut4lockEv=uninstrumented\nfun:_ZNSt3__18__sp_mut6unlockEv=uninstrumented\nfun:_ZNSt3__18ios_base15sync_with_stdioEb=uninstrumented\nfun:_ZNSt3__18ios_base16__call_callbacksENS0_5eventE=uninstrumented\nfun:_ZNSt3__18ios_base17register_callbackEPFvNS0_5eventERS0_iEi=uninstrumented\nfun:_ZNSt3__18ios_base33__set_badbit_and_consider_rethrowEv=uninstrumented\nfun:_ZNSt3__18ios_base34__set_failbit_and_consider_rethrowEv=uninstrumented\nfun:_ZNSt3__18ios_base4InitC1Ev=uninstrumented\nfun:_ZNSt3__18ios_base4InitC2Ev=uninstrumented\nfun:_ZNSt3__18ios_base4InitD1Ev=uninstrumented\nfun:_ZNSt3__18ios_base4InitD2Ev=uninstrumented\nfun:_ZNSt3__18ios_base4initEPv=uninstrumented\nfun:_ZNSt3__18ios_base4moveERS0_=uninstrumented\nfun:_ZNSt3__18ios_base4swapERS0_=uninstrumented\nfun:_ZNSt3__18ios_base5clearEj=uninstrumented\nfun:_ZNSt3__18ios_base5imbueERKNS_6localeE=uninstrumented\nfun:_ZNSt3__18ios_base5iwordEi=uninstrumented\nfun:_ZNSt3__18ios_base5pwordEi=uninstrumented\nfun:_ZNSt3__18ios_base6xallocEv=uninstrumented\nfun:_ZNSt3__18ios_base7copyfmtER
KS0_=uninstrumented\nfun:_ZNSt3__18ios_base7failureC1EPKcRKNS_10error_codeE=uninstrumented\nfun:_ZNSt3__18ios_base7failureC1ERKNS_12basic_stringIcNS_11char_traitsIcEENS_9allocatorIcEEEERKNS_10error_codeE=uninstrumented\nfun:_ZNSt3__18ios_base7failureC2EPKcRKNS_10error_codeE=uninstrumented\nfun:_ZNSt3__18ios_base7failureC2ERKNS_12basic_stringIcNS_11char_traitsIcEENS_9allocatorIcEEEERKNS_10error_codeE=uninstrumented\nfun:_ZNSt3__18ios_base7failureD0Ev=uninstrumented\nfun:_ZNSt3__18ios_base7failureD1Ev=uninstrumented\nfun:_ZNSt3__18ios_base7failureD2Ev=uninstrumented\nfun:_ZNSt3__18ios_baseD0Ev=uninstrumented\nfun:_ZNSt3__18ios_baseD1Ev=uninstrumented\nfun:_ZNSt3__18ios_baseD2Ev=uninstrumented\nfun:_ZNSt3__18numpunctIcEC1Em=uninstrumented\nfun:_ZNSt3__18numpunctIcEC2Em=uninstrumented\nfun:_ZNSt3__18numpunctIcED0Ev=uninstrumented\nfun:_ZNSt3__18numpunctIcED1Ev=uninstrumented\nfun:_ZNSt3__18numpunctIcED2Ev=uninstrumented\nfun:_ZNSt3__18numpunctIwEC1Em=uninstrumented\nfun:_ZNSt3__18numpunctIwEC2Em=uninstrumented\nfun:_ZNSt3__18numpunctIwED0Ev=uninstrumented\nfun:_ZNSt3__18numpunctIwED1Ev=uninstrumented\nfun:_ZNSt3__18numpunctIwED2Ev=uninstrumented\nfun:_ZNSt3__18valarrayImE6resizeEmm=uninstrumented\nfun:_ZNSt3__18valarrayImEC1Em=uninstrumented\nfun:_ZNSt3__18valarrayImEC2Em=uninstrumented\nfun:_ZNSt3__18valarrayImED1Ev=uninstrumented\nfun:_ZNSt3__18valarrayImED2Ev=uninstrumented\nfun:_ZNSt3__19__num_getIcE17__stage2_int_loopEciPcRS2_RjcRKNS_12basic_stringIcNS_11char_traitsIcEENS_9allocatorIcEEEEPjRSD_S2_=uninstrumented\nfun:_ZNSt3__19__num_getIcE17__stage2_int_prepERNS_8ios_baseEPcRc=uninstrumented\nfun:_ZNSt3__19__num_getIcE19__stage2_float_loopEcRbRcPcRS4_ccRKNS_12basic_stringIcNS_11char_traitsIcEENS_9allocatorIcEEEEPjRSE_RjS4_=uninstrumented\nfun:_ZNSt3__19__num_getIcE19__stage2_float_prepERNS_8ios_baseEPcRcS5_=uninstrumented\nfun:_ZNSt3__19__num_getIwE17__stage2_int_loopEwiPcRS2_RjwRKNS_12basic_stringIcNS_11char_traitsIcEENS_9allocatorIcEEEEPjRSD_Pw=uninstrumented\nfu
n:_ZNSt3__19__num_getIwE17__stage2_int_prepERNS_8ios_baseEPwRw=uninstrumented\nfun:_ZNSt3__19__num_getIwE19__stage2_float_loopEwRbRcPcRS4_wwRKNS_12basic_stringIcNS_11char_traitsIcEENS_9allocatorIcEEEEPjRSE_RjPw=uninstrumented\nfun:_ZNSt3__19__num_getIwE19__stage2_float_prepERNS_8ios_baseEPwRwS5_=uninstrumented\nfun:_ZNSt3__19__num_putIcE21__widen_and_group_intEPcS2_S2_S2_RS2_S3_RKNS_6localeE=uninstrumented\nfun:_ZNSt3__19__num_putIcE23__widen_and_group_floatEPcS2_S2_S2_RS2_S3_RKNS_6localeE=uninstrumented\nfun:_ZNSt3__19__num_putIwE21__widen_and_group_intEPcS2_S2_PwRS3_S4_RKNS_6localeE=uninstrumented\nfun:_ZNSt3__19__num_putIwE23__widen_and_group_floatEPcS2_S2_PwRS3_S4_RKNS_6localeE=uninstrumented\nfun:_ZNSt3__19basic_iosIcNS_11char_traitsIcEEE7copyfmtERKS3_=uninstrumented\nfun:_ZNSt3__19basic_iosIcNS_11char_traitsIcEEED0Ev=uninstrumented\nfun:_ZNSt3__19basic_iosIcNS_11char_traitsIcEEED1Ev=uninstrumented\nfun:_ZNSt3__19basic_iosIcNS_11char_traitsIcEEED2Ev=uninstrumented\nfun:_ZNSt3__19basic_iosIwNS_11char_traitsIwEEE7copyfmtERKS3_=uninstrumented\nfun:_ZNSt3__19basic_iosIwNS_11char_traitsIwEEED0Ev=uninstrumented\nfun:_ZNSt3__19basic_iosIwNS_11char_traitsIwEEED1Ev=uninstrumented\nfun:_ZNSt3__19basic_iosIwNS_11char_traitsIwEEED2Ev=uninstrumented\nfun:_ZNSt3__19money_getIcNS_19istreambuf_iteratorIcNS_11char_traitsIcEEEEE8__do_getERS4_S4_bRKNS_6localeEjRjRbRKNS_5ctypeIcEERNS_10unique_ptrIcPFvPvEEERPcSM_=uninstrumented\nfun:_ZNSt3__19money_getIwNS_19istreambuf_iteratorIwNS_11char_traitsIwEEEEE8__do_getERS4_S4_bRKNS_6localeEjRjRbRKNS_5ctypeIwEERNS_10unique_ptrIwPFvPvEEERPwSM_=uninstrumented\nfun:_ZNSt3__19strstreamD0Ev=uninstrumented\nfun:_ZNSt3__19strstreamD1Ev=uninstrumented\nfun:_ZNSt3__19strstreamD2Ev=uninstrumented\nfun:_ZNSt3__19to_stringEd=uninstrumented\nfun:_ZNSt3__19to_stringEe=uninstrumented\nfun:_ZNSt3__19to_stringEf=uninstrumented\nfun:_ZNSt3__19to_stringEi=uninstrumented\nfun:_ZNSt3__19to_stringEj=uninstrumented\nfun:_ZNSt3__19to_stringEl=uninstrumented\nfun:_
ZNSt3__19to_stringEm=uninstrumented\nfun:_ZNSt3__19to_stringEx=uninstrumented\nfun:_ZNSt3__19to_stringEy=uninstrumented\nfun:_ZNSt3__1plIcNS_11char_traitsIcEENS_9allocatorIcEEEENS_12basic_stringIT_T0_T1_EEPKS6_RKS9_=uninstrumented\nfun:_ZNSt8bad_castC1Ev=uninstrumented\nfun:_ZNSt8bad_castC2Ev=uninstrumented\nfun:_ZNSt8bad_castD0Ev=uninstrumented\nfun:_ZNSt8bad_castD1Ev=uninstrumented\nfun:_ZNSt8bad_castD2Ev=uninstrumented\nfun:_ZNSt9bad_allocC1Ev=uninstrumented\nfun:_ZNSt9bad_allocC2Ev=uninstrumented\nfun:_ZNSt9bad_allocD0Ev=uninstrumented\nfun:_ZNSt9bad_allocD1Ev=uninstrumented\nfun:_ZNSt9bad_allocD2Ev=uninstrumented\nfun:_ZNSt9exceptionD0Ev=uninstrumented\nfun:_ZNSt9exceptionD1Ev=uninstrumented\nfun:_ZNSt9exceptionD2Ev=uninstrumented\nfun:_ZNSt9type_infoD0Ev=uninstrumented\nfun:_ZNSt9type_infoD1Ev=uninstrumented\nfun:_ZNSt9type_infoD2Ev=uninstrumented\nfun:_ZSt10unexpectedv=uninstrumented\nfun:_ZSt13get_terminatev=uninstrumented\nfun:_ZSt13set_terminatePFvvE=uninstrumented\nfun:_ZSt14get_unexpectedv=uninstrumented\nfun:_ZSt14set_unexpectedPFvvE=uninstrumented\nfun:_ZSt15get_new_handlerv=uninstrumented\nfun:_ZSt15set_new_handlerPFvvE=uninstrumented\nfun:_ZSt17__throw_bad_allocv=uninstrumented\nfun:_ZSt17current_exceptionv=uninstrumented\nfun:_ZSt17rethrow_exceptionSt13exception_ptr=uninstrumented\nfun:_ZSt18uncaught_exceptionv=uninstrumented\nfun:_ZSt19uncaught_exceptionsv=uninstrumented\nfun:_ZSt9terminatev=uninstrumented\nfun:_ZThn16_NSt3__114basic_iostreamIcNS_11char_traitsIcEEED0Ev=uninstrumented\nfun:_ZThn16_NSt3__114basic_iostreamIcNS_11char_traitsIcEEED1Ev=uninstrumented\nfun:_ZThn16_NSt3__19strstreamD0Ev=uninstrumented\nfun:_ZThn16_NSt3__19strstreamD1Ev=uninstrumented\nfun:_ZTv0_n24_NSt3__110istrstreamD0Ev=uninstrumented\nfun:_ZTv0_n24_NSt3__110istrstreamD1Ev=uninstrumented\nfun:_ZTv0_n24_NSt3__110ostrstreamD0Ev=uninstrumented\nfun:_ZTv0_n24_NSt3__110ostrstreamD1Ev=uninstrumented\nfun:_ZTv0_n24_NSt3__113basic_istreamIcNS_11char_traitsIcEEED0Ev=uninstrumente
d\nfun:_ZTv0_n24_NSt3__113basic_istreamIcNS_11char_traitsIcEEED1Ev=uninstrumented\nfun:_ZTv0_n24_NSt3__113basic_istreamIwNS_11char_traitsIwEEED0Ev=uninstrumented\nfun:_ZTv0_n24_NSt3__113basic_istreamIwNS_11char_traitsIwEEED1Ev=uninstrumented\nfun:_ZTv0_n24_NSt3__113basic_ostreamIcNS_11char_traitsIcEEED0Ev=uninstrumented\nfun:_ZTv0_n24_NSt3__113basic_ostreamIcNS_11char_traitsIcEEED1Ev=uninstrumented\nfun:_ZTv0_n24_NSt3__113basic_ostreamIwNS_11char_traitsIwEEED0Ev=uninstrumented\nfun:_ZTv0_n24_NSt3__113basic_ostreamIwNS_11char_traitsIwEEED1Ev=uninstrumented\nfun:_ZTv0_n24_NSt3__114basic_iostreamIcNS_11char_traitsIcEEED0Ev=uninstrumented\nfun:_ZTv0_n24_NSt3__114basic_iostreamIcNS_11char_traitsIcEEED1Ev=uninstrumented\nfun:_ZTv0_n24_NSt3__19strstreamD0Ev=uninstrumented\nfun:_ZTv0_n24_NSt3__19strstreamD1Ev=uninstrumented\nfun:_ZdaPv=uninstrumented\nfun:_ZdaPvRKSt9nothrow_t=uninstrumented\nfun:_ZdaPvSt11align_val_t=uninstrumented\nfun:_ZdaPvSt11align_val_tRKSt9nothrow_t=uninstrumented\nfun:_ZdaPvm=uninstrumented\nfun:_ZdaPvmSt11align_val_t=uninstrumented\nfun:_ZdlPv=uninstrumented\nfun:_ZdlPvRKSt9nothrow_t=uninstrumented\nfun:_ZdlPvSt11align_val_t=uninstrumented\nfun:_ZdlPvSt11align_val_tRKSt9nothrow_t=uninstrumented\nfun:_ZdlPvm=uninstrumented\nfun:_ZdlPvmSt11align_val_t=uninstrumented\nfun:_Znam=uninstrumented\nfun:_ZnamRKSt9nothrow_t=uninstrumented\nfun:_ZnamSt11align_val_t=uninstrumented\nfun:_ZnamSt11align_val_tRKSt9nothrow_t=uninstrumented\nfun:_Znwm=uninstrumented\nfun:_ZnwmRKSt9nothrow_t=uninstrumented\nfun:_ZnwmSt11align_val_t=uninstrumented\nfun:_ZnwmSt11align_val_tRKSt9nothrow_t=uninstrumented\nfun:__cxa_allocate_dependent_exception=uninstrumented\nfun:__cxa_allocate_exception=uninstrumented\nfun:__cxa_bad_cast=uninstrumented\nfun:__cxa_bad_typeid=uninstrumented\nfun:__cxa_begin_catch=uninstrumented\nfun:__cxa_call_unexpected=uninstrumented\nfun:__cxa_current_exception_type=uninstrumented\nfun:__cxa_current_primary_exception=uninstrumented\nfun:__cxa_decrement_
exception_refcount=uninstrumented\nfun:__cxa_deleted_virtual=uninstrumented\nfun:__cxa_demangle=uninstrumented\nfun:__cxa_end_catch=uninstrumented\nfun:__cxa_free_dependent_exception=uninstrumented\nfun:__cxa_free_exception=uninstrumented\nfun:__cxa_get_exception_ptr=uninstrumented\nfun:__cxa_get_globals=uninstrumented\nfun:__cxa_get_globals_fast=uninstrumented\nfun:__cxa_guard_abort=uninstrumented\nfun:__cxa_guard_acquire=uninstrumented\nfun:__cxa_guard_release=uninstrumented\nfun:__cxa_increment_exception_refcount=uninstrumented\nfun:__cxa_pure_virtual=uninstrumented\nfun:__cxa_rethrow=uninstrumented\nfun:__cxa_rethrow_primary_exception=uninstrumented\nfun:__cxa_thread_atexit=uninstrumented\nfun:__cxa_throw=uninstrumented\nfun:__cxa_throw_bad_array_new_length=uninstrumented\nfun:__cxa_uncaught_exception=uninstrumented\nfun:__cxa_uncaught_exceptions=uninstrumented\nfun:__cxa_vec_cctor=uninstrumented\nfun:__cxa_vec_cleanup=uninstrumented\nfun:__cxa_vec_ctor=uninstrumented\nfun:__cxa_vec_delete=uninstrumented\nfun:__cxa_vec_delete2=uninstrumented\nfun:__cxa_vec_delete3=uninstrumented\nfun:__cxa_vec_dtor=uninstrumented\nfun:__cxa_vec_new=uninstrumented\nfun:__cxa_vec_new2=uninstrumented\nfun:__cxa_vec_new3=uninstrumented\nfun:__divti3=uninstrumented\nfun:__dynamic_cast=uninstrumented\nfun:__gxx_personality_v0=uninstrumented\nfun:__udivmodti4=uninstrumented\n"
  },
  {
    "path": "runtime/dfsan/libc_ubuntu1404_abilist.txt",
    "content": "fun:_Exit=uninstrumented\nfun:_IO_adjust_column=uninstrumented\nfun:_IO_adjust_wcolumn=uninstrumented\nfun:_IO_default_doallocate=uninstrumented\nfun:_IO_default_finish=uninstrumented\nfun:_IO_default_pbackfail=uninstrumented\nfun:_IO_default_uflow=uninstrumented\nfun:_IO_default_xsgetn=uninstrumented\nfun:_IO_default_xsputn=uninstrumented\nfun:_IO_do_write=uninstrumented\nfun:_IO_doallocbuf=uninstrumented\nfun:_IO_fclose=uninstrumented\nfun:_IO_fdopen=uninstrumented\nfun:_IO_feof=uninstrumented\nfun:_IO_ferror=uninstrumented\nfun:_IO_fflush=uninstrumented\nfun:_IO_fgetpos=uninstrumented\nfun:_IO_fgetpos64=uninstrumented\nfun:_IO_fgets=uninstrumented\nfun:_IO_file_attach=uninstrumented\nfun:_IO_file_close=uninstrumented\nfun:_IO_file_close_it=uninstrumented\nfun:_IO_file_doallocate=uninstrumented\nfun:_IO_file_finish=uninstrumented\nfun:_IO_file_fopen=uninstrumented\nfun:_IO_file_init=uninstrumented\nfun:_IO_file_open=uninstrumented\nfun:_IO_file_overflow=uninstrumented\nfun:_IO_file_read=uninstrumented\nfun:_IO_file_seek=uninstrumented\nfun:_IO_file_seekoff=uninstrumented\nfun:_IO_file_setbuf=uninstrumented\nfun:_IO_file_stat=uninstrumented\nfun:_IO_file_sync=uninstrumented\nfun:_IO_file_underflow=uninstrumented\nfun:_IO_file_write=uninstrumented\nfun:_IO_file_xsputn=uninstrumented\nfun:_IO_flockfile=uninstrumented\nfun:_IO_flush_all=uninstrumented\nfun:_IO_flush_all_linebuffered=uninstrumented\nfun:_IO_fopen=uninstrumented\nfun:_IO_fprintf=uninstrumented\nfun:_IO_fputs=uninstrumented\nfun:_IO_fread=uninstrumented\nfun:_IO_free_backup_area=uninstrumented\nfun:_IO_free_wbackup_area=uninstrumented\nfun:_IO_fsetpos=uninstrumented\nfun:_IO_fsetpos64=uninstrumented\nfun:_IO_ftell=uninstrumented\nfun:_IO_ftrylockfile=uninstrumented\nfun:_IO_funlockfile=uninstrumented\nfun:_IO_fwrite=uninstrumented\nfun:_IO_getc=uninstrumented\nfun:_IO_getline=uninstrumented\nfun:_IO_getline_info=uninstrumented\nfun:_IO_gets=uninstrumented\nfun:_IO_init=uninstrumented\nfun
:_IO_init_marker=uninstrumented\nfun:_IO_init_wmarker=uninstrumented\nfun:_IO_iter_begin=uninstrumented\nfun:_IO_iter_end=uninstrumented\nfun:_IO_iter_file=uninstrumented\nfun:_IO_iter_next=uninstrumented\nfun:_IO_least_wmarker=uninstrumented\nfun:_IO_link_in=uninstrumented\nfun:_IO_list_lock=uninstrumented\nfun:_IO_list_resetlock=uninstrumented\nfun:_IO_list_unlock=uninstrumented\nfun:_IO_marker_delta=uninstrumented\nfun:_IO_marker_difference=uninstrumented\nfun:_IO_padn=uninstrumented\nfun:_IO_peekc_locked=uninstrumented\nfun:_IO_popen=uninstrumented\nfun:_IO_printf=uninstrumented\nfun:_IO_proc_close=uninstrumented\nfun:_IO_proc_open=uninstrumented\nfun:_IO_putc=uninstrumented\nfun:_IO_puts=uninstrumented\nfun:_IO_remove_marker=uninstrumented\nfun:_IO_seekmark=uninstrumented\nfun:_IO_seekoff=uninstrumented\nfun:_IO_seekpos=uninstrumented\nfun:_IO_seekwmark=uninstrumented\nfun:_IO_setb=uninstrumented\nfun:_IO_setbuffer=uninstrumented\nfun:_IO_setvbuf=uninstrumented\nfun:_IO_sgetn=uninstrumented\nfun:_IO_sprintf=uninstrumented\nfun:_IO_sputbackc=uninstrumented\nfun:_IO_sputbackwc=uninstrumented\nfun:_IO_sscanf=uninstrumented\nfun:_IO_str_init_readonly=uninstrumented\nfun:_IO_str_init_static=uninstrumented\nfun:_IO_str_overflow=uninstrumented\nfun:_IO_str_pbackfail=uninstrumented\nfun:_IO_str_seekoff=uninstrumented\nfun:_IO_str_underflow=uninstrumented\nfun:_IO_sungetc=uninstrumented\nfun:_IO_sungetwc=uninstrumented\nfun:_IO_switch_to_get_mode=uninstrumented\nfun:_IO_switch_to_main_wget_area=uninstrumented\nfun:_IO_switch_to_wbackup_area=uninstrumented\nfun:_IO_switch_to_wget_mode=uninstrumented\nfun:_IO_un_link=uninstrumented\nfun:_IO_ungetc=uninstrumented\nfun:_IO_unsave_markers=uninstrumented\nfun:_IO_unsave_wmarkers=uninstrumented\nfun:_IO_vfprintf=uninstrumented\nfun:_IO_vfscanf=uninstrumented\nfun:_IO_vsprintf=uninstrumented\nfun:_IO_wdefault_doallocate=uninstrumented\nfun:_IO_wdefault_finish=uninstrumented\nfun:_IO_wdefault_pbackfail=uninstrumented\nfun:_IO_wd
efault_uflow=uninstrumented\nfun:_IO_wdefault_xsgetn=uninstrumented\nfun:_IO_wdefault_xsputn=uninstrumented\nfun:_IO_wdo_write=uninstrumented\nfun:_IO_wdoallocbuf=uninstrumented\nfun:_IO_wfile_overflow=uninstrumented\nfun:_IO_wfile_seekoff=uninstrumented\nfun:_IO_wfile_sync=uninstrumented\nfun:_IO_wfile_underflow=uninstrumented\nfun:_IO_wfile_xsputn=uninstrumented\nfun:_IO_wmarker_delta=uninstrumented\nfun:_IO_wsetb=uninstrumented\nfun:_Unwind_Backtrace=uninstrumented\nfun:_Unwind_DeleteException=uninstrumented\nfun:_Unwind_FindEnclosingFunction=uninstrumented\nfun:_Unwind_Find_FDE=uninstrumented\nfun:_Unwind_ForcedUnwind=uninstrumented\nfun:_Unwind_GetCFA=uninstrumented\nfun:_Unwind_GetDataRelBase=uninstrumented\nfun:_Unwind_GetGR=uninstrumented\nfun:_Unwind_GetIP=uninstrumented\nfun:_Unwind_GetIPInfo=uninstrumented\nfun:_Unwind_GetLanguageSpecificData=uninstrumented\nfun:_Unwind_GetRegionStart=uninstrumented\nfun:_Unwind_GetTextRelBase=uninstrumented\nfun:_Unwind_RaiseException=uninstrumented\nfun:_Unwind_Resume=uninstrumented\nfun:_Unwind_Resume_or_Rethrow=uninstrumented\nfun:_Unwind_SetGR=uninstrumented\nfun:_Unwind_SetIP=uninstrumented\nfun:__absvdi2=uninstrumented\nfun:__absvsi2=uninstrumented\nfun:__absvti2=uninstrumented\nfun:__acos_finite=uninstrumented\nfun:__acosf_finite=uninstrumented\nfun:__acosh_finite=uninstrumented\nfun:__acoshf_finite=uninstrumented\nfun:__acoshl_finite=uninstrumented\nfun:__acosl_finite=uninstrumented\nfun:__addtf3=uninstrumented\nfun:__addvdi3=uninstrumented\nfun:__addvsi3=uninstrumented\nfun:__addvti3=uninstrumented\nfun:__adjtimex=uninstrumented\nfun:__arch_prctl=uninstrumented\nfun:__argz_count=uninstrumented\nfun:__argz_next=uninstrumented\nfun:__argz_stringify=uninstrumented\nfun:__ashlti3=uninstrumented\nfun:__ashrti3=uninstrumented\nfun:__asin_finite=uninstrumented\nfun:__asinf_finite=uninstrumented\nfun:__asinl_finite=uninstrumented\nfun:__asprintf=uninstrumented\nfun:__asprintf_chk=uninstrumented\nfun:__assert=uninstrumen
ted\nfun:__assert_fail=uninstrumented\nfun:__assert_perror_fail=uninstrumented\nfun:__atan2_finite=uninstrumented\nfun:__atan2f_finite=uninstrumented\nfun:__atan2l_finite=uninstrumented\nfun:__atanh_finite=uninstrumented\nfun:__atanhf_finite=uninstrumented\nfun:__atanhl_finite=uninstrumented\nfun:__b64_ntop=uninstrumented\nfun:__b64_pton=uninstrumented\nfun:__backtrace=uninstrumented\nfun:__backtrace_symbols=uninstrumented\nfun:__backtrace_symbols_fd=uninstrumented\nfun:__bid128_abs=uninstrumented\nfun:__bid128_add=uninstrumented\nfun:__bid128_class=uninstrumented\nfun:__bid128_copy=uninstrumented\nfun:__bid128_copySign=uninstrumented\nfun:__bid128_div=uninstrumented\nfun:__bid128_fma=uninstrumented\nfun:__bid128_from_int32=uninstrumented\nfun:__bid128_from_int64=uninstrumented\nfun:__bid128_from_uint32=uninstrumented\nfun:__bid128_from_uint64=uninstrumented\nfun:__bid128_isCanonical=uninstrumented\nfun:__bid128_isFinite=uninstrumented\nfun:__bid128_isInf=uninstrumented\nfun:__bid128_isNaN=uninstrumented\nfun:__bid128_isNormal=uninstrumented\nfun:__bid128_isSignaling=uninstrumented\nfun:__bid128_isSigned=uninstrumented\nfun:__bid128_isSubnormal=uninstrumented\nfun:__bid128_isZero=uninstrumented\nfun:__bid128_mul=uninstrumented\nfun:__bid128_negate=uninstrumented\nfun:__bid128_quiet_equal=uninstrumented\nfun:__bid128_quiet_greater=uninstrumented\nfun:__bid128_quiet_greater_equal=uninstrumented\nfun:__bid128_quiet_greater_unordered=uninstrumented\nfun:__bid128_quiet_less=uninstrumented\nfun:__bid128_quiet_less_equal=uninstrumented\nfun:__bid128_quiet_less_unordered=uninstrumented\nfun:__bid128_quiet_not_equal=uninstrumented\nfun:__bid128_quiet_not_greater=uninstrumented\nfun:__bid128_quiet_not_less=uninstrumented\nfun:__bid128_quiet_ordered=uninstrumented\nfun:__bid128_quiet_unordered=uninstrumented\nfun:__bid128_radix=uninstrumented\nfun:__bid128_sameQuantum=uninstrumented\nfun:__bid128_signaling_greater=uninstrumented\nfun:__bid128_signaling_greater_equal=uninstrume
nted\nfun:__bid128_signaling_greater_unordered=uninstrumented\nfun:__bid128_signaling_less=uninstrumented\nfun:__bid128_signaling_less_equal=uninstrumented\nfun:__bid128_signaling_less_unordered=uninstrumented\nfun:__bid128_signaling_not_greater=uninstrumented\nfun:__bid128_signaling_not_less=uninstrumented\nfun:__bid128_sub=uninstrumented\nfun:__bid128_to_bid32=uninstrumented\nfun:__bid128_to_bid64=uninstrumented\nfun:__bid128_to_binary128=uninstrumented\nfun:__bid128_to_binary32=uninstrumented\nfun:__bid128_to_binary64=uninstrumented\nfun:__bid128_to_binary80=uninstrumented\nfun:__bid128_to_int32_ceil=uninstrumented\nfun:__bid128_to_int32_floor=uninstrumented\nfun:__bid128_to_int32_int=uninstrumented\nfun:__bid128_to_int32_rnint=uninstrumented\nfun:__bid128_to_int32_rninta=uninstrumented\nfun:__bid128_to_int32_xceil=uninstrumented\nfun:__bid128_to_int32_xfloor=uninstrumented\nfun:__bid128_to_int32_xint=uninstrumented\nfun:__bid128_to_int32_xrnint=uninstrumented\nfun:__bid128_to_int32_xrninta=uninstrumented\nfun:__bid128_to_int64_ceil=uninstrumented\nfun:__bid128_to_int64_floor=uninstrumented\nfun:__bid128_to_int64_int=uninstrumented\nfun:__bid128_to_int64_rnint=uninstrumented\nfun:__bid128_to_int64_rninta=uninstrumented\nfun:__bid128_to_int64_xceil=uninstrumented\nfun:__bid128_to_int64_xfloor=uninstrumented\nfun:__bid128_to_int64_xint=uninstrumented\nfun:__bid128_to_int64_xrnint=uninstrumented\nfun:__bid128_to_int64_xrninta=uninstrumented\nfun:__bid128_to_uint32_ceil=uninstrumented\nfun:__bid128_to_uint32_floor=uninstrumented\nfun:__bid128_to_uint32_int=uninstrumented\nfun:__bid128_to_uint32_rnint=uninstrumented\nfun:__bid128_to_uint32_rninta=uninstrumented\nfun:__bid128_to_uint32_xceil=uninstrumented\nfun:__bid128_to_uint32_xfloor=uninstrumented\nfun:__bid128_to_uint32_xint=uninstrumented\nfun:__bid128_to_uint32_xrnint=uninstrumented\nfun:__bid128_to_uint32_xrninta=uninstrumented\nfun:__bid128_to_uint64_ceil=uninstrumented\nfun:__bid128_to_uint64_floor=uninstrume
nted\nfun:__bid128_to_uint64_int=uninstrumented\nfun:__bid128_to_uint64_rnint=uninstrumented\nfun:__bid128_to_uint64_rninta=uninstrumented\nfun:__bid128_to_uint64_xceil=uninstrumented\nfun:__bid128_to_uint64_xfloor=uninstrumented\nfun:__bid128_to_uint64_xint=uninstrumented\nfun:__bid128_to_uint64_xrnint=uninstrumented\nfun:__bid128_to_uint64_xrninta=uninstrumented\nfun:__bid128_totalOrder=uninstrumented\nfun:__bid128_totalOrderMag=uninstrumented\nfun:__bid128dd_add=uninstrumented\nfun:__bid128dd_div=uninstrumented\nfun:__bid128dd_mul=uninstrumented\nfun:__bid128dd_sub=uninstrumented\nfun:__bid128ddd_fma=uninstrumented\nfun:__bid128ddq_fma=uninstrumented\nfun:__bid128dq_add=uninstrumented\nfun:__bid128dq_div=uninstrumented\nfun:__bid128dq_mul=uninstrumented\nfun:__bid128dq_sub=uninstrumented\nfun:__bid128dqd_fma=uninstrumented\nfun:__bid128dqq_fma=uninstrumented\nfun:__bid128qd_add=uninstrumented\nfun:__bid128qd_div=uninstrumented\nfun:__bid128qd_mul=uninstrumented\nfun:__bid128qd_sub=uninstrumented\nfun:__bid128qdd_fma=uninstrumented\nfun:__bid128qdq_fma=uninstrumented\nfun:__bid128qqd_fma=uninstrumented\nfun:__bid32_to_bid128=uninstrumented\nfun:__bid32_to_bid64=uninstrumented\nfun:__bid32_to_binary128=uninstrumented\nfun:__bid32_to_binary32=uninstrumented\nfun:__bid32_to_binary64=uninstrumented\nfun:__bid32_to_binary80=uninstrumented\nfun:__bid64_abs=uninstrumented\nfun:__bid64_add=uninstrumented\nfun:__bid64_class=uninstrumented\nfun:__bid64_copy=uninstrumented\nfun:__bid64_copySign=uninstrumented\nfun:__bid64_div=uninstrumented\nfun:__bid64_from_int32=uninstrumented\nfun:__bid64_from_int64=uninstrumented\nfun:__bid64_from_uint32=uninstrumented\nfun:__bid64_from_uint64=uninstrumented\nfun:__bid64_isCanonical=uninstrumented\nfun:__bid64_isFinite=uninstrumented\nfun:__bid64_isInf=uninstrumented\nfun:__bid64_isNaN=uninstrumented\nfun:__bid64_isNormal=uninstrumented\nfun:__bid64_isSignaling=uninstrumented\nfun:__bid64_isSigned=uninstrumented\nfun:__bid64_isSubnormal=
uninstrumented\nfun:__bid64_isZero=uninstrumented\nfun:__bid64_mul=uninstrumented\nfun:__bid64_negate=uninstrumented\nfun:__bid64_quiet_equal=uninstrumented\nfun:__bid64_quiet_greater=uninstrumented\nfun:__bid64_quiet_greater_equal=uninstrumented\nfun:__bid64_quiet_greater_unordered=uninstrumented\nfun:__bid64_quiet_less=uninstrumented\nfun:__bid64_quiet_less_equal=uninstrumented\nfun:__bid64_quiet_less_unordered=uninstrumented\nfun:__bid64_quiet_not_equal=uninstrumented\nfun:__bid64_quiet_not_greater=uninstrumented\nfun:__bid64_quiet_not_less=uninstrumented\nfun:__bid64_quiet_ordered=uninstrumented\nfun:__bid64_quiet_unordered=uninstrumented\nfun:__bid64_radix=uninstrumented\nfun:__bid64_sameQuantum=uninstrumented\nfun:__bid64_signaling_greater=uninstrumented\nfun:__bid64_signaling_greater_equal=uninstrumented\nfun:__bid64_signaling_greater_unordered=uninstrumented\nfun:__bid64_signaling_less=uninstrumented\nfun:__bid64_signaling_less_equal=uninstrumented\nfun:__bid64_signaling_less_unordered=uninstrumented\nfun:__bid64_signaling_not_greater=uninstrumented\nfun:__bid64_signaling_not_less=uninstrumented\nfun:__bid64_sub=uninstrumented\nfun:__bid64_to_bid128=uninstrumented\nfun:__bid64_to_bid32=uninstrumented\nfun:__bid64_to_binary128=uninstrumented\nfun:__bid64_to_binary32=uninstrumented\nfun:__bid64_to_binary64=uninstrumented\nfun:__bid64_to_binary80=uninstrumented\nfun:__bid64_to_int32_ceil=uninstrumented\nfun:__bid64_to_int32_floor=uninstrumented\nfun:__bid64_to_int32_int=uninstrumented\nfun:__bid64_to_int32_rnint=uninstrumented\nfun:__bid64_to_int32_rninta=uninstrumented\nfun:__bid64_to_int32_xceil=uninstrumented\nfun:__bid64_to_int32_xfloor=uninstrumented\nfun:__bid64_to_int32_xint=uninstrumented\nfun:__bid64_to_int32_xrnint=uninstrumented\nfun:__bid64_to_int32_xrninta=uninstrumented\nfun:__bid64_to_int64_ceil=uninstrumented\nfun:__bid64_to_int64_floor=uninstrumented\nfun:__bid64_to_int64_int=uninstrumented\nfun:__bid64_to_int64_rnint=uninstrumented\nfun:__bid6
4_to_int64_rninta=uninstrumented\nfun:__bid64_to_int64_xceil=uninstrumented\nfun:__bid64_to_int64_xfloor=uninstrumented\nfun:__bid64_to_int64_xint=uninstrumented\nfun:__bid64_to_int64_xrnint=uninstrumented\nfun:__bid64_to_int64_xrninta=uninstrumented\nfun:__bid64_to_uint32_ceil=uninstrumented\nfun:__bid64_to_uint32_floor=uninstrumented\nfun:__bid64_to_uint32_int=uninstrumented\nfun:__bid64_to_uint32_rnint=uninstrumented\nfun:__bid64_to_uint32_rninta=uninstrumented\nfun:__bid64_to_uint32_xceil=uninstrumented\nfun:__bid64_to_uint32_xfloor=uninstrumented\nfun:__bid64_to_uint32_xint=uninstrumented\nfun:__bid64_to_uint32_xrnint=uninstrumented\nfun:__bid64_to_uint32_xrninta=uninstrumented\nfun:__bid64_to_uint64_ceil=uninstrumented\nfun:__bid64_to_uint64_floor=uninstrumented\nfun:__bid64_to_uint64_int=uninstrumented\nfun:__bid64_to_uint64_rnint=uninstrumented\nfun:__bid64_to_uint64_rninta=uninstrumented\nfun:__bid64_to_uint64_xceil=uninstrumented\nfun:__bid64_to_uint64_xfloor=uninstrumented\nfun:__bid64_to_uint64_xint=uninstrumented\nfun:__bid64_to_uint64_xrnint=uninstrumented\nfun:__bid64_to_uint64_xrninta=uninstrumented\nfun:__bid64_totalOrder=uninstrumented\nfun:__bid64_totalOrderMag=uninstrumented\nfun:__bid64ddq_fma=uninstrumented\nfun:__bid64dq_add=uninstrumented\nfun:__bid64dq_div=uninstrumented\nfun:__bid64dq_mul=uninstrumented\nfun:__bid64dq_sub=uninstrumented\nfun:__bid64dqd_fma=uninstrumented\nfun:__bid64dqq_fma=uninstrumented\nfun:__bid64qd_add=uninstrumented\nfun:__bid64qd_div=uninstrumented\nfun:__bid64qd_mul=uninstrumented\nfun:__bid64qd_sub=uninstrumented\nfun:__bid64qdd_fma=uninstrumented\nfun:__bid64qdq_fma=uninstrumented\nfun:__bid64qq_add=uninstrumented\nfun:__bid64qq_div=uninstrumented\nfun:__bid64qq_mul=uninstrumented\nfun:__bid64qq_sub=uninstrumented\nfun:__bid64qqd_fma=uninstrumented\nfun:__bid64qqq_fma=uninstrumented\nfun:__bid_adddd3=uninstrumented\nfun:__bid_addsd3=uninstrumented\nfun:__bid_addtd3=uninstrumented\nfun:__bid_divdd3=uninstrumented\n
fun:__bid_divsd3=uninstrumented\nfun:__bid_divtd3=uninstrumented\nfun:__bid_eqdd2=uninstrumented\nfun:__bid_eqsd2=uninstrumented\nfun:__bid_eqtd2=uninstrumented\nfun:__bid_extendddtd2=uninstrumented\nfun:__bid_extendddtf=uninstrumented\nfun:__bid_extendddxf=uninstrumented\nfun:__bid_extenddfdd=uninstrumented\nfun:__bid_extenddftd=uninstrumented\nfun:__bid_extendsddd2=uninstrumented\nfun:__bid_extendsddf=uninstrumented\nfun:__bid_extendsdtd2=uninstrumented\nfun:__bid_extendsdtf=uninstrumented\nfun:__bid_extendsdxf=uninstrumented\nfun:__bid_extendsfdd=uninstrumented\nfun:__bid_extendsfsd=uninstrumented\nfun:__bid_extendsftd=uninstrumented\nfun:__bid_extendtftd=uninstrumented\nfun:__bid_extendxftd=uninstrumented\nfun:__bid_fixdddi=uninstrumented\nfun:__bid_fixddsi=uninstrumented\nfun:__bid_fixsddi=uninstrumented\nfun:__bid_fixsdsi=uninstrumented\nfun:__bid_fixtddi=uninstrumented\nfun:__bid_fixtdsi=uninstrumented\nfun:__bid_fixunsdddi=uninstrumented\nfun:__bid_fixunsddsi=uninstrumented\nfun:__bid_fixunssddi=uninstrumented\nfun:__bid_fixunssdsi=uninstrumented\nfun:__bid_fixunstddi=uninstrumented\nfun:__bid_fixunstdsi=uninstrumented\nfun:__bid_floatdidd=uninstrumented\nfun:__bid_floatdisd=uninstrumented\nfun:__bid_floatditd=uninstrumented\nfun:__bid_floatsidd=uninstrumented\nfun:__bid_floatsisd=uninstrumented\nfun:__bid_floatsitd=uninstrumented\nfun:__bid_floatunsdidd=uninstrumented\nfun:__bid_floatunsdisd=uninstrumented\nfun:__bid_floatunsditd=uninstrumented\nfun:__bid_floatunssidd=uninstrumented\nfun:__bid_floatunssisd=uninstrumented\nfun:__bid_floatunssitd=uninstrumented\nfun:__bid_gedd2=uninstrumented\nfun:__bid_gesd2=uninstrumented\nfun:__bid_getd2=uninstrumented\nfun:__bid_gtdd2=uninstrumented\nfun:__bid_gtsd2=uninstrumented\nfun:__bid_gttd2=uninstrumented\nfun:__bid_ledd2=uninstrumented\nfun:__bid_lesd2=uninstrumented\nfun:__bid_letd2=uninstrumented\nfun:__bid_ltdd2=uninstrumented\nfun:__bid_ltsd2=uninstrumented\nfun:__bid_lttd2=uninstrumented\nfun:__bid_muldd3=uni
nstrumented\nfun:__bid_mulsd3=uninstrumented\nfun:__bid_multd3=uninstrumented\nfun:__bid_nedd2=uninstrumented\nfun:__bid_nesd2=uninstrumented\nfun:__bid_netd2=uninstrumented\nfun:__bid_round128_19_38=uninstrumented\nfun:__bid_round192_39_57=uninstrumented\nfun:__bid_round256_58_76=uninstrumented\nfun:__bid_round64_2_18=uninstrumented\nfun:__bid_subdd3=uninstrumented\nfun:__bid_subsd3=uninstrumented\nfun:__bid_subtd3=uninstrumented\nfun:__bid_truncdddf=uninstrumented\nfun:__bid_truncddsd2=uninstrumented\nfun:__bid_truncddsf=uninstrumented\nfun:__bid_truncdfsd=uninstrumented\nfun:__bid_truncsdsf=uninstrumented\nfun:__bid_trunctddd2=uninstrumented\nfun:__bid_trunctddf=uninstrumented\nfun:__bid_trunctdsd2=uninstrumented\nfun:__bid_trunctdsf=uninstrumented\nfun:__bid_trunctdtf=uninstrumented\nfun:__bid_trunctdxf=uninstrumented\nfun:__bid_trunctfdd=uninstrumented\nfun:__bid_trunctfsd=uninstrumented\nfun:__bid_truncxfdd=uninstrumented\nfun:__bid_truncxfsd=uninstrumented\nfun:__bid_unorddd2=uninstrumented\nfun:__bid_unordsd2=uninstrumented\nfun:__bid_unordtd2=uninstrumented\nfun:__binary128_to_bid128=uninstrumented\nfun:__binary128_to_bid32=uninstrumented\nfun:__binary128_to_bid64=uninstrumented\nfun:__binary32_to_bid128=uninstrumented\nfun:__binary32_to_bid32=uninstrumented\nfun:__binary32_to_bid64=uninstrumented\nfun:__binary64_to_bid128=uninstrumented\nfun:__binary64_to_bid32=uninstrumented\nfun:__binary64_to_bid64=uninstrumented\nfun:__binary80_to_bid128=uninstrumented\nfun:__binary80_to_bid32=uninstrumented\nfun:__binary80_to_bid64=uninstrumented\nfun:__bsd_getpgrp=uninstrumented\nfun:__bswapdi2=uninstrumented\nfun:__bswapsi2=uninstrumented\nfun:__bzero=uninstrumented\nfun:__call_tls_dtors=uninstrumented\nfun:__chk_fail=uninstrumented\nfun:__clear_cache=uninstrumented\nfun:__clock_getcpuclockid=uninstrumented\nfun:__clock_getres=uninstrumented\nfun:__clock_gettime=uninstrumented\nfun:__clock_nanosleep=uninstrumented\nfun:__clock_settime=uninstrumented\nfun:__clog10=uni
nstrumented\nfun:__clog10f=uninstrumented\nfun:__clog10l=uninstrumented\nfun:__clone=uninstrumented\nfun:__close=uninstrumented\nfun:__clrsbdi2=uninstrumented\nfun:__clrsbti2=uninstrumented\nfun:__clzdi2=uninstrumented\nfun:__clzti2=uninstrumented\nfun:__cmpti2=uninstrumented\nfun:__cmsg_nxthdr=uninstrumented\nfun:__confstr_chk=uninstrumented\nfun:__connect=uninstrumented\nfun:__cosh_finite=uninstrumented\nfun:__coshf_finite=uninstrumented\nfun:__coshl_finite=uninstrumented\nfun:__cpu_indicator_init=uninstrumented\nfun:__create_ib_request=uninstrumented\nfun:__ctype_b_loc=uninstrumented\nfun:__ctype_get_mb_cur_max=uninstrumented\nfun:__ctype_init=uninstrumented\nfun:__ctype_tolower_loc=uninstrumented\nfun:__ctype_toupper_loc=uninstrumented\nfun:__ctzdi2=uninstrumented\nfun:__ctzti2=uninstrumented\nfun:__cxa_at_quick_exit=uninstrumented\nfun:__cxa_atexit=uninstrumented\nfun:__cxa_finalize=uninstrumented\nfun:__cxa_thread_atexit_impl=uninstrumented\nfun:__cyg_profile_func_enter=uninstrumented\nfun:__cyg_profile_func_exit=uninstrumented\nfun:__dcgettext=uninstrumented\nfun:__default_morecore=uninstrumented\nfun:__deregister_frame=uninstrumented\nfun:__deregister_frame_info=uninstrumented\nfun:__deregister_frame_info_bases=uninstrumented\nfun:__dfp_clear_except=uninstrumented\nfun:__dfp_get_round=uninstrumented\nfun:__dfp_raise_except=uninstrumented\nfun:__dfp_set_round=uninstrumented\nfun:__dfp_test_except=uninstrumented\nfun:__dgettext=uninstrumented\nfun:__divdc3=uninstrumented\nfun:__divsc3=uninstrumented\nfun:__divtc3=uninstrumented\nfun:__divtf3=uninstrumented\nfun:__divti3=uninstrumented\nfun:__divxc3=uninstrumented\nfun:__dn_comp=uninstrumented\nfun:__dn_count_labels=uninstrumented\nfun:__dn_expand=uninstrumented\nfun:__dn_skipname=uninstrumented\nfun:__do_niscall3=uninstrumented\nfun:__dprintf_chk=uninstrumented\nfun:__dup2=uninstrumented\nfun:__duplocale=uninstrumented\nfun:__emutls_get_address=uninstrumented\nfun:__emutls_register_common=uninstrumented\nfun:_
_enable_execute_stack=uninstrumented\nfun:__endmntent=uninstrumented\nfun:__eprintf=uninstrumented\nfun:__eqtf2=uninstrumented\nfun:__errno_location=uninstrumented\nfun:__exp10_finite=uninstrumented\nfun:__exp10f_finite=uninstrumented\nfun:__exp10l_finite=uninstrumented\nfun:__exp2_finite=uninstrumented\nfun:__exp2f_finite=uninstrumented\nfun:__exp2l_finite=uninstrumented\nfun:__exp_finite=uninstrumented\nfun:__expf_finite=uninstrumented\nfun:__expl_finite=uninstrumented\nfun:__extenddftf2=uninstrumented\nfun:__extendsftf2=uninstrumented\nfun:__extendxftf2=uninstrumented\nfun:__fbufsize=uninstrumented\nfun:__fcntl=uninstrumented\nfun:__fdelt_chk=uninstrumented\nfun:__fdelt_warn=uninstrumented\nfun:__fentry__=uninstrumented\nfun:__ffs=uninstrumented\nfun:__ffsdi2=uninstrumented\nfun:__ffsti2=uninstrumented\nfun:__fgets_chk=uninstrumented\nfun:__fgets_unlocked_chk=uninstrumented\nfun:__fgetws_chk=uninstrumented\nfun:__fgetws_unlocked_chk=uninstrumented\nfun:__finite=uninstrumented\nfun:__finitef=uninstrumented\nfun:__finitel=uninstrumented\nfun:__fixdfti=uninstrumented\nfun:__fixsfti=uninstrumented\nfun:__fixtfdi=uninstrumented\nfun:__fixtfsi=uninstrumented\nfun:__fixtfti=uninstrumented\nfun:__fixunsdfdi=uninstrumented\nfun:__fixunsdfti=uninstrumented\nfun:__fixunssfdi=uninstrumented\nfun:__fixunssfti=uninstrumented\nfun:__fixunstfdi=uninstrumented\nfun:__fixunstfsi=uninstrumented\nfun:__fixunstfti=uninstrumented\nfun:__fixunsxfdi=uninstrumented\nfun:__fixunsxfti=uninstrumented\nfun:__fixxfti=uninstrumented\nfun:__flbf=uninstrumented\nfun:__floatditf=uninstrumented\nfun:__floatsitf=uninstrumented\nfun:__floattidf=uninstrumented\nfun:__floattisf=uninstrumented\nfun:__floattitf=uninstrumented\nfun:__floattixf=uninstrumented\nfun:__floatunditf=uninstrumented\nfun:__floatunsitf=uninstrumented\nfun:__floatuntidf=uninstrumented\nfun:__floatuntisf=uninstrumented\nfun:__floatuntitf=uninstrumented\nfun:__floatuntixf=uninstrumented\nfun:__fmod_finite=uninstrumented\nfun:__fmodf
_finite=uninstrumented\nfun:__fmodl_finite=uninstrumented\nfun:__follow_path=uninstrumented\nfun:__fork=uninstrumented\nfun:__fortify_fail=uninstrumented\nfun:__fp_nquery=uninstrumented\nfun:__fp_query=uninstrumented\nfun:__fp_resstat=uninstrumented\nfun:__fpclassify=uninstrumented\nfun:__fpclassifyf=uninstrumented\nfun:__fpclassifyl=uninstrumented\nfun:__fpending=uninstrumented\nfun:__fprintf_chk=uninstrumented\nfun:__fpurge=uninstrumented\nfun:__fread_chk=uninstrumented\nfun:__fread_unlocked_chk=uninstrumented\nfun:__freadable=uninstrumented\nfun:__freading=uninstrumented\nfun:__free_fdresult=uninstrumented\nfun:__freelocale=uninstrumented\nfun:__fsetlocking=uninstrumented\nfun:__fstat=uninstrumented\nfun:__fwprintf_chk=uninstrumented\nfun:__fwritable=uninstrumented\nfun:__fwriting=uninstrumented\nfun:__fxstat=uninstrumented\nfun:__fxstat64=uninstrumented\nfun:__fxstatat=uninstrumented\nfun:__fxstatat64=uninstrumented\nfun:__gai_sigqueue=uninstrumented\nfun:__gamma_r_finite=uninstrumented\nfun:__gammaf_r_finite=uninstrumented\nfun:__gammal_r_finite=uninstrumented\nfun:__gcc_bcmp=uninstrumented\nfun:__gcc_personality_v0=uninstrumented\nfun:__gconv_get_alias_db=uninstrumented\nfun:__gconv_get_cache=uninstrumented\nfun:__gconv_get_modules_db=uninstrumented\nfun:__generic_findstack=uninstrumented\nfun:__generic_morestack=uninstrumented\nfun:__generic_morestack_set_initial_sp=uninstrumented\nfun:__generic_releasestack=uninstrumented\nfun:__get_cpu_features=uninstrumented\nfun:__getauxval=uninstrumented\nfun:__getcwd_chk=uninstrumented\nfun:__getdelim=uninstrumented\nfun:__getdomainname_chk=uninstrumented\nfun:__getf2=uninstrumented\nfun:__getgroups_chk=uninstrumented\nfun:__gethostname_chk=uninstrumented\nfun:__getlogin_r_chk=uninstrumented\nfun:__getmntent_r=uninstrumented\nfun:__getpagesize=uninstrumented\nfun:__getpgid=uninstrumented\nfun:__getpid=uninstrumented\nfun:__gets_chk=uninstrumented\nfun:__gettimeofday=uninstrumented\nfun:__getwd_chk=uninstrumented\nfun:__
gmtime_r=uninstrumented\nfun:__gttf2=uninstrumented\nfun:__h_errno_location=uninstrumented\nfun:__hostalias=uninstrumented\nfun:__hypot_finite=uninstrumented\nfun:__hypotf_finite=uninstrumented\nfun:__hypotl_finite=uninstrumented\nfun:__internal_endnetgrent=uninstrumented\nfun:__internal_getnetgrent_r=uninstrumented\nfun:__internal_setnetgrent=uninstrumented\nfun:__isalnum_l=uninstrumented\nfun:__isalpha_l=uninstrumented\nfun:__isascii_l=uninstrumented\nfun:__isblank_l=uninstrumented\nfun:__iscntrl_l=uninstrumented\nfun:__isctype=uninstrumented\nfun:__isdigit_l=uninstrumented\nfun:__isgraph_l=uninstrumented\nfun:__isinf=uninstrumented\nfun:__isinff=uninstrumented\nfun:__isinfl=uninstrumented\nfun:__islower_l=uninstrumented\nfun:__isnan=uninstrumented\nfun:__isnanf=uninstrumented\nfun:__isnanl=uninstrumented\nfun:__isoc99_fscanf=uninstrumented\nfun:__isoc99_fwscanf=uninstrumented\nfun:__isoc99_scanf=uninstrumented\nfun:__isoc99_sscanf=uninstrumented\nfun:__isoc99_swscanf=uninstrumented\nfun:__isoc99_vfscanf=uninstrumented\nfun:__isoc99_vfwscanf=uninstrumented\nfun:__isoc99_vscanf=uninstrumented\nfun:__isoc99_vsscanf=uninstrumented\nfun:__isoc99_vswscanf=uninstrumented\nfun:__isoc99_vwscanf=uninstrumented\nfun:__isoc99_wscanf=uninstrumented\nfun:__isprint_l=uninstrumented\nfun:__ispunct_l=uninstrumented\nfun:__issignaling=uninstrumented\nfun:__issignalingf=uninstrumented\nfun:__issignalingl=uninstrumented\nfun:__isspace_l=uninstrumented\nfun:__isupper_l=uninstrumented\nfun:__iswalnum_l=uninstrumented\nfun:__iswalpha_l=uninstrumented\nfun:__iswblank_l=uninstrumented\nfun:__iswcntrl_l=uninstrumented\nfun:__iswctype=uninstrumented\nfun:__iswctype_l=uninstrumented\nfun:__iswdigit_l=uninstrumented\nfun:__iswgraph_l=uninstrumented\nfun:__iswlower_l=uninstrumented\nfun:__iswprint_l=uninstrumented\nfun:__iswpunct_l=uninstrumented\nfun:__iswspace_l=uninstrumented\nfun:__iswupper_l=uninstrumented\nfun:__iswxdigit_l=uninstrumented\nfun:__isxdigit_l=uninstrumented\nfun:__ivalidus
er=uninstrumented\nfun:__j0_finite=uninstrumented\nfun:__j0f_finite=uninstrumented\nfun:__j0l_finite=uninstrumented\nfun:__j1_finite=uninstrumented\nfun:__j1f_finite=uninstrumented\nfun:__j1l_finite=uninstrumented\nfun:__jn_finite=uninstrumented\nfun:__jnf_finite=uninstrumented\nfun:__jnl_finite=uninstrumented\nfun:__letf2=uninstrumented\nfun:__lgamma_r_finite=uninstrumented\nfun:__lgammaf_r_finite=uninstrumented\nfun:__lgammal_r_finite=uninstrumented\nfun:__libc_alloca_cutoff=uninstrumented\nfun:__libc_allocate_rtsig=uninstrumented\nfun:__libc_allocate_rtsig_private=uninstrumented\nfun:__libc_calloc=uninstrumented\nfun:__libc_clntudp_bufcreate=uninstrumented\nfun:__libc_csu_fini=uninstrumented\nfun:__libc_csu_init=uninstrumented\nfun:__libc_current_sigrtmax=uninstrumented\nfun:__libc_current_sigrtmax_private=uninstrumented\nfun:__libc_current_sigrtmin=uninstrumented\nfun:__libc_current_sigrtmin_private=uninstrumented\nfun:__libc_dl_error_tsd=uninstrumented\nfun:__libc_dlclose=uninstrumented\nfun:__libc_dlopen_mode=uninstrumented\nfun:__libc_dlsym=uninstrumented\nfun:__libc_fatal=uninstrumented\nfun:__libc_fork=uninstrumented\nfun:__libc_free=uninstrumented\nfun:__libc_freeres=uninstrumented\nfun:__libc_ifunc_impl_list=uninstrumented\nfun:__libc_init_first=uninstrumented\nfun:__libc_longjmp=uninstrumented\nfun:__libc_mallinfo=uninstrumented\nfun:__libc_malloc=uninstrumented\nfun:__libc_mallopt=uninstrumented\nfun:__libc_memalign=uninstrumented\nfun:__libc_pthread_init=uninstrumented\nfun:__libc_pvalloc=uninstrumented\nfun:__libc_pwrite=uninstrumented\nfun:__libc_realloc=uninstrumented\nfun:__libc_res_nquery=uninstrumented\nfun:__libc_res_nsearch=uninstrumented\nfun:__libc_rpc_getport=uninstrumented\nfun:__libc_sa_len=uninstrumented\nfun:__libc_secure_getenv=uninstrumented\nfun:__libc_siglongjmp=uninstrumented\nfun:__libc_start_main=uninstrumented\nfun:__libc_system=uninstrumented\nfun:__libc_thread_freeres=uninstrumented\nfun:__libc_valloc=uninstrumented\nfun:__loc_
aton=uninstrumented\nfun:__loc_ntoa=uninstrumented\nfun:__log10_finite=uninstrumented\nfun:__log10f_finite=uninstrumented\nfun:__log10l_finite=uninstrumented\nfun:__log2_finite=uninstrumented\nfun:__log2f_finite=uninstrumented\nfun:__log2l_finite=uninstrumented\nfun:__log_finite=uninstrumented\nfun:__logf_finite=uninstrumented\nfun:__logl_finite=uninstrumented\nfun:__longjmp_chk=uninstrumented\nfun:__lseek=uninstrumented\nfun:__lshrti3=uninstrumented\nfun:__lstat=uninstrumented\nfun:__lttf2=uninstrumented\nfun:__lxstat=uninstrumented\nfun:__lxstat64=uninstrumented\nfun:__madvise=uninstrumented\nfun:__mbrlen=uninstrumented\nfun:__mbrtowc=uninstrumented\nfun:__mbsnrtowcs_chk=uninstrumented\nfun:__mbsrtowcs_chk=uninstrumented\nfun:__mbstowcs_chk=uninstrumented\nfun:__memcpy_chk=uninstrumented\nfun:__memmove_chk=uninstrumented\nfun:__mempcpy=uninstrumented\nfun:__mempcpy_chk=uninstrumented\nfun:__mempcpy_small=uninstrumented\nfun:__memset_chk=uninstrumented\nfun:__mknod=uninstrumented\nfun:__mktemp=uninstrumented\nfun:__modti3=uninstrumented\nfun:__monstartup=uninstrumented\nfun:__morestack=uninstrumented\nfun:__morestack_allocate_stack_space=uninstrumented\nfun:__morestack_block_signals=uninstrumented\nfun:__morestack_fail=uninstrumented\nfun:__morestack_get_guard=uninstrumented\nfun:__morestack_large_model=uninstrumented\nfun:__morestack_load_mmap=uninstrumented\nfun:__morestack_make_guard=uninstrumented\nfun:__morestack_non_split=uninstrumented\nfun:__morestack_release_segments=uninstrumented\nfun:__morestack_set_guard=uninstrumented\nfun:__morestack_unblock_signals=uninstrumented\nfun:__mq_open_2=uninstrumented\nfun:__muldc3=uninstrumented\nfun:__mulsc3=uninstrumented\nfun:__multc3=uninstrumented\nfun:__multf3=uninstrumented\nfun:__multi3=uninstrumented\nfun:__mulvdi3=uninstrumented\nfun:__mulvsi3=uninstrumented\nfun:__mulvti3=uninstrumented\nfun:__mulxc3=uninstrumented\nfun:__nanosleep=uninstrumented\nfun:__negtf2=uninstrumented\nfun:__negti2=uninstrumented\nfun:__
negvdi2=uninstrumented\nfun:__negvsi2=uninstrumented\nfun:__negvti2=uninstrumented\nfun:__netf2=uninstrumented\nfun:__newlocale=uninstrumented\nfun:__nis_default_access=uninstrumented\nfun:__nis_default_group=uninstrumented\nfun:__nis_default_owner=uninstrumented\nfun:__nis_default_ttl=uninstrumented\nfun:__nis_finddirectory=uninstrumented\nfun:__nis_hash=uninstrumented\nfun:__nisbind_connect=uninstrumented\nfun:__nisbind_create=uninstrumented\nfun:__nisbind_destroy=uninstrumented\nfun:__nisbind_next=uninstrumented\nfun:__nl_langinfo_l=uninstrumented\nfun:__ns_get16=uninstrumented\nfun:__ns_get32=uninstrumented\nfun:__ns_name_ntop=uninstrumented\nfun:__ns_name_unpack=uninstrumented\nfun:__nss_configure_lookup=uninstrumented\nfun:__nss_database_lookup=uninstrumented\nfun:__nss_disable_nscd=uninstrumented\nfun:__nss_group_lookup=uninstrumented\nfun:__nss_group_lookup2=uninstrumented\nfun:__nss_hostname_digits_dots=uninstrumented\nfun:__nss_hosts_lookup=uninstrumented\nfun:__nss_hosts_lookup2=uninstrumented\nfun:__nss_lookup=uninstrumented\nfun:__nss_lookup_function=uninstrumented\nfun:__nss_next=uninstrumented\nfun:__nss_next2=uninstrumented\nfun:__nss_passwd_lookup=uninstrumented\nfun:__nss_passwd_lookup2=uninstrumented\nfun:__nss_services_lookup2=uninstrumented\nfun:__obstack_printf_chk=uninstrumented\nfun:__obstack_vprintf_chk=uninstrumented\nfun:__open=uninstrumented\nfun:__open64=uninstrumented\nfun:__open64_2=uninstrumented\nfun:__open_2=uninstrumented\nfun:__open_catalog=uninstrumented\nfun:__openat64_2=uninstrumented\nfun:__openat_2=uninstrumented\nfun:__overflow=uninstrumented\nfun:__p_cdname=uninstrumented\nfun:__p_cdnname=uninstrumented\nfun:__p_class=uninstrumented\nfun:__p_fqname=uninstrumented\nfun:__p_fqnname=uninstrumented\nfun:__p_option=uninstrumented\nfun:__p_query=uninstrumented\nfun:__p_rcode=uninstrumented\nfun:__p_secstodate=uninstrumented\nfun:__p_time=uninstrumented\nfun:__p_type=uninstrumented\nfun:__paritydi2=uninstrumented\nfun:__parityti2=
uninstrumented\nfun:__pipe=uninstrumented\nfun:__poll=uninstrumented\nfun:__poll_chk=uninstrumented\nfun:__popcountdi2=uninstrumented\nfun:__popcountti2=uninstrumented\nfun:__posix_getopt=uninstrumented\nfun:__pow_finite=uninstrumented\nfun:__powf_finite=uninstrumented\nfun:__powidf2=uninstrumented\nfun:__powisf2=uninstrumented\nfun:__powitf2=uninstrumented\nfun:__powixf2=uninstrumented\nfun:__powl_finite=uninstrumented\nfun:__ppoll_chk=uninstrumented\nfun:__pread64=uninstrumented\nfun:__pread64_chk=uninstrumented\nfun:__pread_chk=uninstrumented\nfun:__prepare_niscall=uninstrumented\nfun:__printf_chk=uninstrumented\nfun:__printf_fp=uninstrumented\nfun:__profile_frequency=uninstrumented\nfun:__pthread_atfork=uninstrumented\nfun:__pthread_cleanup_routine=uninstrumented\nfun:__pthread_clock_gettime=uninstrumented\nfun:__pthread_clock_settime=uninstrumented\nfun:__pthread_get_minstack=uninstrumented\nfun:__pthread_getspecific=uninstrumented\nfun:__pthread_initialize_minimal=uninstrumented\nfun:__pthread_key_create=uninstrumented\nfun:__pthread_mutex_destroy=uninstrumented\nfun:__pthread_mutex_init=uninstrumented\nfun:__pthread_mutex_lock=uninstrumented\nfun:__pthread_mutex_trylock=uninstrumented\nfun:__pthread_mutex_unlock=uninstrumented\nfun:__pthread_mutexattr_destroy=uninstrumented\nfun:__pthread_mutexattr_init=uninstrumented\nfun:__pthread_mutexattr_settype=uninstrumented\nfun:__pthread_once=uninstrumented\nfun:__pthread_register_cancel=uninstrumented\nfun:__pthread_register_cancel_defer=uninstrumented\nfun:__pthread_rwlock_destroy=uninstrumented\nfun:__pthread_rwlock_init=uninstrumented\nfun:__pthread_rwlock_rdlock=uninstrumented\nfun:__pthread_rwlock_tryrdlock=uninstrumented\nfun:__pthread_rwlock_trywrlock=uninstrumented\nfun:__pthread_rwlock_unlock=uninstrumented\nfun:__pthread_rwlock_wrlock=uninstrumented\nfun:__pthread_setspecific=uninstrumented\nfun:__pthread_unregister_cancel=uninstrumented\nfun:__pthread_unregister_cancel_restore=uninstrumented\nfun:__pthrea
d_unwind=uninstrumented\nfun:__pthread_unwind_next=uninstrumented\nfun:__ptsname_r_chk=uninstrumented\nfun:__putlong=uninstrumented\nfun:__putshort=uninstrumented\nfun:__pwrite64=uninstrumented\nfun:__rawmemchr=uninstrumented\nfun:__read=uninstrumented\nfun:__read_chk=uninstrumented\nfun:__readlink_chk=uninstrumented\nfun:__readlinkat_chk=uninstrumented\nfun:__realpath_chk=uninstrumented\nfun:__recv_chk=uninstrumented\nfun:__recvfrom_chk=uninstrumented\nfun:__register_atfork=uninstrumented\nfun:__register_frame=uninstrumented\nfun:__register_frame_info=uninstrumented\nfun:__register_frame_info_bases=uninstrumented\nfun:__register_frame_info_table=uninstrumented\nfun:__register_frame_info_table_bases=uninstrumented\nfun:__register_frame_table=uninstrumented\nfun:__remainder_finite=uninstrumented\nfun:__remainderf_finite=uninstrumented\nfun:__remainderl_finite=uninstrumented\nfun:__res_close=uninstrumented\nfun:__res_dnok=uninstrumented\nfun:__res_hnok=uninstrumented\nfun:__res_hostalias=uninstrumented\nfun:__res_iclose=uninstrumented\nfun:__res_init=uninstrumented\nfun:__res_isourserver=uninstrumented\nfun:__res_mailok=uninstrumented\nfun:__res_maybe_init=uninstrumented\nfun:__res_mkquery=uninstrumented\nfun:__res_nameinquery=uninstrumented\nfun:__res_nclose=uninstrumented\nfun:__res_ninit=uninstrumented\nfun:__res_nmkquery=uninstrumented\nfun:__res_nquery=uninstrumented\nfun:__res_nquerydomain=uninstrumented\nfun:__res_nsearch=uninstrumented\nfun:__res_nsend=uninstrumented\nfun:__res_ownok=uninstrumented\nfun:__res_queriesmatch=uninstrumented\nfun:__res_query=uninstrumented\nfun:__res_querydomain=uninstrumented\nfun:__res_randomid=uninstrumented\nfun:__res_search=uninstrumented\nfun:__res_send=uninstrumented\nfun:__res_state=uninstrumented\nfun:__rpc_thread_createerr=uninstrumented\nfun:__rpc_thread_svc_fdset=uninstrumented\nfun:__rpc_thread_svc_max_pollfd=uninstrumented\nfun:__rpc_thread_svc_pollfd=uninstrumented\nfun:__sbrk=uninstrumented\nfun:__scalb_finite=unins
trumented\nfun:__scalbf_finite=uninstrumented\nfun:__scalbl_finite=uninstrumented\nfun:__sched_cpualloc=uninstrumented\nfun:__sched_cpucount=uninstrumented\nfun:__sched_cpufree=uninstrumented\nfun:__sched_get_priority_max=uninstrumented\nfun:__sched_get_priority_min=uninstrumented\nfun:__sched_getparam=uninstrumented\nfun:__sched_getscheduler=uninstrumented\nfun:__sched_setscheduler=uninstrumented\nfun:__sched_yield=uninstrumented\nfun:__secure_getenv=uninstrumented\nfun:__select=uninstrumented\nfun:__send=uninstrumented\nfun:__sendmmsg=uninstrumented\nfun:__setmntent=uninstrumented\nfun:__setpgid=uninstrumented\nfun:__sfp_handle_exceptions=uninstrumented\nfun:__sigaction=uninstrumented\nfun:__sigaddset=uninstrumented\nfun:__sigdelset=uninstrumented\nfun:__sigismember=uninstrumented\nfun:__signbit=uninstrumented\nfun:__signbitf=uninstrumented\nfun:__signbitl=uninstrumented\nfun:__sigpause=uninstrumented\nfun:__sigsetjmp=uninstrumented\nfun:__sigsuspend=uninstrumented\nfun:__sinh_finite=uninstrumented\nfun:__sinhf_finite=uninstrumented\nfun:__sinhl_finite=uninstrumented\nfun:__snprintf_chk=uninstrumented\nfun:__splitstack_block_signals=uninstrumented\nfun:__splitstack_block_signals_context=uninstrumented\nfun:__splitstack_find=uninstrumented\nfun:__splitstack_find_context=uninstrumented\nfun:__splitstack_getcontext=uninstrumented\nfun:__splitstack_makecontext=uninstrumented\nfun:__splitstack_releasecontext=uninstrumented\nfun:__splitstack_resetcontext=uninstrumented\nfun:__splitstack_setcontext=uninstrumented\nfun:__sprintf_chk=uninstrumented\nfun:__sqrt_finite=uninstrumented\nfun:__sqrtf_finite=uninstrumented\nfun:__sqrtl_finite=uninstrumented\nfun:__stack_chk_fail=uninstrumented\nfun:__stack_chk_fail_local=uninstrumented\nfun:__stack_split_initialize=uninstrumented\nfun:__stat=uninstrumented\nfun:__statfs=uninstrumented\nfun:__stpcpy=uninstrumented\nfun:__stpcpy_chk=uninstrumented\nfun:__stpcpy_small=uninstrumented\nfun:__stpncpy=uninstrumented\nfun:__stpncpy_chk=u
ninstrumented\nfun:__strcasecmp=uninstrumented\nfun:__strcasecmp_l=uninstrumented\nfun:__strcasestr=uninstrumented\nfun:__strcat_chk=uninstrumented\nfun:__strcoll_l=uninstrumented\nfun:__strcpy_chk=uninstrumented\nfun:__strcpy_small=uninstrumented\nfun:__strcspn_c1=uninstrumented\nfun:__strcspn_c2=uninstrumented\nfun:__strcspn_c3=uninstrumented\nfun:__strdup=uninstrumented\nfun:__strerror_r=uninstrumented\nfun:__strfmon_l=uninstrumented\nfun:__strftime_l=uninstrumented\nfun:__strncasecmp_l=uninstrumented\nfun:__strncat_chk=uninstrumented\nfun:__strncpy_chk=uninstrumented\nfun:__strndup=uninstrumented\nfun:__strpbrk_c2=uninstrumented\nfun:__strpbrk_c3=uninstrumented\nfun:__strsep_1c=uninstrumented\nfun:__strsep_2c=uninstrumented\nfun:__strsep_3c=uninstrumented\nfun:__strsep_g=uninstrumented\nfun:__strspn_c1=uninstrumented\nfun:__strspn_c2=uninstrumented\nfun:__strspn_c3=uninstrumented\nfun:__strtod_internal=uninstrumented\nfun:__strtod_l=uninstrumented\nfun:__strtof_internal=uninstrumented\nfun:__strtof_l=uninstrumented\nfun:__strtok_r=uninstrumented\nfun:__strtok_r_1c=uninstrumented\nfun:__strtol_internal=uninstrumented\nfun:__strtol_l=uninstrumented\nfun:__strtold_internal=uninstrumented\nfun:__strtold_l=uninstrumented\nfun:__strtoll_internal=uninstrumented\nfun:__strtoll_l=uninstrumented\nfun:__strtoul_internal=uninstrumented\nfun:__strtoul_l=uninstrumented\nfun:__strtoull_internal=uninstrumented\nfun:__strtoull_l=uninstrumented\nfun:__strverscmp=uninstrumented\nfun:__strxfrm_l=uninstrumented\nfun:__subtf3=uninstrumented\nfun:__subvdi3=uninstrumented\nfun:__subvsi3=uninstrumented\nfun:__subvti3=uninstrumented\nfun:__swprintf_chk=uninstrumented\nfun:__sym_ntop=uninstrumented\nfun:__sym_ntos=uninstrumented\nfun:__sym_ston=uninstrumented\nfun:__sysconf=uninstrumented\nfun:__sysctl=uninstrumented\nfun:__syslog_chk=uninstrumented\nfun:__sysv_signal=uninstrumented\nfun:__tls_get_addr=uninstrumented\nfun:__toascii_l=uninstrumented\nfun:__tolower_l=uninstrumented\nfun:__t
oupper_l=uninstrumented\nfun:__towctrans=uninstrumented\nfun:__towctrans_l=uninstrumented\nfun:__towlower_l=uninstrumented\nfun:__towupper_l=uninstrumented\nfun:__trunctfdf2=uninstrumented\nfun:__trunctfsf2=uninstrumented\nfun:__trunctfxf2=uninstrumented\nfun:__ttyname_r_chk=uninstrumented\nfun:__ucmpti2=uninstrumented\nfun:__udiv_w_sdiv=uninstrumented\nfun:__udivmodti4=uninstrumented\nfun:__udivti3=uninstrumented\nfun:__uflow=uninstrumented\nfun:__umodti3=uninstrumented\nfun:__underflow=uninstrumented\nfun:__unordtf2=uninstrumented\nfun:__uselocale=uninstrumented\nfun:__vasprintf_chk=uninstrumented\nfun:__vdprintf_chk=uninstrumented\nfun:__vfork=uninstrumented\nfun:__vfprintf_chk=uninstrumented\nfun:__vfscanf=uninstrumented\nfun:__vfwprintf_chk=uninstrumented\nfun:__vprintf_chk=uninstrumented\nfun:__vsnprintf=uninstrumented\nfun:__vsnprintf_chk=uninstrumented\nfun:__vsprintf_chk=uninstrumented\nfun:__vsscanf=uninstrumented\nfun:__vswprintf_chk=uninstrumented\nfun:__vsyslog_chk=uninstrumented\nfun:__vwprintf_chk=uninstrumented\nfun:__wait=uninstrumented\nfun:__waitpid=uninstrumented\nfun:__warn_memset_zero_len=uninstrumented\nfun:__wcpcpy_chk=uninstrumented\nfun:__wcpncpy_chk=uninstrumented\nfun:__wcrtomb_chk=uninstrumented\nfun:__wcscasecmp_l=uninstrumented\nfun:__wcscat_chk=uninstrumented\nfun:__wcscoll_l=uninstrumented\nfun:__wcscpy_chk=uninstrumented\nfun:__wcsftime_l=uninstrumented\nfun:__wcsncasecmp_l=uninstrumented\nfun:__wcsncat_chk=uninstrumented\nfun:__wcsncpy_chk=uninstrumented\nfun:__wcsnrtombs_chk=uninstrumented\nfun:__wcsrtombs_chk=uninstrumented\nfun:__wcstod_internal=uninstrumented\nfun:__wcstod_l=uninstrumented\nfun:__wcstof_internal=uninstrumented\nfun:__wcstof_l=uninstrumented\nfun:__wcstol_internal=uninstrumented\nfun:__wcstol_l=uninstrumented\nfun:__wcstold_internal=uninstrumented\nfun:__wcstold_l=uninstrumented\nfun:__wcstoll_internal=uninstrumented\nfun:__wcstoll_l=uninstrumented\nfun:__wcstombs_chk=uninstrumented\nfun:__wcstoul_internal=unins
trumented\nfun:__wcstoul_l=uninstrumented\nfun:__wcstoull_internal=uninstrumented\nfun:__wcstoull_l=uninstrumented\nfun:__wcsxfrm_l=uninstrumented\nfun:__wctomb_chk=uninstrumented\nfun:__wctrans_l=uninstrumented\nfun:__wctype_l=uninstrumented\nfun:__wmemcpy_chk=uninstrumented\nfun:__wmemmove_chk=uninstrumented\nfun:__wmempcpy_chk=uninstrumented\nfun:__wmemset_chk=uninstrumented\nfun:__woverflow=uninstrumented\nfun:__wprintf_chk=uninstrumented\nfun:__wrap_pthread_create=uninstrumented\nfun:__write=uninstrumented\nfun:__wuflow=uninstrumented\nfun:__wunderflow=uninstrumented\nfun:__xmknod=uninstrumented\nfun:__xmknodat=uninstrumented\nfun:__xpg_basename=uninstrumented\nfun:__xpg_sigpause=uninstrumented\nfun:__xpg_strerror_r=uninstrumented\nfun:__xstat=uninstrumented\nfun:__xstat64=uninstrumented\nfun:__y0_finite=uninstrumented\nfun:__y0f_finite=uninstrumented\nfun:__y0l_finite=uninstrumented\nfun:__y1_finite=uninstrumented\nfun:__y1f_finite=uninstrumented\nfun:__y1l_finite=uninstrumented\nfun:__yn_finite=uninstrumented\nfun:__ynf_finite=uninstrumented\nfun:__ynl_finite=uninstrumented\nfun:__yp_check=uninstrumented\nfun:_authenticate=uninstrumented\nfun:_dl_addr=uninstrumented\nfun:_dl_allocate_tls=uninstrumented\nfun:_dl_allocate_tls_init=uninstrumented\nfun:_dl_deallocate_tls=uninstrumented\nfun:_dl_debug_state=uninstrumented\nfun:_dl_find_dso_for_object=uninstrumented\nfun:_dl_get_tls_static_info=uninstrumented\nfun:_dl_make_stack_executable=uninstrumented\nfun:_dl_mcount=uninstrumented\nfun:_dl_mcount_wrapper=uninstrumented\nfun:_dl_mcount_wrapper_check=uninstrumented\nfun:_dl_rtld_di_serinfo=uninstrumented\nfun:_dl_sym=uninstrumented\nfun:_dl_tls_setup=uninstrumented\nfun:_dl_vsym=uninstrumented\nfun:_exit=uninstrumented\nfun:_flushlbf=uninstrumented\nfun:_gethtbyaddr=uninstrumented\nfun:_gethtbyname=uninstrumented\nfun:_gethtbyname2=uninstrumented\nfun:_gethtent=uninstrumented\nfun:_getlong=uninstrumented\nfun:_getshort=uninstrumented\nfun:_longjmp=uninstrumented\
nfun:_mcleanup=uninstrumented\nfun:_mcount=uninstrumented\nfun:_nsl_default_nss=uninstrumented\nfun:_nss_files_parse_grent=uninstrumented\nfun:_nss_files_parse_pwent=uninstrumented\nfun:_nss_files_parse_sgent=uninstrumented\nfun:_nss_files_parse_spent=uninstrumented\nfun:_obstack_allocated_p=uninstrumented\nfun:_obstack_begin=uninstrumented\nfun:_obstack_begin_1=uninstrumented\nfun:_obstack_free=uninstrumented\nfun:_obstack_memory_used=uninstrumented\nfun:_obstack_newchunk=uninstrumented\nfun:_pthread_cleanup_pop=uninstrumented\nfun:_pthread_cleanup_pop_restore=uninstrumented\nfun:_pthread_cleanup_push=uninstrumented\nfun:_pthread_cleanup_push_defer=uninstrumented\nfun:_rpc_dtablesize=uninstrumented\nfun:_seterr_reply=uninstrumented\nfun:_sethtent=uninstrumented\nfun:_setjmp=uninstrumented\nfun:_tolower=uninstrumented\nfun:_toupper=uninstrumented\nfun:_xdr_ib_request=uninstrumented\nfun:_xdr_nis_result=uninstrumented\nfun:a64l=uninstrumented\nfun:abort=uninstrumented\nfun:abs=uninstrumented\nfun:accept=uninstrumented\nfun:accept4=uninstrumented\nfun:access=uninstrumented\nfun:acct=uninstrumented\nfun:acos=uninstrumented\nfun:acosf=uninstrumented\nfun:acosh=uninstrumented\nfun:acoshf=uninstrumented\nfun:acoshl=uninstrumented\nfun:acosl=uninstrumented\nfun:addmntent=uninstrumented\nfun:addseverity=uninstrumented\nfun:adjtime=uninstrumented\nfun:adjtimex=uninstrumented\nfun:advance=uninstrumented\nfun:aio_cancel=uninstrumented\nfun:aio_cancel64=uninstrumented\nfun:aio_error=uninstrumented\nfun:aio_error64=uninstrumented\nfun:aio_fsync=uninstrumented\nfun:aio_fsync64=uninstrumented\nfun:aio_init=uninstrumented\nfun:aio_read=uninstrumented\nfun:aio_read64=uninstrumented\nfun:aio_return=uninstrumented\nfun:aio_return64=uninstrumented\nfun:aio_suspend=uninstrumented\nfun:aio_suspend64=uninstrumented\nfun:aio_write=uninstrumented\nfun:aio_write64=uninstrumented\nfun:alarm=uninstrumented\nfun:aligned_alloc=uninstrumented\nfun:alphasort=uninstrumented\nfun:alphasort64=uninstr
umented\nfun:arch_prctl=uninstrumented\nfun:argp_error=uninstrumented\nfun:argp_failure=uninstrumented\nfun:argp_help=uninstrumented\nfun:argp_parse=uninstrumented\nfun:argp_state_help=uninstrumented\nfun:argp_usage=uninstrumented\nfun:argz_add=uninstrumented\nfun:argz_add_sep=uninstrumented\nfun:argz_append=uninstrumented\nfun:argz_count=uninstrumented\nfun:argz_create=uninstrumented\nfun:argz_create_sep=uninstrumented\nfun:argz_delete=uninstrumented\nfun:argz_extract=uninstrumented\nfun:argz_insert=uninstrumented\nfun:argz_next=uninstrumented\nfun:argz_replace=uninstrumented\nfun:argz_stringify=uninstrumented\nfun:asctime=uninstrumented\nfun:asctime_r=uninstrumented\nfun:asin=uninstrumented\nfun:asinf=uninstrumented\nfun:asinh=uninstrumented\nfun:asinhf=uninstrumented\nfun:asinhl=uninstrumented\nfun:asinl=uninstrumented\nfun:asprintf=uninstrumented\nfun:at_quick_exit=uninstrumented\nfun:atan=uninstrumented\nfun:atan2=uninstrumented\nfun:atan2f=uninstrumented\nfun:atan2l=uninstrumented\nfun:atanf=uninstrumented\nfun:atanh=uninstrumented\nfun:atanhf=uninstrumented\nfun:atanhl=uninstrumented\nfun:atanl=uninstrumented\nfun:atexit=uninstrumented\nfun:atof=uninstrumented\nfun:atoi=uninstrumented\nfun:atol=uninstrumented\nfun:atoll=uninstrumented\nfun:authdes_create=uninstrumented\nfun:authdes_getucred=uninstrumented\nfun:authdes_pk_create=uninstrumented\nfun:authnone_create=uninstrumented\nfun:authunix_create=uninstrumented\nfun:authunix_create_default=uninstrumented\nfun:backtrace=uninstrumented\nfun:backtrace_symbols=uninstrumented\nfun:backtrace_symbols_fd=uninstrumented\nfun:basename=uninstrumented\nfun:bcmp=uninstrumented\nfun:bcopy=uninstrumented\nfun:bdflush=uninstrumented\nfun:bind=uninstrumented\nfun:bind_textdomain_codeset=uninstrumented\nfun:bindresvport=uninstrumented\nfun:bindtextdomain=uninstrumented\nfun:brk=uninstrumented\nfun:bsd_signal=uninstrumented\nfun:bsearch=uninstrumented\nfun:btowc=uninstrumented\nfun:bzero=uninstrumented\nfun:c16rtomb=uninstrum
ented\nfun:c32rtomb=uninstrumented\nfun:cabs=uninstrumented\nfun:cabsf=uninstrumented\nfun:cabsl=uninstrumented\nfun:cacos=uninstrumented\nfun:cacosf=uninstrumented\nfun:cacosh=uninstrumented\nfun:cacoshf=uninstrumented\nfun:cacoshl=uninstrumented\nfun:cacosl=uninstrumented\nfun:calloc=uninstrumented\nfun:callrpc=uninstrumented\nfun:canonicalize_file_name=uninstrumented\nfun:capget=uninstrumented\nfun:capset=uninstrumented\nfun:carg=uninstrumented\nfun:cargf=uninstrumented\nfun:cargl=uninstrumented\nfun:casin=uninstrumented\nfun:casinf=uninstrumented\nfun:casinh=uninstrumented\nfun:casinhf=uninstrumented\nfun:casinhl=uninstrumented\nfun:casinl=uninstrumented\nfun:catan=uninstrumented\nfun:catanf=uninstrumented\nfun:catanh=uninstrumented\nfun:catanhf=uninstrumented\nfun:catanhl=uninstrumented\nfun:catanl=uninstrumented\nfun:catclose=uninstrumented\nfun:catgets=uninstrumented\nfun:catopen=uninstrumented\nfun:cbc_crypt=uninstrumented\nfun:cbrt=uninstrumented\nfun:cbrtf=uninstrumented\nfun:cbrtl=uninstrumented\nfun:ccos=uninstrumented\nfun:ccosf=uninstrumented\nfun:ccosh=uninstrumented\nfun:ccoshf=uninstrumented\nfun:ccoshl=uninstrumented\nfun:ccosl=uninstrumented\nfun:ceil=uninstrumented\nfun:ceilf=uninstrumented\nfun:ceill=uninstrumented\nfun:cexp=uninstrumented\nfun:cexpf=uninstrumented\nfun:cexpl=uninstrumented\nfun:cfgetispeed=uninstrumented\nfun:cfgetospeed=uninstrumented\nfun:cfmakeraw=uninstrumented\nfun:cfree=uninstrumented\nfun:cfsetispeed=uninstrumented\nfun:cfsetospeed=uninstrumented\nfun:cfsetspeed=uninstrumented\nfun:chdir=uninstrumented\nfun:chflags=uninstrumented\nfun:chmod=uninstrumented\nfun:chown=uninstrumented\nfun:chroot=uninstrumented\nfun:cimag=uninstrumented\nfun:cimagf=uninstrumented\nfun:cimagl=uninstrumented\nfun:clearenv=uninstrumented\nfun:clearerr=uninstrumented\nfun:clearerr_unlocked=uninstrumented\nfun:clnt_broadcast=uninstrumented\nfun:clnt_create=uninstrumented\nfun:clnt_pcreateerror=uninstrumented\nfun:clnt_perrno=uninstrumented\nfun:c
lnt_perror=uninstrumented\nfun:clnt_spcreateerror=uninstrumented\nfun:clnt_sperrno=uninstrumented\nfun:clnt_sperror=uninstrumented\nfun:clntraw_create=uninstrumented\nfun:clnttcp_create=uninstrumented\nfun:clntudp_bufcreate=uninstrumented\nfun:clntudp_create=uninstrumented\nfun:clntunix_create=uninstrumented\nfun:clock=uninstrumented\nfun:clock_adjtime=uninstrumented\nfun:clock_getcpuclockid=uninstrumented\nfun:clock_getres=uninstrumented\nfun:clock_gettime=uninstrumented\nfun:clock_nanosleep=uninstrumented\nfun:clock_settime=uninstrumented\nfun:clog=uninstrumented\nfun:clog10=uninstrumented\nfun:clog10f=uninstrumented\nfun:clog10l=uninstrumented\nfun:clogf=uninstrumented\nfun:clogl=uninstrumented\nfun:clone=uninstrumented\nfun:close=uninstrumented\nfun:closedir=uninstrumented\nfun:closelog=uninstrumented\nfun:confstr=uninstrumented\nfun:conj=uninstrumented\nfun:conjf=uninstrumented\nfun:conjl=uninstrumented\nfun:connect=uninstrumented\nfun:copysign=uninstrumented\nfun:copysignf=uninstrumented\nfun:copysignl=uninstrumented\nfun:cos=uninstrumented\nfun:cosf=uninstrumented\nfun:cosh=uninstrumented\nfun:coshf=uninstrumented\nfun:coshl=uninstrumented\nfun:cosl=uninstrumented\nfun:cpow=uninstrumented\nfun:cpowf=uninstrumented\nfun:cpowl=uninstrumented\nfun:cproj=uninstrumented\nfun:cprojf=uninstrumented\nfun:cprojl=uninstrumented\nfun:creal=uninstrumented\nfun:crealf=uninstrumented\nfun:creall=uninstrumented\nfun:creat=uninstrumented\nfun:creat64=uninstrumented\nfun:create_module=uninstrumented\nfun:crypt=uninstrumented\nfun:crypt_r=uninstrumented\nfun:csin=uninstrumented\nfun:csinf=uninstrumented\nfun:csinh=uninstrumented\nfun:csinhf=uninstrumented\nfun:csinhl=uninstrumented\nfun:csinl=uninstrumented\nfun:csqrt=uninstrumented\nfun:csqrtf=uninstrumented\nfun:csqrtl=uninstrumented\nfun:ctan=uninstrumented\nfun:ctanf=uninstrumented\nfun:ctanh=uninstrumented\nfun:ctanhf=uninstrumented\nfun:ctanhl=uninstrumented\nfun:ctanl=uninstrumented\nfun:ctermid=uninstrumented\nfun:ctim
e=uninstrumented\nfun:ctime_r=uninstrumented\nfun:cuserid=uninstrumented\nfun:daemon=uninstrumented\nfun:dcgettext=uninstrumented\nfun:dcngettext=uninstrumented\nfun:delete_module=uninstrumented\nfun:des_setparity=uninstrumented\nfun:dgettext=uninstrumented\nfun:difftime=uninstrumented\nfun:dirfd=uninstrumented\nfun:dirname=uninstrumented\nfun:div=uninstrumented\nfun:dl_iterate_phdr=uninstrumented\nfun:dladdr=uninstrumented\nfun:dladdr1=uninstrumented\nfun:dlclose=uninstrumented\nfun:dlerror=uninstrumented\nfun:dlinfo=uninstrumented\nfun:dlmopen=uninstrumented\nfun:dlopen=uninstrumented\nfun:dlsym=uninstrumented\nfun:dlvsym=uninstrumented\nfun:dngettext=uninstrumented\nfun:dprintf=uninstrumented\nfun:drand48=uninstrumented\nfun:drand48_r=uninstrumented\nfun:drem=uninstrumented\nfun:dremf=uninstrumented\nfun:dreml=uninstrumented\nfun:dup=uninstrumented\nfun:dup2=uninstrumented\nfun:dup3=uninstrumented\nfun:duplocale=uninstrumented\nfun:dysize=uninstrumented\nfun:eaccess=uninstrumented\nfun:ecb_crypt=uninstrumented\nfun:ecvt=uninstrumented\nfun:ecvt_r=uninstrumented\nfun:encrypt=uninstrumented\nfun:encrypt_r=uninstrumented\nfun:endaliasent=uninstrumented\nfun:endfsent=uninstrumented\nfun:endgrent=uninstrumented\nfun:endhostent=uninstrumented\nfun:endmntent=uninstrumented\nfun:endnetent=uninstrumented\nfun:endnetgrent=uninstrumented\nfun:endprotoent=uninstrumented\nfun:endpwent=uninstrumented\nfun:endrpcent=uninstrumented\nfun:endservent=uninstrumented\nfun:endsgent=uninstrumented\nfun:endspent=uninstrumented\nfun:endttyent=uninstrumented\nfun:endusershell=uninstrumented\nfun:endutent=uninstrumented\nfun:endutxent=uninstrumented\nfun:envz_add=uninstrumented\nfun:envz_entry=uninstrumented\nfun:envz_get=uninstrumented\nfun:envz_merge=uninstrumented\nfun:envz_remove=uninstrumented\nfun:envz_strip=uninstrumented\nfun:epoll_create=uninstrumented\nfun:epoll_create1=uninstrumented\nfun:epoll_ctl=uninstrumented\nfun:epoll_pwait=uninstrumented\nfun:epoll_wait=uninstrumented\nfu
n:erand48=uninstrumented\nfun:erand48_r=uninstrumented\nfun:erf=uninstrumented\nfun:erfc=uninstrumented\nfun:erfcf=uninstrumented\nfun:erfcl=uninstrumented\nfun:erff=uninstrumented\nfun:erfl=uninstrumented\nfun:err=uninstrumented\nfun:error=uninstrumented\nfun:error_at_line=uninstrumented\nfun:errx=uninstrumented\nfun:ether_aton=uninstrumented\nfun:ether_aton_r=uninstrumented\nfun:ether_hostton=uninstrumented\nfun:ether_line=uninstrumented\nfun:ether_ntoa=uninstrumented\nfun:ether_ntoa_r=uninstrumented\nfun:ether_ntohost=uninstrumented\nfun:euidaccess=uninstrumented\nfun:eventfd=uninstrumented\nfun:eventfd_read=uninstrumented\nfun:eventfd_write=uninstrumented\nfun:execl=uninstrumented\nfun:execle=uninstrumented\nfun:execlp=uninstrumented\nfun:execv=uninstrumented\nfun:execve=uninstrumented\nfun:execvp=uninstrumented\nfun:execvpe=uninstrumented\nfun:exit=uninstrumented\nfun:exp=uninstrumented\nfun:exp10=uninstrumented\nfun:exp10f=uninstrumented\nfun:exp10l=uninstrumented\nfun:exp2=uninstrumented\nfun:exp2f=uninstrumented\nfun:exp2l=uninstrumented\nfun:expf=uninstrumented\nfun:expl=uninstrumented\nfun:expm1=uninstrumented\nfun:expm1f=uninstrumented\nfun:expm1l=uninstrumented\nfun:fabs=uninstrumented\nfun:fabsf=uninstrumented\nfun:fabsl=uninstrumented\nfun:faccessat=uninstrumented\nfun:fallocate=uninstrumented\nfun:fallocate64=uninstrumented\nfun:fanotify_init=uninstrumented\nfun:fanotify_mark=uninstrumented\nfun:fattach=uninstrumented\nfun:fchdir=uninstrumented\nfun:fchflags=uninstrumented\nfun:fchmod=uninstrumented\nfun:fchmodat=uninstrumented\nfun:fchown=uninstrumented\nfun:fchownat=uninstrumented\nfun:fclose=uninstrumented\nfun:fcloseall=uninstrumented\nfun:fcntl=uninstrumented\nfun:fcrypt=uninstrumented\nfun:fcvt=uninstrumented\nfun:fcvt_r=uninstrumented\nfun:fdatasync=uninstrumented\nfun:fdetach=uninstrumented\nfun:fdim=uninstrumented\nfun:fdimf=uninstrumented\nfun:fdiml=uninstrumented\nfun:fdopen=uninstrumented\nfun:fdopendir=uninstrumented\nfun:feclearexcept=un
instrumented\nfun:fedisableexcept=uninstrumented\nfun:feenableexcept=uninstrumented\nfun:fegetenv=uninstrumented\nfun:fegetexcept=uninstrumented\nfun:fegetexceptflag=uninstrumented\nfun:fegetround=uninstrumented\nfun:feholdexcept=uninstrumented\nfun:feof=uninstrumented\nfun:feof_unlocked=uninstrumented\nfun:feraiseexcept=uninstrumented\nfun:ferror=uninstrumented\nfun:ferror_unlocked=uninstrumented\nfun:fesetenv=uninstrumented\nfun:fesetexceptflag=uninstrumented\nfun:fesetround=uninstrumented\nfun:fetestexcept=uninstrumented\nfun:feupdateenv=uninstrumented\nfun:fexecve=uninstrumented\nfun:fflush=uninstrumented\nfun:fflush_unlocked=uninstrumented\nfun:ffs=uninstrumented\nfun:ffsl=uninstrumented\nfun:ffsll=uninstrumented\nfun:fgetc=uninstrumented\nfun:fgetc_unlocked=uninstrumented\nfun:fgetgrent=uninstrumented\nfun:fgetgrent_r=uninstrumented\nfun:fgetpos=uninstrumented\nfun:fgetpos64=uninstrumented\nfun:fgetpwent=uninstrumented\nfun:fgetpwent_r=uninstrumented\nfun:fgets=uninstrumented\nfun:fgets_unlocked=uninstrumented\nfun:fgetsgent=uninstrumented\nfun:fgetsgent_r=uninstrumented\nfun:fgetspent=uninstrumented\nfun:fgetspent_r=uninstrumented\nfun:fgetwc=uninstrumented\nfun:fgetwc_unlocked=uninstrumented\nfun:fgetws=uninstrumented\nfun:fgetws_unlocked=uninstrumented\nfun:fgetxattr=uninstrumented\nfun:fileno=uninstrumented\nfun:fileno_unlocked=uninstrumented\nfun:finite=uninstrumented\nfun:finitef=uninstrumented\nfun:finitel=uninstrumented\nfun:flistxattr=uninstrumented\nfun:flock=uninstrumented\nfun:flockfile=uninstrumented\nfun:floor=uninstrumented\nfun:floorf=uninstrumented\nfun:floorl=uninstrumented\nfun:fma=uninstrumented\nfun:fmaf=uninstrumented\nfun:fmal=uninstrumented\nfun:fmax=uninstrumented\nfun:fmaxf=uninstrumented\nfun:fmaxl=uninstrumented\nfun:fmemopen=uninstrumented\nfun:fmin=uninstrumented\nfun:fminf=uninstrumented\nfun:fminl=uninstrumented\nfun:fmod=uninstrumented\nfun:fmodf=uninstrumented\nfun:fmodl=uninstrumented\nfun:fmtmsg=uninstrumented\nfun:fnmatch=u
ninstrumented\nfun:fopen=uninstrumented\nfun:fopen64=uninstrumented\nfun:fopencookie=uninstrumented\nfun:fork=uninstrumented\nfun:forkpty=uninstrumented\nfun:fpathconf=uninstrumented\nfun:fprintf=uninstrumented\nfun:fputc=uninstrumented\nfun:fputc_unlocked=uninstrumented\nfun:fputs=uninstrumented\nfun:fputs_unlocked=uninstrumented\nfun:fputwc=uninstrumented\nfun:fputwc_unlocked=uninstrumented\nfun:fputws=uninstrumented\nfun:fputws_unlocked=uninstrumented\nfun:fread=uninstrumented\nfun:fread_unlocked=uninstrumented\nfun:free=uninstrumented\nfun:freeaddrinfo=uninstrumented\nfun:freeifaddrs=uninstrumented\nfun:freelocale=uninstrumented\nfun:fremovexattr=uninstrumented\nfun:freopen=uninstrumented\nfun:freopen64=uninstrumented\nfun:frexp=uninstrumented\nfun:frexpf=uninstrumented\nfun:frexpl=uninstrumented\nfun:fscanf=uninstrumented\nfun:fseek=uninstrumented\nfun:fseeko=uninstrumented\nfun:fseeko64=uninstrumented\nfun:fsetpos=uninstrumented\nfun:fsetpos64=uninstrumented\nfun:fsetxattr=uninstrumented\nfun:fstat=uninstrumented\nfun:fstat64=uninstrumented\nfun:fstatat=uninstrumented\nfun:fstatat64=uninstrumented\nfun:fstatfs=uninstrumented\nfun:fstatfs64=uninstrumented\nfun:fstatvfs=uninstrumented\nfun:fstatvfs64=uninstrumented\nfun:fsync=uninstrumented\nfun:ftell=uninstrumented\nfun:ftello=uninstrumented\nfun:ftello64=uninstrumented\nfun:ftime=uninstrumented\nfun:ftok=uninstrumented\nfun:ftruncate=uninstrumented\nfun:ftruncate64=uninstrumented\nfun:ftrylockfile=uninstrumented\nfun:fts_children=uninstrumented\nfun:fts_close=uninstrumented\nfun:fts_open=uninstrumented\nfun:fts_read=uninstrumented\nfun:fts_set=uninstrumented\nfun:ftw=uninstrumented\nfun:ftw64=uninstrumented\nfun:funlockfile=uninstrumented\nfun:futimens=uninstrumented\nfun:futimes=uninstrumented\nfun:futimesat=uninstrumented\nfun:fwide=uninstrumented\nfun:fwprintf=uninstrumented\nfun:fwrite=uninstrumented\nfun:fwrite_unlocked=uninstrumented\nfun:fwscanf=uninstrumented\nfun:gai_cancel=uninstrumented\nfun:gai_err
or=uninstrumented\nfun:gai_strerror=uninstrumented\nfun:gai_suspend=uninstrumented\nfun:gamma=uninstrumented\nfun:gammaf=uninstrumented\nfun:gammal=uninstrumented\nfun:gcvt=uninstrumented\nfun:get_avphys_pages=uninstrumented\nfun:get_current_dir_name=uninstrumented\nfun:get_kernel_syms=uninstrumented\nfun:get_myaddress=uninstrumented\nfun:get_nprocs=uninstrumented\nfun:get_nprocs_conf=uninstrumented\nfun:get_phys_pages=uninstrumented\nfun:getaddrinfo=uninstrumented\nfun:getaddrinfo_a=uninstrumented\nfun:getaliasbyname=uninstrumented\nfun:getaliasbyname_r=uninstrumented\nfun:getaliasent=uninstrumented\nfun:getaliasent_r=uninstrumented\nfun:getauxval=uninstrumented\nfun:getc=uninstrumented\nfun:getc_unlocked=uninstrumented\nfun:getchar=uninstrumented\nfun:getchar_unlocked=uninstrumented\nfun:getcontext=uninstrumented\nfun:getcwd=uninstrumented\nfun:getdate=uninstrumented\nfun:getdate_r=uninstrumented\nfun:getdelim=uninstrumented\nfun:getdirentries=uninstrumented\nfun:getdirentries64=uninstrumented\nfun:getdomainname=uninstrumented\nfun:getdtablesize=uninstrumented\nfun:getegid=uninstrumented\nfun:getenv=uninstrumented\nfun:geteuid=uninstrumented\nfun:getfsent=uninstrumented\nfun:getfsfile=uninstrumented\nfun:getfsspec=uninstrumented\nfun:getgid=uninstrumented\nfun:getgrent=uninstrumented\nfun:getgrent_r=uninstrumented\nfun:getgrgid=uninstrumented\nfun:getgrgid_r=uninstrumented\nfun:getgrnam=uninstrumented\nfun:getgrnam_r=uninstrumented\nfun:getgrouplist=uninstrumented\nfun:getgroups=uninstrumented\nfun:gethostbyaddr=uninstrumented\nfun:gethostbyaddr_r=uninstrumented\nfun:gethostbyname=uninstrumented\nfun:gethostbyname2=uninstrumented\nfun:gethostbyname2_r=uninstrumented\nfun:gethostbyname_r=uninstrumented\nfun:gethostent=uninstrumented\nfun:gethostent_r=uninstrumented\nfun:gethostid=uninstrumented\nfun:gethostname=uninstrumented\nfun:getifaddrs=uninstrumented\nfun:getipv4sourcefilter=uninstrumented\nfun:getitimer=uninstrumented\nfun:getline=uninstrumented\nfun:getload
avg=uninstrumented\nfun:getlogin=uninstrumented\nfun:getlogin_r=uninstrumented\nfun:getmntent=uninstrumented\nfun:getmntent_r=uninstrumented\nfun:getmsg=uninstrumented\nfun:getnameinfo=uninstrumented\nfun:getnetbyaddr=uninstrumented\nfun:getnetbyaddr_r=uninstrumented\nfun:getnetbyname=uninstrumented\nfun:getnetbyname_r=uninstrumented\nfun:getnetent=uninstrumented\nfun:getnetent_r=uninstrumented\nfun:getnetgrent=uninstrumented\nfun:getnetgrent_r=uninstrumented\nfun:getnetname=uninstrumented\nfun:getopt=uninstrumented\nfun:getopt_long=uninstrumented\nfun:getopt_long_only=uninstrumented\nfun:getpagesize=uninstrumented\nfun:getpass=uninstrumented\nfun:getpeername=uninstrumented\nfun:getpgid=uninstrumented\nfun:getpgrp=uninstrumented\nfun:getpid=uninstrumented\nfun:getpmsg=uninstrumented\nfun:getppid=uninstrumented\nfun:getpriority=uninstrumented\nfun:getprotobyname=uninstrumented\nfun:getprotobyname_r=uninstrumented\nfun:getprotobynumber=uninstrumented\nfun:getprotobynumber_r=uninstrumented\nfun:getprotoent=uninstrumented\nfun:getprotoent_r=uninstrumented\nfun:getpt=uninstrumented\nfun:getpublickey=uninstrumented\nfun:getpw=uninstrumented\nfun:getpwent=uninstrumented\nfun:getpwent_r=uninstrumented\nfun:getpwnam=uninstrumented\nfun:getpwnam_r=uninstrumented\nfun:getpwuid=uninstrumented\nfun:getpwuid_r=uninstrumented\nfun:getresgid=uninstrumented\nfun:getresuid=uninstrumented\nfun:getrlimit=uninstrumented\nfun:getrlimit64=uninstrumented\nfun:getrpcbyname=uninstrumented\nfun:getrpcbyname_r=uninstrumented\nfun:getrpcbynumber=uninstrumented\nfun:getrpcbynumber_r=uninstrumented\nfun:getrpcent=uninstrumented\nfun:getrpcent_r=uninstrumented\nfun:getrpcport=uninstrumented\nfun:getrusage=uninstrumented\nfun:gets=uninstrumented\nfun:getsecretkey=uninstrumented\nfun:getservbyname=uninstrumented\nfun:getservbyname_r=uninstrumented\nfun:getservbyport=uninstrumented\nfun:getservbyport_r=uninstrumented\nfun:getservent=uninstrumented\nfun:getservent_r=uninstrumented\nfun:getsgent=uninst
rumented\nfun:getsgent_r=uninstrumented\nfun:getsgnam=uninstrumented\nfun:getsgnam_r=uninstrumented\nfun:getsid=uninstrumented\nfun:getsockname=uninstrumented\nfun:getsockopt=uninstrumented\nfun:getsourcefilter=uninstrumented\nfun:getspent=uninstrumented\nfun:getspent_r=uninstrumented\nfun:getspnam=uninstrumented\nfun:getspnam_r=uninstrumented\nfun:getsubopt=uninstrumented\nfun:gettext=uninstrumented\nfun:gettimeofday=uninstrumented\nfun:getttyent=uninstrumented\nfun:getttynam=uninstrumented\nfun:getuid=uninstrumented\nfun:getusershell=uninstrumented\nfun:getutent=uninstrumented\nfun:getutent_r=uninstrumented\nfun:getutid=uninstrumented\nfun:getutid_r=uninstrumented\nfun:getutline=uninstrumented\nfun:getutline_r=uninstrumented\nfun:getutmp=uninstrumented\nfun:getutmpx=uninstrumented\nfun:getutxent=uninstrumented\nfun:getutxid=uninstrumented\nfun:getutxline=uninstrumented\nfun:getw=uninstrumented\nfun:getwc=uninstrumented\nfun:getwc_unlocked=uninstrumented\nfun:getwchar=uninstrumented\nfun:getwchar_unlocked=uninstrumented\nfun:getwd=uninstrumented\nfun:getxattr=uninstrumented\nfun:glob=uninstrumented\nfun:glob64=uninstrumented\nfun:glob_pattern_p=uninstrumented\nfun:globfree=uninstrumented\nfun:globfree64=uninstrumented\nfun:gmtime=uninstrumented\nfun:gmtime_r=uninstrumented\nfun:gnu_dev_major=uninstrumented\nfun:gnu_dev_makedev=uninstrumented\nfun:gnu_dev_minor=uninstrumented\nfun:gnu_get_libc_release=uninstrumented\nfun:gnu_get_libc_version=uninstrumented\nfun:grantpt=uninstrumented\nfun:group_member=uninstrumented\nfun:gsignal=uninstrumented\nfun:gtty=uninstrumented\nfun:hasmntopt=uninstrumented\nfun:hcreate=uninstrumented\nfun:hcreate_r=uninstrumented\nfun:hdestroy=uninstrumented\nfun:hdestroy_r=uninstrumented\nfun:herror=uninstrumented\nfun:host2netname=uninstrumented\nfun:hsearch=uninstrumented\nfun:hsearch_r=uninstrumented\nfun:hstrerror=uninstrumented\nfun:htonl=uninstrumented\nfun:htons=uninstrumented\nfun:hypot=uninstrumented\nfun:hypotf=uninstrumented\nfun
:hypotl=uninstrumented\nfun:iconv=uninstrumented\nfun:iconv_close=uninstrumented\nfun:iconv_open=uninstrumented\nfun:idna_to_ascii_lz=uninstrumented\nfun:idna_to_unicode_lzlz=uninstrumented\nfun:if_freenameindex=uninstrumented\nfun:if_indextoname=uninstrumented\nfun:if_nameindex=uninstrumented\nfun:if_nametoindex=uninstrumented\nfun:ilogb=uninstrumented\nfun:ilogbf=uninstrumented\nfun:ilogbl=uninstrumented\nfun:imaxabs=uninstrumented\nfun:imaxdiv=uninstrumented\nfun:index=uninstrumented\nfun:inet6_opt_append=uninstrumented\nfun:inet6_opt_find=uninstrumented\nfun:inet6_opt_finish=uninstrumented\nfun:inet6_opt_get_val=uninstrumented\nfun:inet6_opt_init=uninstrumented\nfun:inet6_opt_next=uninstrumented\nfun:inet6_opt_set_val=uninstrumented\nfun:inet6_option_alloc=uninstrumented\nfun:inet6_option_append=uninstrumented\nfun:inet6_option_find=uninstrumented\nfun:inet6_option_init=uninstrumented\nfun:inet6_option_next=uninstrumented\nfun:inet6_option_space=uninstrumented\nfun:inet6_rth_add=uninstrumented\nfun:inet6_rth_getaddr=uninstrumented\nfun:inet6_rth_init=uninstrumented\nfun:inet6_rth_reverse=uninstrumented\nfun:inet6_rth_segments=uninstrumented\nfun:inet6_rth_space=uninstrumented\nfun:inet_addr=uninstrumented\nfun:inet_aton=uninstrumented\nfun:inet_lnaof=uninstrumented\nfun:inet_makeaddr=uninstrumented\nfun:inet_net_ntop=uninstrumented\nfun:inet_net_pton=uninstrumented\nfun:inet_neta=uninstrumented\nfun:inet_netof=uninstrumented\nfun:inet_network=uninstrumented\nfun:inet_nsap_addr=uninstrumented\nfun:inet_nsap_ntoa=uninstrumented\nfun:inet_ntoa=uninstrumented\nfun:inet_ntop=uninstrumented\nfun:inet_pton=uninstrumented\nfun:init_module=uninstrumented\nfun:initgroups=uninstrumented\nfun:initstate=uninstrumented\nfun:initstate_r=uninstrumented\nfun:innetgr=uninstrumented\nfun:inotify_add_watch=uninstrumented\nfun:inotify_init=uninstrumented\nfun:inotify_init1=uninstrumented\nfun:inotify_rm_watch=uninstrumented\nfun:insque=uninstrumented\nfun:ioctl=uninstrumented\nfun:i
operm=uninstrumented\nfun:iopl=uninstrumented\nfun:iruserok=uninstrumented\nfun:iruserok_af=uninstrumented\nfun:isalnum=uninstrumented\nfun:isalnum_l=uninstrumented\nfun:isalpha=uninstrumented\nfun:isalpha_l=uninstrumented\nfun:isascii=uninstrumented\nfun:isastream=uninstrumented\nfun:isatty=uninstrumented\nfun:isblank=uninstrumented\nfun:isblank_l=uninstrumented\nfun:iscntrl=uninstrumented\nfun:iscntrl_l=uninstrumented\nfun:isctype=uninstrumented\nfun:isdigit=uninstrumented\nfun:isdigit_l=uninstrumented\nfun:isfdtype=uninstrumented\nfun:isgraph=uninstrumented\nfun:isgraph_l=uninstrumented\nfun:isinf=uninstrumented\nfun:isinfd128=uninstrumented\nfun:isinfd32=uninstrumented\nfun:isinfd64=uninstrumented\nfun:isinff=uninstrumented\nfun:isinfl=uninstrumented\nfun:islower=uninstrumented\nfun:islower_l=uninstrumented\nfun:isnan=uninstrumented\nfun:isnanf=uninstrumented\nfun:isnanl=uninstrumented\nfun:isprint=uninstrumented\nfun:isprint_l=uninstrumented\nfun:ispunct=uninstrumented\nfun:ispunct_l=uninstrumented\nfun:isspace=uninstrumented\nfun:isspace_l=uninstrumented\nfun:isupper=uninstrumented\nfun:isupper_l=uninstrumented\nfun:iswalnum=uninstrumented\nfun:iswalnum_l=uninstrumented\nfun:iswalpha=uninstrumented\nfun:iswalpha_l=uninstrumented\nfun:iswblank=uninstrumented\nfun:iswblank_l=uninstrumented\nfun:iswcntrl=uninstrumented\nfun:iswcntrl_l=uninstrumented\nfun:iswctype=uninstrumented\nfun:iswctype_l=uninstrumented\nfun:iswdigit=uninstrumented\nfun:iswdigit_l=uninstrumented\nfun:iswgraph=uninstrumented\nfun:iswgraph_l=uninstrumented\nfun:iswlower=uninstrumented\nfun:iswlower_l=uninstrumented\nfun:iswprint=uninstrumented\nfun:iswprint_l=uninstrumented\nfun:iswpunct=uninstrumented\nfun:iswpunct_l=uninstrumented\nfun:iswspace=uninstrumented\nfun:iswspace_l=uninstrumented\nfun:iswupper=uninstrumented\nfun:iswupper_l=uninstrumented\nfun:iswxdigit=uninstrumented\nfun:iswxdigit_l=uninstrumented\nfun:isxdigit=uninstrumented\nfun:isxdigit_l=uninstrumented\nfun:j0=uninstrumented\
nfun:j0f=uninstrumented\nfun:j0l=uninstrumented\nfun:j1=uninstrumented\nfun:j1f=uninstrumented\nfun:j1l=uninstrumented\nfun:jn=uninstrumented\nfun:jnf=uninstrumented\nfun:jnl=uninstrumented\nfun:jrand48=uninstrumented\nfun:jrand48_r=uninstrumented\nfun:key_decryptsession=uninstrumented\nfun:key_decryptsession_pk=uninstrumented\nfun:key_encryptsession=uninstrumented\nfun:key_encryptsession_pk=uninstrumented\nfun:key_gendes=uninstrumented\nfun:key_get_conv=uninstrumented\nfun:key_secretkey_is_set=uninstrumented\nfun:key_setnet=uninstrumented\nfun:key_setsecret=uninstrumented\nfun:kill=uninstrumented\nfun:killpg=uninstrumented\nfun:klogctl=uninstrumented\nfun:l64a=uninstrumented\nfun:labs=uninstrumented\nfun:lchmod=uninstrumented\nfun:lchown=uninstrumented\nfun:lckpwdf=uninstrumented\nfun:lcong48=uninstrumented\nfun:lcong48_r=uninstrumented\nfun:ldexp=uninstrumented\nfun:ldexpf=uninstrumented\nfun:ldexpl=uninstrumented\nfun:ldiv=uninstrumented\nfun:lfind=uninstrumented\nfun:lgamma=uninstrumented\nfun:lgamma_r=uninstrumented\nfun:lgammaf=uninstrumented\nfun:lgammaf_r=uninstrumented\nfun:lgammal=uninstrumented\nfun:lgammal_r=uninstrumented\nfun:lgetxattr=uninstrumented\nfun:link=uninstrumented\nfun:linkat=uninstrumented\nfun:lio_listio=uninstrumented\nfun:lio_listio64=uninstrumented\nfun:listen=uninstrumented\nfun:listxattr=uninstrumented\nfun:llabs=uninstrumented\nfun:lldiv=uninstrumented\nfun:llistxattr=uninstrumented\nfun:llrint=uninstrumented\nfun:llrintf=uninstrumented\nfun:llrintl=uninstrumented\nfun:llround=uninstrumented\nfun:llroundf=uninstrumented\nfun:llroundl=uninstrumented\nfun:llseek=uninstrumented\nfun:localeconv=uninstrumented\nfun:localtime=uninstrumented\nfun:localtime_r=uninstrumented\nfun:lockf=uninstrumented\nfun:lockf64=uninstrumented\nfun:log=uninstrumented\nfun:log10=uninstrumented\nfun:log10f=uninstrumented\nfun:log10l=uninstrumented\nfun:log1p=uninstrumented\nfun:log1pf=uninstrumented\nfun:log1pl=uninstrumented\nfun:log2=uninstrumented\nfun:log2
f=uninstrumented\nfun:log2l=uninstrumented\nfun:logb=uninstrumented\nfun:logbf=uninstrumented\nfun:logbl=uninstrumented\nfun:logf=uninstrumented\nfun:login=uninstrumented\nfun:login_tty=uninstrumented\nfun:logl=uninstrumented\nfun:logout=uninstrumented\nfun:logwtmp=uninstrumented\nfun:longjmp=uninstrumented\nfun:lrand48=uninstrumented\nfun:lrand48_r=uninstrumented\nfun:lremovexattr=uninstrumented\nfun:lrint=uninstrumented\nfun:lrintf=uninstrumented\nfun:lrintl=uninstrumented\nfun:lround=uninstrumented\nfun:lroundf=uninstrumented\nfun:lroundl=uninstrumented\nfun:lsearch=uninstrumented\nfun:lseek=uninstrumented\nfun:lseek64=uninstrumented\nfun:lsetxattr=uninstrumented\nfun:lstat=uninstrumented\nfun:lstat64=uninstrumented\nfun:lutimes=uninstrumented\nfun:madvise=uninstrumented\nfun:makecontext=uninstrumented\nfun:mallinfo=uninstrumented\nfun:malloc=uninstrumented\nfun:malloc_get_state=uninstrumented\nfun:malloc_info=uninstrumented\nfun:malloc_set_state=uninstrumented\nfun:malloc_stats=uninstrumented\nfun:malloc_trim=uninstrumented\nfun:malloc_usable_size=uninstrumented\nfun:mallopt=uninstrumented\nfun:matherr=uninstrumented\nfun:mblen=uninstrumented\nfun:mbrlen=uninstrumented\nfun:mbrtoc16=uninstrumented\nfun:mbrtoc32=uninstrumented\nfun:mbrtowc=uninstrumented\nfun:mbsinit=uninstrumented\nfun:mbsnrtowcs=uninstrumented\nfun:mbsrtowcs=uninstrumented\nfun:mbstowcs=uninstrumented\nfun:mbtowc=uninstrumented\nfun:mcheck=uninstrumented\nfun:mcheck_check_all=uninstrumented\nfun:mcheck_pedantic=uninstrumented\nfun:mcount=uninstrumented\nfun:memalign=uninstrumented\nfun:memccpy=uninstrumented\nfun:memchr=uninstrumented\nfun:memcmp=uninstrumented\nfun:memcpy=uninstrumented\nfun:memfrob=uninstrumented\nfun:memmem=uninstrumented\nfun:memmove=uninstrumented\nfun:mempcpy=uninstrumented\nfun:memrchr=uninstrumented\nfun:memset=uninstrumented\nfun:mincore=uninstrumented\nfun:mkdir=uninstrumented\nfun:mkdirat=uninstrumented\nfun:mkdtemp=uninstrumented\nfun:mkfifo=uninstrumented\nfun:mkfi
foat=uninstrumented\nfun:mknod=uninstrumented\nfun:mknodat=uninstrumented\nfun:mkostemp=uninstrumented\nfun:mkostemp64=uninstrumented\nfun:mkostemps=uninstrumented\nfun:mkostemps64=uninstrumented\nfun:mkstemp=uninstrumented\nfun:mkstemp64=uninstrumented\nfun:mkstemps=uninstrumented\nfun:mkstemps64=uninstrumented\nfun:mktemp=uninstrumented\nfun:mktime=uninstrumented\nfun:mlock=uninstrumented\nfun:mlockall=uninstrumented\nfun:mmap=uninstrumented\nfun:mmap64=uninstrumented\nfun:modf=uninstrumented\nfun:modff=uninstrumented\nfun:modfl=uninstrumented\nfun:modify_ldt=uninstrumented\nfun:moncontrol=uninstrumented\nfun:monstartup=uninstrumented\nfun:mount=uninstrumented\nfun:mprobe=uninstrumented\nfun:mprotect=uninstrumented\nfun:mq_close=uninstrumented\nfun:mq_getattr=uninstrumented\nfun:mq_notify=uninstrumented\nfun:mq_open=uninstrumented\nfun:mq_receive=uninstrumented\nfun:mq_send=uninstrumented\nfun:mq_setattr=uninstrumented\nfun:mq_timedreceive=uninstrumented\nfun:mq_timedsend=uninstrumented\nfun:mq_unlink=uninstrumented\nfun:mrand48=uninstrumented\nfun:mrand48_r=uninstrumented\nfun:mremap=uninstrumented\nfun:msgctl=uninstrumented\nfun:msgget=uninstrumented\nfun:msgrcv=uninstrumented\nfun:msgsnd=uninstrumented\nfun:msync=uninstrumented\nfun:mtrace=uninstrumented\nfun:munlock=uninstrumented\nfun:munlockall=uninstrumented\nfun:munmap=uninstrumented\nfun:muntrace=uninstrumented\nfun:name_to_handle_at=uninstrumented\nfun:nan=uninstrumented\nfun:nanf=uninstrumented\nfun:nanl=uninstrumented\nfun:nanosleep=uninstrumented\nfun:nearbyint=uninstrumented\nfun:nearbyintf=uninstrumented\nfun:nearbyintl=uninstrumented\nfun:netname2host=uninstrumented\nfun:netname2user=uninstrumented\nfun:newlocale=uninstrumented\nfun:nextafter=uninstrumented\nfun:nextafterf=uninstrumented\nfun:nextafterl=uninstrumented\nfun:nexttoward=uninstrumented\nfun:nexttowardf=uninstrumented\nfun:nexttowardl=uninstrumented\nfun:nfsservctl=uninstrumented\nfun:nftw=uninstrumented\nfun:nftw64=uninstrumented\nfun:
ngettext=uninstrumented\nfun:nice=uninstrumented\nfun:nis_add=uninstrumented\nfun:nis_add_entry=uninstrumented\nfun:nis_addmember=uninstrumented\nfun:nis_checkpoint=uninstrumented\nfun:nis_clone_directory=uninstrumented\nfun:nis_clone_object=uninstrumented\nfun:nis_clone_result=uninstrumented\nfun:nis_creategroup=uninstrumented\nfun:nis_destroy_object=uninstrumented\nfun:nis_destroygroup=uninstrumented\nfun:nis_dir_cmp=uninstrumented\nfun:nis_domain_of=uninstrumented\nfun:nis_domain_of_r=uninstrumented\nfun:nis_first_entry=uninstrumented\nfun:nis_free_directory=uninstrumented\nfun:nis_free_object=uninstrumented\nfun:nis_free_request=uninstrumented\nfun:nis_freenames=uninstrumented\nfun:nis_freeresult=uninstrumented\nfun:nis_freeservlist=uninstrumented\nfun:nis_freetags=uninstrumented\nfun:nis_getnames=uninstrumented\nfun:nis_getservlist=uninstrumented\nfun:nis_ismember=uninstrumented\nfun:nis_leaf_of=uninstrumented\nfun:nis_leaf_of_r=uninstrumented\nfun:nis_lerror=uninstrumented\nfun:nis_list=uninstrumented\nfun:nis_local_directory=uninstrumented\nfun:nis_local_group=uninstrumented\nfun:nis_local_host=uninstrumented\nfun:nis_local_principal=uninstrumented\nfun:nis_lookup=uninstrumented\nfun:nis_mkdir=uninstrumented\nfun:nis_modify=uninstrumented\nfun:nis_modify_entry=uninstrumented\nfun:nis_name_of=uninstrumented\nfun:nis_name_of_r=uninstrumented\nfun:nis_next_entry=uninstrumented\nfun:nis_perror=uninstrumented\nfun:nis_ping=uninstrumented\nfun:nis_print_directory=uninstrumented\nfun:nis_print_entry=uninstrumented\nfun:nis_print_group=uninstrumented\nfun:nis_print_group_entry=uninstrumented\nfun:nis_print_link=uninstrumented\nfun:nis_print_object=uninstrumented\nfun:nis_print_result=uninstrumented\nfun:nis_print_rights=uninstrumented\nfun:nis_print_table=uninstrumented\nfun:nis_read_obj=uninstrumented\nfun:nis_remove=uninstrumented\nfun:nis_remove_entry=uninstrumented\nfun:nis_removemember=uninstrumented\nfun:nis_rmdir=uninstrumented\nfun:nis_servstate=uninstrumente
d\nfun:nis_sperrno=uninstrumented\nfun:nis_sperror=uninstrumented\nfun:nis_sperror_r=uninstrumented\nfun:nis_stats=uninstrumented\nfun:nis_verifygroup=uninstrumented\nfun:nis_write_obj=uninstrumented\nfun:nl_langinfo=uninstrumented\nfun:nl_langinfo_l=uninstrumented\nfun:nrand48=uninstrumented\nfun:nrand48_r=uninstrumented\nfun:ns_datetosecs=uninstrumented\nfun:ns_format_ttl=uninstrumented\nfun:ns_get16=uninstrumented\nfun:ns_get32=uninstrumented\nfun:ns_initparse=uninstrumented\nfun:ns_makecanon=uninstrumented\nfun:ns_msg_getflag=uninstrumented\nfun:ns_name_compress=uninstrumented\nfun:ns_name_ntol=uninstrumented\nfun:ns_name_ntop=uninstrumented\nfun:ns_name_pack=uninstrumented\nfun:ns_name_pton=uninstrumented\nfun:ns_name_rollback=uninstrumented\nfun:ns_name_skip=uninstrumented\nfun:ns_name_uncompress=uninstrumented\nfun:ns_name_unpack=uninstrumented\nfun:ns_parse_ttl=uninstrumented\nfun:ns_parserr=uninstrumented\nfun:ns_put16=uninstrumented\nfun:ns_put32=uninstrumented\nfun:ns_samedomain=uninstrumented\nfun:ns_samename=uninstrumented\nfun:ns_skiprr=uninstrumented\nfun:ns_sprintrr=uninstrumented\nfun:ns_sprintrrf=uninstrumented\nfun:ns_subdomain=uninstrumented\nfun:ntohl=uninstrumented\nfun:ntohs=uninstrumented\nfun:ntp_adjtime=uninstrumented\nfun:ntp_gettime=uninstrumented\nfun:ntp_gettimex=uninstrumented\nfun:obstack_free=uninstrumented\nfun:obstack_printf=uninstrumented\nfun:obstack_vprintf=uninstrumented\nfun:on_exit=uninstrumented\nfun:open=uninstrumented\nfun:open64=uninstrumented\nfun:open_by_handle_at=uninstrumented\nfun:open_memstream=uninstrumented\nfun:open_wmemstream=uninstrumented\nfun:openat=uninstrumented\nfun:openat64=uninstrumented\nfun:opendir=uninstrumented\nfun:openlog=uninstrumented\nfun:openpty=uninstrumented\nfun:parse_printf_format=uninstrumented\nfun:passwd2des=uninstrumented\nfun:pathconf=uninstrumented\nfun:pause=uninstrumented\nfun:pclose=uninstrumented\nfun:perror=uninstrumented\nfun:personality=uninstrumented\nfun:pipe=uninstrumented\n
fun:pipe2=uninstrumented\nfun:pivot_root=uninstrumented\nfun:pmap_getmaps=uninstrumented\nfun:pmap_getport=uninstrumented\nfun:pmap_rmtcall=uninstrumented\nfun:pmap_set=uninstrumented\nfun:pmap_unset=uninstrumented\nfun:poll=uninstrumented\nfun:popen=uninstrumented\nfun:posix_fadvise=uninstrumented\nfun:posix_fadvise64=uninstrumented\nfun:posix_fallocate=uninstrumented\nfun:posix_fallocate64=uninstrumented\nfun:posix_madvise=uninstrumented\nfun:posix_memalign=uninstrumented\nfun:posix_openpt=uninstrumented\nfun:posix_spawn=uninstrumented\nfun:posix_spawn_file_actions_addclose=uninstrumented\nfun:posix_spawn_file_actions_adddup2=uninstrumented\nfun:posix_spawn_file_actions_addopen=uninstrumented\nfun:posix_spawn_file_actions_destroy=uninstrumented\nfun:posix_spawn_file_actions_init=uninstrumented\nfun:posix_spawnattr_destroy=uninstrumented\nfun:posix_spawnattr_getflags=uninstrumented\nfun:posix_spawnattr_getpgroup=uninstrumented\nfun:posix_spawnattr_getschedparam=uninstrumented\nfun:posix_spawnattr_getschedpolicy=uninstrumented\nfun:posix_spawnattr_getsigdefault=uninstrumented\nfun:posix_spawnattr_getsigmask=uninstrumented\nfun:posix_spawnattr_init=uninstrumented\nfun:posix_spawnattr_setflags=uninstrumented\nfun:posix_spawnattr_setpgroup=uninstrumented\nfun:posix_spawnattr_setschedparam=uninstrumented\nfun:posix_spawnattr_setschedpolicy=uninstrumented\nfun:posix_spawnattr_setsigdefault=uninstrumented\nfun:posix_spawnattr_setsigmask=uninstrumented\nfun:posix_spawnp=uninstrumented\nfun:pow=uninstrumented\nfun:pow10=uninstrumented\nfun:pow10f=uninstrumented\nfun:pow10l=uninstrumented\nfun:powf=uninstrumented\nfun:powl=uninstrumented\nfun:ppoll=uninstrumented\nfun:prctl=uninstrumented\nfun:pread=uninstrumented\nfun:pread64=uninstrumented\nfun:preadv=uninstrumented\nfun:preadv64=uninstrumented\nfun:printf=uninstrumented\nfun:printf_size=uninstrumented\nfun:printf_size_info=uninstrumented\nfun:prlimit=uninstrumented\nfun:prlimit64=uninstrumented\nfun:process_vm_readv=unins
trumented\nfun:process_vm_writev=uninstrumented\nfun:profil=uninstrumented\nfun:pselect=uninstrumented\nfun:psiginfo=uninstrumented\nfun:psignal=uninstrumented\nfun:pthread_atfork=uninstrumented\nfun:pthread_attr_destroy=uninstrumented\nfun:pthread_attr_getaffinity_np=uninstrumented\nfun:pthread_attr_getdetachstate=uninstrumented\nfun:pthread_attr_getguardsize=uninstrumented\nfun:pthread_attr_getinheritsched=uninstrumented\nfun:pthread_attr_getschedparam=uninstrumented\nfun:pthread_attr_getschedpolicy=uninstrumented\nfun:pthread_attr_getscope=uninstrumented\nfun:pthread_attr_getstack=uninstrumented\nfun:pthread_attr_getstackaddr=uninstrumented\nfun:pthread_attr_getstacksize=uninstrumented\nfun:pthread_attr_init=uninstrumented\nfun:pthread_attr_setaffinity_np=uninstrumented\nfun:pthread_attr_setdetachstate=uninstrumented\nfun:pthread_attr_setguardsize=uninstrumented\nfun:pthread_attr_setinheritsched=uninstrumented\nfun:pthread_attr_setschedparam=uninstrumented\nfun:pthread_attr_setschedpolicy=uninstrumented\nfun:pthread_attr_setscope=uninstrumented\nfun:pthread_attr_setstack=uninstrumented\nfun:pthread_attr_setstackaddr=uninstrumented\nfun:pthread_attr_setstacksize=uninstrumented\nfun:pthread_barrier_destroy=uninstrumented\nfun:pthread_barrier_init=uninstrumented\nfun:pthread_barrier_wait=uninstrumented\nfun:pthread_barrierattr_destroy=uninstrumented\nfun:pthread_barrierattr_getpshared=uninstrumented\nfun:pthread_barrierattr_init=uninstrumented\nfun:pthread_barrierattr_setpshared=uninstrumented\nfun:pthread_cancel=uninstrumented\nfun:pthread_cond_broadcast=uninstrumented\nfun:pthread_cond_destroy=uninstrumented\nfun:pthread_cond_init=uninstrumented\nfun:pthread_cond_signal=uninstrumented\nfun:pthread_cond_timedwait=uninstrumented\nfun:pthread_cond_wait=uninstrumented\nfun:pthread_condattr_destroy=uninstrumented\nfun:pthread_condattr_getclock=uninstrumented\nfun:pthread_condattr_getpshared=uninstrumented\nfun:pthread_condattr_init=uninstrumented\nfun:pthread_condattr_
setclock=uninstrumented\nfun:pthread_condattr_setpshared=uninstrumented\nfun:pthread_create=uninstrumented\nfun:pthread_detach=uninstrumented\nfun:pthread_equal=uninstrumented\nfun:pthread_exit=uninstrumented\nfun:pthread_getaffinity_np=uninstrumented\nfun:pthread_getattr_default_np=uninstrumented\nfun:pthread_getattr_np=uninstrumented\nfun:pthread_getconcurrency=uninstrumented\nfun:pthread_getcpuclockid=uninstrumented\nfun:pthread_getname_np=uninstrumented\nfun:pthread_getschedparam=uninstrumented\nfun:pthread_getspecific=uninstrumented\nfun:pthread_join=uninstrumented\nfun:pthread_key_create=uninstrumented\nfun:pthread_key_delete=uninstrumented\nfun:pthread_kill=uninstrumented\nfun:pthread_kill_other_threads_np=uninstrumented\nfun:pthread_mutex_consistent=uninstrumented\nfun:pthread_mutex_consistent_np=uninstrumented\nfun:pthread_mutex_destroy=uninstrumented\nfun:pthread_mutex_getprioceiling=uninstrumented\nfun:pthread_mutex_init=uninstrumented\nfun:pthread_mutex_lock=uninstrumented\nfun:pthread_mutex_setprioceiling=uninstrumented\nfun:pthread_mutex_timedlock=uninstrumented\nfun:pthread_mutex_trylock=uninstrumented\nfun:pthread_mutex_unlock=uninstrumented\nfun:pthread_mutexattr_destroy=uninstrumented\nfun:pthread_mutexattr_getkind_np=uninstrumented\nfun:pthread_mutexattr_getprioceiling=uninstrumented\nfun:pthread_mutexattr_getprotocol=uninstrumented\nfun:pthread_mutexattr_getpshared=uninstrumented\nfun:pthread_mutexattr_getrobust=uninstrumented\nfun:pthread_mutexattr_getrobust_np=uninstrumented\nfun:pthread_mutexattr_gettype=uninstrumented\nfun:pthread_mutexattr_init=uninstrumented\nfun:pthread_mutexattr_setkind_np=uninstrumented\nfun:pthread_mutexattr_setprioceiling=uninstrumented\nfun:pthread_mutexattr_setprotocol=uninstrumented\nfun:pthread_mutexattr_setpshared=uninstrumented\nfun:pthread_mutexattr_setrobust=uninstrumented\nfun:pthread_mutexattr_setrobust_np=uninstrumented\nfun:pthread_mutexattr_settype=uninstrumented\nfun:pthread_once=uninstrumented\nfun:pthre
ad_rwlock_destroy=uninstrumented\nfun:pthread_rwlock_init=uninstrumented\nfun:pthread_rwlock_rdlock=uninstrumented\nfun:pthread_rwlock_timedrdlock=uninstrumented\nfun:pthread_rwlock_timedwrlock=uninstrumented\nfun:pthread_rwlock_tryrdlock=uninstrumented\nfun:pthread_rwlock_trywrlock=uninstrumented\nfun:pthread_rwlock_unlock=uninstrumented\nfun:pthread_rwlock_wrlock=uninstrumented\nfun:pthread_rwlockattr_destroy=uninstrumented\nfun:pthread_rwlockattr_getkind_np=uninstrumented\nfun:pthread_rwlockattr_getpshared=uninstrumented\nfun:pthread_rwlockattr_init=uninstrumented\nfun:pthread_rwlockattr_setkind_np=uninstrumented\nfun:pthread_rwlockattr_setpshared=uninstrumented\nfun:pthread_self=uninstrumented\nfun:pthread_setaffinity_np=uninstrumented\nfun:pthread_setattr_default_np=uninstrumented\nfun:pthread_setcancelstate=uninstrumented\nfun:pthread_setcanceltype=uninstrumented\nfun:pthread_setconcurrency=uninstrumented\nfun:pthread_setname_np=uninstrumented\nfun:pthread_setschedparam=uninstrumented\nfun:pthread_setschedprio=uninstrumented\nfun:pthread_setspecific=uninstrumented\nfun:pthread_sigmask=uninstrumented\nfun:pthread_sigqueue=uninstrumented\nfun:pthread_spin_destroy=uninstrumented\nfun:pthread_spin_init=uninstrumented\nfun:pthread_spin_lock=uninstrumented\nfun:pthread_spin_trylock=uninstrumented\nfun:pthread_spin_unlock=uninstrumented\nfun:pthread_testcancel=uninstrumented\nfun:pthread_timedjoin_np=uninstrumented\nfun:pthread_tryjoin_np=uninstrumented\nfun:pthread_yield=uninstrumented\nfun:ptrace=uninstrumented\nfun:ptsname=uninstrumented\nfun:ptsname_r=uninstrumented\nfun:putc=uninstrumented\nfun:putc_unlocked=uninstrumented\nfun:putchar=uninstrumented\nfun:putchar_unlocked=uninstrumented\nfun:putenv=uninstrumented\nfun:putgrent=uninstrumented\nfun:putmsg=uninstrumented\nfun:putpmsg=uninstrumented\nfun:putpwent=uninstrumented\nfun:puts=uninstrumented\nfun:putsgent=uninstrumented\nfun:putspent=uninstrumented\nfun:pututline=uninstrumented\nfun:pututxline=uninstrumen
ted\nfun:putw=uninstrumented\nfun:putwc=uninstrumented\nfun:putwc_unlocked=uninstrumented\nfun:putwchar=uninstrumented\nfun:putwchar_unlocked=uninstrumented\nfun:pvalloc=uninstrumented\nfun:pwrite=uninstrumented\nfun:pwrite64=uninstrumented\nfun:pwritev=uninstrumented\nfun:pwritev64=uninstrumented\nfun:qecvt=uninstrumented\nfun:qecvt_r=uninstrumented\nfun:qfcvt=uninstrumented\nfun:qfcvt_r=uninstrumented\nfun:qgcvt=uninstrumented\nfun:qsort=uninstrumented\nfun:qsort_r=uninstrumented\nfun:query_module=uninstrumented\nfun:quick_exit=uninstrumented\nfun:quotactl=uninstrumented\nfun:raise=uninstrumented\nfun:rand=uninstrumented\nfun:rand_r=uninstrumented\nfun:random=uninstrumented\nfun:random_r=uninstrumented\nfun:rawmemchr=uninstrumented\nfun:rcmd=uninstrumented\nfun:rcmd_af=uninstrumented\nfun:re_comp=uninstrumented\nfun:re_compile_fastmap=uninstrumented\nfun:re_compile_pattern=uninstrumented\nfun:re_exec=uninstrumented\nfun:re_match=uninstrumented\nfun:re_match_2=uninstrumented\nfun:re_search=uninstrumented\nfun:re_search_2=uninstrumented\nfun:re_set_registers=uninstrumented\nfun:re_set_syntax=uninstrumented\nfun:read=uninstrumented\nfun:readColdStartFile=uninstrumented\nfun:readahead=uninstrumented\nfun:readdir=uninstrumented\nfun:readdir64=uninstrumented\nfun:readdir64_r=uninstrumented\nfun:readdir_r=uninstrumented\nfun:readlink=uninstrumented\nfun:readlinkat=uninstrumented\nfun:readv=uninstrumented\nfun:realloc=uninstrumented\nfun:realpath=uninstrumented\nfun:reboot=uninstrumented\nfun:recv=uninstrumented\nfun:recvfrom=uninstrumented\nfun:recvmmsg=uninstrumented\nfun:recvmsg=uninstrumented\nfun:regcomp=uninstrumented\nfun:regerror=uninstrumented\nfun:regexec=uninstrumented\nfun:regfree=uninstrumented\nfun:register_printf_function=uninstrumented\nfun:register_printf_modifier=uninstrumented\nfun:register_printf_specifier=uninstrumented\nfun:register_printf_type=uninstrumented\nfun:registerrpc=uninstrumented\nfun:remainder=uninstrumented\nfun:remainderf=uninstrumented
\nfun:remainderl=uninstrumented\nfun:remap_file_pages=uninstrumented\nfun:remove=uninstrumented\nfun:removexattr=uninstrumented\nfun:remque=uninstrumented\nfun:remquo=uninstrumented\nfun:remquof=uninstrumented\nfun:remquol=uninstrumented\nfun:rename=uninstrumented\nfun:renameat=uninstrumented\nfun:res_gethostbyaddr=uninstrumented\nfun:res_gethostbyname=uninstrumented\nfun:res_gethostbyname2=uninstrumented\nfun:res_send_setqhook=uninstrumented\nfun:res_send_setrhook=uninstrumented\nfun:revoke=uninstrumented\nfun:rewind=uninstrumented\nfun:rewinddir=uninstrumented\nfun:rexec=uninstrumented\nfun:rexec_af=uninstrumented\nfun:rindex=uninstrumented\nfun:rint=uninstrumented\nfun:rintf=uninstrumented\nfun:rintl=uninstrumented\nfun:rmdir=uninstrumented\nfun:round=uninstrumented\nfun:roundf=uninstrumented\nfun:roundl=uninstrumented\nfun:rpmatch=uninstrumented\nfun:rresvport=uninstrumented\nfun:rresvport_af=uninstrumented\nfun:rtime=uninstrumented\nfun:ruserok=uninstrumented\nfun:ruserok_af=uninstrumented\nfun:ruserpass=uninstrumented\nfun:sbrk=uninstrumented\nfun:scalb=uninstrumented\nfun:scalbf=uninstrumented\nfun:scalbl=uninstrumented\nfun:scalbln=uninstrumented\nfun:scalblnf=uninstrumented\nfun:scalblnl=uninstrumented\nfun:scalbn=uninstrumented\nfun:scalbnf=uninstrumented\nfun:scalbnl=uninstrumented\nfun:scandir=uninstrumented\nfun:scandir64=uninstrumented\nfun:scandirat=uninstrumented\nfun:scandirat64=uninstrumented\nfun:scanf=uninstrumented\nfun:sched_get_priority_max=uninstrumented\nfun:sched_get_priority_min=uninstrumented\nfun:sched_getaffinity=uninstrumented\nfun:sched_getcpu=uninstrumented\nfun:sched_getparam=uninstrumented\nfun:sched_getscheduler=uninstrumented\nfun:sched_rr_get_interval=uninstrumented\nfun:sched_setaffinity=uninstrumented\nfun:sched_setparam=uninstrumented\nfun:sched_setscheduler=uninstrumented\nfun:sched_yield=uninstrumented\nfun:secure_getenv=uninstrumented\nfun:seed48=uninstrumented\nfun:seed48_r=uninstrumented\nfun:seekdir=uninstrumented\nfun:
select=uninstrumented\nfun:sem_close=uninstrumented\nfun:sem_destroy=uninstrumented\nfun:sem_getvalue=uninstrumented\nfun:sem_init=uninstrumented\nfun:sem_open=uninstrumented\nfun:sem_post=uninstrumented\nfun:sem_timedwait=uninstrumented\nfun:sem_trywait=uninstrumented\nfun:sem_unlink=uninstrumented\nfun:sem_wait=uninstrumented\nfun:semctl=uninstrumented\nfun:semget=uninstrumented\nfun:semop=uninstrumented\nfun:semtimedop=uninstrumented\nfun:send=uninstrumented\nfun:sendfile=uninstrumented\nfun:sendfile64=uninstrumented\nfun:sendmmsg=uninstrumented\nfun:sendmsg=uninstrumented\nfun:sendto=uninstrumented\nfun:setaliasent=uninstrumented\nfun:setbuf=uninstrumented\nfun:setbuffer=uninstrumented\nfun:setcontext=uninstrumented\nfun:setdomainname=uninstrumented\nfun:setegid=uninstrumented\nfun:setenv=uninstrumented\nfun:seteuid=uninstrumented\nfun:setfsent=uninstrumented\nfun:setfsgid=uninstrumented\nfun:setfsuid=uninstrumented\nfun:setgid=uninstrumented\nfun:setgrent=uninstrumented\nfun:setgroups=uninstrumented\nfun:sethostent=uninstrumented\nfun:sethostid=uninstrumented\nfun:sethostname=uninstrumented\nfun:setipv4sourcefilter=uninstrumented\nfun:setitimer=uninstrumented\nfun:setjmp=uninstrumented\nfun:setkey=uninstrumented\nfun:setkey_r=uninstrumented\nfun:setlinebuf=uninstrumented\nfun:setlocale=uninstrumented\nfun:setlogin=uninstrumented\nfun:setlogmask=uninstrumented\nfun:setmntent=uninstrumented\nfun:setnetent=uninstrumented\nfun:setnetgrent=uninstrumented\nfun:setns=uninstrumented\nfun:setpgid=uninstrumented\nfun:setpgrp=uninstrumented\nfun:setpriority=uninstrumented\nfun:setprotoent=uninstrumented\nfun:setpwent=uninstrumented\nfun:setregid=uninstrumented\nfun:setresgid=uninstrumented\nfun:setresuid=uninstrumented\nfun:setreuid=uninstrumented\nfun:setrlimit=uninstrumented\nfun:setrlimit64=uninstrumented\nfun:setrpcent=uninstrumented\nfun:setservent=uninstrumented\nfun:setsgent=uninstrumented\nfun:setsid=uninstrumented\nfun:setsockopt=uninstrumented\nfun:setsourcefilt
er=uninstrumented\nfun:setspent=uninstrumented\nfun:setstate=uninstrumented\nfun:setstate_r=uninstrumented\nfun:settimeofday=uninstrumented\nfun:setttyent=uninstrumented\nfun:setuid=uninstrumented\nfun:setusershell=uninstrumented\nfun:setutent=uninstrumented\nfun:setutxent=uninstrumented\nfun:setvbuf=uninstrumented\nfun:setxattr=uninstrumented\nfun:sgetsgent=uninstrumented\nfun:sgetsgent_r=uninstrumented\nfun:sgetspent=uninstrumented\nfun:sgetspent_r=uninstrumented\nfun:shm_open=uninstrumented\nfun:shm_unlink=uninstrumented\nfun:shmat=uninstrumented\nfun:shmctl=uninstrumented\nfun:shmdt=uninstrumented\nfun:shmget=uninstrumented\nfun:shutdown=uninstrumented\nfun:sigaction=uninstrumented\nfun:sigaddset=uninstrumented\nfun:sigaltstack=uninstrumented\nfun:sigandset=uninstrumented\nfun:sigblock=uninstrumented\nfun:sigdelset=uninstrumented\nfun:sigemptyset=uninstrumented\nfun:sigfillset=uninstrumented\nfun:siggetmask=uninstrumented\nfun:sighold=uninstrumented\nfun:sigignore=uninstrumented\nfun:siginterrupt=uninstrumented\nfun:sigisemptyset=uninstrumented\nfun:sigismember=uninstrumented\nfun:siglongjmp=uninstrumented\nfun:signal=uninstrumented\nfun:signalfd=uninstrumented\nfun:significand=uninstrumented\nfun:significandf=uninstrumented\nfun:significandl=uninstrumented\nfun:sigorset=uninstrumented\nfun:sigpause=uninstrumented\nfun:sigpending=uninstrumented\nfun:sigprocmask=uninstrumented\nfun:sigqueue=uninstrumented\nfun:sigrelse=uninstrumented\nfun:sigreturn=uninstrumented\nfun:sigset=uninstrumented\nfun:sigsetmask=uninstrumented\nfun:sigstack=uninstrumented\nfun:sigsuspend=uninstrumented\nfun:sigtimedwait=uninstrumented\nfun:sigvec=uninstrumented\nfun:sigwait=uninstrumented\nfun:sigwaitinfo=uninstrumented\nfun:sin=uninstrumented\nfun:sincos=uninstrumented\nfun:sincosf=uninstrumented\nfun:sincosl=uninstrumented\nfun:sinf=uninstrumented\nfun:sinh=uninstrumented\nfun:sinhf=uninstrumented\nfun:sinhl=uninstrumented\nfun:sinl=uninstrumented\nfun:sleep=uninstrumented\nfun:snprin
tf=uninstrumented\nfun:sockatmark=uninstrumented\nfun:socket=uninstrumented\nfun:socketpair=uninstrumented\nfun:splice=uninstrumented\nfun:sprintf=uninstrumented\nfun:sprofil=uninstrumented\nfun:sqrt=uninstrumented\nfun:sqrtf=uninstrumented\nfun:sqrtl=uninstrumented\nfun:srand=uninstrumented\nfun:srand48=uninstrumented\nfun:srand48_r=uninstrumented\nfun:srandom=uninstrumented\nfun:srandom_r=uninstrumented\nfun:sscanf=uninstrumented\nfun:ssignal=uninstrumented\nfun:sstk=uninstrumented\nfun:stat=uninstrumented\nfun:stat64=uninstrumented\nfun:statfs=uninstrumented\nfun:statfs64=uninstrumented\nfun:statvfs=uninstrumented\nfun:statvfs64=uninstrumented\nfun:step=uninstrumented\nfun:stime=uninstrumented\nfun:stpcpy=uninstrumented\nfun:stpncpy=uninstrumented\nfun:strcasecmp=uninstrumented\nfun:strcasecmp_l=uninstrumented\nfun:strcasestr=uninstrumented\nfun:strcat=uninstrumented\nfun:strchr=uninstrumented\nfun:strchrnul=uninstrumented\nfun:strcmp=uninstrumented\nfun:strcoll=uninstrumented\nfun:strcoll_l=uninstrumented\nfun:strcpy=uninstrumented\nfun:strcspn=uninstrumented\nfun:strdup=uninstrumented\nfun:strerror=uninstrumented\nfun:strerror_l=uninstrumented\nfun:strerror_r=uninstrumented\nfun:strfmon=uninstrumented\nfun:strfmon_l=uninstrumented\nfun:strfry=uninstrumented\nfun:strftime=uninstrumented\nfun:strftime_l=uninstrumented\nfun:strlen=uninstrumented\nfun:strncasecmp=uninstrumented\nfun:strncasecmp_l=uninstrumented\nfun:strncat=uninstrumented\nfun:strncmp=uninstrumented\nfun:strncpy=uninstrumented\nfun:strndup=uninstrumented\nfun:strnlen=uninstrumented\nfun:strpbrk=uninstrumented\nfun:strptime=uninstrumented\nfun:strptime_l=uninstrumented\nfun:strrchr=uninstrumented\nfun:strsep=uninstrumented\nfun:strsignal=uninstrumented\nfun:strspn=uninstrumented\nfun:strstr=uninstrumented\nfun:strtod=uninstrumented\nfun:strtod_l=uninstrumented\nfun:strtof=uninstrumented\nfun:strtof_l=uninstrumented\nfun:strtoimax=uninstrumented\nfun:strtok=uninstrumented\nfun:strtok_r=uninstrumented
\nfun:strtol=uninstrumented\nfun:strtol_l=uninstrumented\nfun:strtold=uninstrumented\nfun:strtold_l=uninstrumented\nfun:strtoll=uninstrumented\nfun:strtoll_l=uninstrumented\nfun:strtoq=uninstrumented\nfun:strtoul=uninstrumented\nfun:strtoul_l=uninstrumented\nfun:strtoull=uninstrumented\nfun:strtoull_l=uninstrumented\nfun:strtoumax=uninstrumented\nfun:strtouq=uninstrumented\nfun:strverscmp=uninstrumented\nfun:strxfrm=uninstrumented\nfun:strxfrm_l=uninstrumented\nfun:stty=uninstrumented\nfun:svc_exit=uninstrumented\nfun:svc_getreq=uninstrumented\nfun:svc_getreq_common=uninstrumented\nfun:svc_getreq_poll=uninstrumented\nfun:svc_getreqset=uninstrumented\nfun:svc_register=uninstrumented\nfun:svc_run=uninstrumented\nfun:svc_sendreply=uninstrumented\nfun:svc_unregister=uninstrumented\nfun:svcerr_auth=uninstrumented\nfun:svcerr_decode=uninstrumented\nfun:svcerr_noproc=uninstrumented\nfun:svcerr_noprog=uninstrumented\nfun:svcerr_progvers=uninstrumented\nfun:svcerr_systemerr=uninstrumented\nfun:svcerr_weakauth=uninstrumented\nfun:svcfd_create=uninstrumented\nfun:svcraw_create=uninstrumented\nfun:svctcp_create=uninstrumented\nfun:svcudp_bufcreate=uninstrumented\nfun:svcudp_create=uninstrumented\nfun:svcudp_enablecache=uninstrumented\nfun:svcunix_create=uninstrumented\nfun:svcunixfd_create=uninstrumented\nfun:swab=uninstrumented\nfun:swapcontext=uninstrumented\nfun:swapoff=uninstrumented\nfun:swapon=uninstrumented\nfun:swprintf=uninstrumented\nfun:swscanf=uninstrumented\nfun:symlink=uninstrumented\nfun:symlinkat=uninstrumented\nfun:sync=uninstrumented\nfun:sync_file_range=uninstrumented\nfun:syncfs=uninstrumented\nfun:syscall=uninstrumented\nfun:sysconf=uninstrumented\nfun:sysctl=uninstrumented\nfun:sysinfo=uninstrumented\nfun:syslog=uninstrumented\nfun:system=uninstrumented\nfun:sysv_signal=uninstrumented\nfun:tan=uninstrumented\nfun:tanf=uninstrumented\nfun:tanh=uninstrumented\nfun:tanhf=uninstrumented\nfun:tanhl=uninstrumented\nfun:tanl=uninstrumented\nfun:tcdrain=uninstrume
nted\nfun:tcflow=uninstrumented\nfun:tcflush=uninstrumented\nfun:tcgetattr=uninstrumented\nfun:tcgetpgrp=uninstrumented\nfun:tcgetsid=uninstrumented\nfun:tcsendbreak=uninstrumented\nfun:tcsetattr=uninstrumented\nfun:tcsetpgrp=uninstrumented\nfun:td_init=uninstrumented\nfun:td_log=uninstrumented\nfun:td_symbol_list=uninstrumented\nfun:td_ta_clear_event=uninstrumented\nfun:td_ta_delete=uninstrumented\nfun:td_ta_enable_stats=uninstrumented\nfun:td_ta_event_addr=uninstrumented\nfun:td_ta_event_getmsg=uninstrumented\nfun:td_ta_get_nthreads=uninstrumented\nfun:td_ta_get_ph=uninstrumented\nfun:td_ta_get_stats=uninstrumented\nfun:td_ta_map_id2thr=uninstrumented\nfun:td_ta_map_lwp2thr=uninstrumented\nfun:td_ta_new=uninstrumented\nfun:td_ta_reset_stats=uninstrumented\nfun:td_ta_set_event=uninstrumented\nfun:td_ta_setconcurrency=uninstrumented\nfun:td_ta_thr_iter=uninstrumented\nfun:td_ta_tsd_iter=uninstrumented\nfun:td_thr_clear_event=uninstrumented\nfun:td_thr_dbresume=uninstrumented\nfun:td_thr_dbsuspend=uninstrumented\nfun:td_thr_event_enable=uninstrumented\nfun:td_thr_event_getmsg=uninstrumented\nfun:td_thr_get_info=uninstrumented\nfun:td_thr_getfpregs=uninstrumented\nfun:td_thr_getgregs=uninstrumented\nfun:td_thr_getxregs=uninstrumented\nfun:td_thr_getxregsize=uninstrumented\nfun:td_thr_set_event=uninstrumented\nfun:td_thr_setfpregs=uninstrumented\nfun:td_thr_setgregs=uninstrumented\nfun:td_thr_setprio=uninstrumented\nfun:td_thr_setsigpending=uninstrumented\nfun:td_thr_setxregs=uninstrumented\nfun:td_thr_sigsetmask=uninstrumented\nfun:td_thr_tls_get_addr=uninstrumented\nfun:td_thr_tlsbase=uninstrumented\nfun:td_thr_tsd=uninstrumented\nfun:td_thr_validate=uninstrumented\nfun:tdelete=uninstrumented\nfun:tdestroy=uninstrumented\nfun:tee=uninstrumented\nfun:telldir=uninstrumented\nfun:tempnam=uninstrumented\nfun:textdomain=uninstrumented\nfun:tfind=uninstrumented\nfun:tgamma=uninstrumented\nfun:tgammaf=uninstrumented\nfun:tgammal=uninstrumented\nfun:time=uninstrumented\nfun:
timegm=uninstrumented\nfun:timelocal=uninstrumented\nfun:timer_create=uninstrumented\nfun:timer_delete=uninstrumented\nfun:timer_getoverrun=uninstrumented\nfun:timer_gettime=uninstrumented\nfun:timer_settime=uninstrumented\nfun:timerfd_create=uninstrumented\nfun:timerfd_gettime=uninstrumented\nfun:timerfd_settime=uninstrumented\nfun:times=uninstrumented\nfun:timespec_get=uninstrumented\nfun:tmpfile=uninstrumented\nfun:tmpfile64=uninstrumented\nfun:tmpnam=uninstrumented\nfun:tmpnam_r=uninstrumented\nfun:toascii=uninstrumented\nfun:tolower=uninstrumented\nfun:tolower_l=uninstrumented\nfun:toupper=uninstrumented\nfun:toupper_l=uninstrumented\nfun:towctrans=uninstrumented\nfun:towctrans_l=uninstrumented\nfun:towlower=uninstrumented\nfun:towlower_l=uninstrumented\nfun:towupper=uninstrumented\nfun:towupper_l=uninstrumented\nfun:tr_break=uninstrumented\nfun:trunc=uninstrumented\nfun:truncate=uninstrumented\nfun:truncate64=uninstrumented\nfun:truncf=uninstrumented\nfun:truncl=uninstrumented\nfun:tsearch=uninstrumented\nfun:ttyname=uninstrumented\nfun:ttyname_r=uninstrumented\nfun:ttyslot=uninstrumented\nfun:twalk=uninstrumented\nfun:tzset=uninstrumented\nfun:ualarm=uninstrumented\nfun:ulckpwdf=uninstrumented\nfun:ulimit=uninstrumented\nfun:umask=uninstrumented\nfun:umount=uninstrumented\nfun:umount2=uninstrumented\nfun:uname=uninstrumented\nfun:ungetc=uninstrumented\nfun:ungetwc=uninstrumented\nfun:unlink=uninstrumented\nfun:unlinkat=uninstrumented\nfun:unlockpt=uninstrumented\nfun:unsetenv=uninstrumented\nfun:unshare=uninstrumented\nfun:updwtmp=uninstrumented\nfun:updwtmpx=uninstrumented\nfun:uselib=uninstrumented\nfun:uselocale=uninstrumented\nfun:user2netname=uninstrumented\nfun:usleep=uninstrumented\nfun:ustat=uninstrumented\nfun:utime=uninstrumented\nfun:utimensat=uninstrumented\nfun:utimes=uninstrumented\nfun:utmpname=uninstrumented\nfun:utmpxname=uninstrumented\nfun:valloc=uninstrumented\nfun:vasprintf=uninstrumented\nfun:vdprintf=uninstrumented\nfun:verr=uninstrumen
ted\nfun:verrx=uninstrumented\nfun:versionsort=uninstrumented\nfun:versionsort64=uninstrumented\nfun:vfork=uninstrumented\nfun:vfprintf=uninstrumented\nfun:vfscanf=uninstrumented\nfun:vfwprintf=uninstrumented\nfun:vfwscanf=uninstrumented\nfun:vhangup=uninstrumented\nfun:vlimit=uninstrumented\nfun:vmsplice=uninstrumented\nfun:vprintf=uninstrumented\nfun:vscanf=uninstrumented\nfun:vsnprintf=uninstrumented\nfun:vsprintf=uninstrumented\nfun:vsscanf=uninstrumented\nfun:vswprintf=uninstrumented\nfun:vswscanf=uninstrumented\nfun:vsyslog=uninstrumented\nfun:vtimes=uninstrumented\nfun:vwarn=uninstrumented\nfun:vwarnx=uninstrumented\nfun:vwprintf=uninstrumented\nfun:vwscanf=uninstrumented\nfun:wait=uninstrumented\nfun:wait3=uninstrumented\nfun:wait4=uninstrumented\nfun:waitid=uninstrumented\nfun:waitpid=uninstrumented\nfun:warn=uninstrumented\nfun:warnx=uninstrumented\nfun:wcpcpy=uninstrumented\nfun:wcpncpy=uninstrumented\nfun:wcrtomb=uninstrumented\nfun:wcscasecmp=uninstrumented\nfun:wcscasecmp_l=uninstrumented\nfun:wcscat=uninstrumented\nfun:wcschr=uninstrumented\nfun:wcschrnul=uninstrumented\nfun:wcscmp=uninstrumented\nfun:wcscoll=uninstrumented\nfun:wcscoll_l=uninstrumented\nfun:wcscpy=uninstrumented\nfun:wcscspn=uninstrumented\nfun:wcsdup=uninstrumented\nfun:wcsftime=uninstrumented\nfun:wcsftime_l=uninstrumented\nfun:wcslen=uninstrumented\nfun:wcsncasecmp=uninstrumented\nfun:wcsncasecmp_l=uninstrumented\nfun:wcsncat=uninstrumented\nfun:wcsncmp=uninstrumented\nfun:wcsncpy=uninstrumented\nfun:wcsnlen=uninstrumented\nfun:wcsnrtombs=uninstrumented\nfun:wcspbrk=uninstrumented\nfun:wcsrchr=uninstrumented\nfun:wcsrtombs=uninstrumented\nfun:wcsspn=uninstrumented\nfun:wcsstr=uninstrumented\nfun:wcstod=uninstrumented\nfun:wcstod_l=uninstrumented\nfun:wcstof=uninstrumented\nfun:wcstof_l=uninstrumented\nfun:wcstoimax=uninstrumented\nfun:wcstok=uninstrumented\nfun:wcstol=uninstrumented\nfun:wcstol_l=uninstrumented\nfun:wcstold=uninstrumented\nfun:wcstold_l=uninstrumented\nfun:wcstoll
=uninstrumented\nfun:wcstoll_l=uninstrumented\nfun:wcstombs=uninstrumented\nfun:wcstoq=uninstrumented\nfun:wcstoul=uninstrumented\nfun:wcstoul_l=uninstrumented\nfun:wcstoull=uninstrumented\nfun:wcstoull_l=uninstrumented\nfun:wcstoumax=uninstrumented\nfun:wcstouq=uninstrumented\nfun:wcswcs=uninstrumented\nfun:wcswidth=uninstrumented\nfun:wcsxfrm=uninstrumented\nfun:wcsxfrm_l=uninstrumented\nfun:wctob=uninstrumented\nfun:wctomb=uninstrumented\nfun:wctrans=uninstrumented\nfun:wctrans_l=uninstrumented\nfun:wctype=uninstrumented\nfun:wctype_l=uninstrumented\nfun:wcwidth=uninstrumented\nfun:wmemchr=uninstrumented\nfun:wmemcmp=uninstrumented\nfun:wmemcpy=uninstrumented\nfun:wmemmove=uninstrumented\nfun:wmempcpy=uninstrumented\nfun:wmemset=uninstrumented\nfun:wordexp=uninstrumented\nfun:wordfree=uninstrumented\nfun:wprintf=uninstrumented\nfun:write=uninstrumented\nfun:writeColdStartFile=uninstrumented\nfun:writev=uninstrumented\nfun:wscanf=uninstrumented\nfun:xdecrypt=uninstrumented\nfun:xdr_accepted_reply=uninstrumented\nfun:xdr_array=uninstrumented\nfun:xdr_authdes_cred=uninstrumented\nfun:xdr_authdes_verf=uninstrumented\nfun:xdr_authunix_parms=uninstrumented\nfun:xdr_bool=uninstrumented\nfun:xdr_bytes=uninstrumented\nfun:xdr_callhdr=uninstrumented\nfun:xdr_callmsg=uninstrumented\nfun:xdr_cback_data=uninstrumented\nfun:xdr_char=uninstrumented\nfun:xdr_cryptkeyarg=uninstrumented\nfun:xdr_cryptkeyarg2=uninstrumented\nfun:xdr_cryptkeyres=uninstrumented\nfun:xdr_des_block=uninstrumented\nfun:xdr_domainname=uninstrumented\nfun:xdr_double=uninstrumented\nfun:xdr_enum=uninstrumented\nfun:xdr_float=uninstrumented\nfun:xdr_free=uninstrumented\nfun:xdr_getcredres=uninstrumented\nfun:xdr_hyper=uninstrumented\nfun:xdr_int=uninstrumented\nfun:xdr_int16_t=uninstrumented\nfun:xdr_int32_t=uninstrumented\nfun:xdr_int64_t=uninstrumented\nfun:xdr_int8_t=uninstrumented\nfun:xdr_key_netstarg=uninstrumented\nfun:xdr_key_netstres=uninstrumented\nfun:xdr_keybuf=uninstrumented\nfun:xdr_keydat=uni
nstrumented\nfun:xdr_keystatus=uninstrumented\nfun:xdr_long=uninstrumented\nfun:xdr_longlong_t=uninstrumented\nfun:xdr_mapname=uninstrumented\nfun:xdr_netnamestr=uninstrumented\nfun:xdr_netobj=uninstrumented\nfun:xdr_obj_p=uninstrumented\nfun:xdr_opaque=uninstrumented\nfun:xdr_opaque_auth=uninstrumented\nfun:xdr_peername=uninstrumented\nfun:xdr_pmap=uninstrumented\nfun:xdr_pmaplist=uninstrumented\nfun:xdr_pointer=uninstrumented\nfun:xdr_quad_t=uninstrumented\nfun:xdr_reference=uninstrumented\nfun:xdr_rejected_reply=uninstrumented\nfun:xdr_replymsg=uninstrumented\nfun:xdr_rmtcall_args=uninstrumented\nfun:xdr_rmtcallres=uninstrumented\nfun:xdr_short=uninstrumented\nfun:xdr_sizeof=uninstrumented\nfun:xdr_string=uninstrumented\nfun:xdr_u_char=uninstrumented\nfun:xdr_u_hyper=uninstrumented\nfun:xdr_u_int=uninstrumented\nfun:xdr_u_long=uninstrumented\nfun:xdr_u_longlong_t=uninstrumented\nfun:xdr_u_quad_t=uninstrumented\nfun:xdr_u_short=uninstrumented\nfun:xdr_uint16_t=uninstrumented\nfun:xdr_uint32_t=uninstrumented\nfun:xdr_uint64_t=uninstrumented\nfun:xdr_uint8_t=uninstrumented\nfun:xdr_union=uninstrumented\nfun:xdr_unixcred=uninstrumented\nfun:xdr_valdat=uninstrumented\nfun:xdr_vector=uninstrumented\nfun:xdr_void=uninstrumented\nfun:xdr_wrapstring=uninstrumented\nfun:xdr_yp_buf=uninstrumented\nfun:xdr_ypall=uninstrumented\nfun:xdr_ypbind_binding=uninstrumented\nfun:xdr_ypbind_resp=uninstrumented\nfun:xdr_ypbind_resptype=uninstrumented\nfun:xdr_ypbind_setdom=uninstrumented\nfun:xdr_ypdelete_args=uninstrumented\nfun:xdr_ypmap_parms=uninstrumented\nfun:xdr_ypmaplist=uninstrumented\nfun:xdr_yppush_status=uninstrumented\nfun:xdr_yppushresp_xfr=uninstrumented\nfun:xdr_ypreq_key=uninstrumented\nfun:xdr_ypreq_nokey=uninstrumented\nfun:xdr_ypreq_xfr=uninstrumented\nfun:xdr_ypresp_all=uninstrumented\nfun:xdr_ypresp_key_val=uninstrumented\nfun:xdr_ypresp_maplist=uninstrumented\nfun:xdr_ypresp_master=uninstrumented\nfun:xdr_ypresp_order=uninstrumented\nfun:xdr_ypresp_val=uninstrume
nted\nfun:xdr_ypresp_xfr=uninstrumented\nfun:xdr_ypstat=uninstrumented\nfun:xdr_ypupdate_args=uninstrumented\nfun:xdr_ypxfrstat=uninstrumented\nfun:xdrmem_create=uninstrumented\nfun:xdrrec_create=uninstrumented\nfun:xdrrec_endofrecord=uninstrumented\nfun:xdrrec_eof=uninstrumented\nfun:xdrrec_skiprecord=uninstrumented\nfun:xdrstdio_create=uninstrumented\nfun:xencrypt=uninstrumented\nfun:xprt_register=uninstrumented\nfun:xprt_unregister=uninstrumented\nfun:y0=uninstrumented\nfun:y0f=uninstrumented\nfun:y0l=uninstrumented\nfun:y1=uninstrumented\nfun:y1f=uninstrumented\nfun:y1l=uninstrumented\nfun:yn=uninstrumented\nfun:ynf=uninstrumented\nfun:ynl=uninstrumented\nfun:yp_all=uninstrumented\nfun:yp_bind=uninstrumented\nfun:yp_first=uninstrumented\nfun:yp_get_default_domain=uninstrumented\nfun:yp_maplist=uninstrumented\nfun:yp_master=uninstrumented\nfun:yp_match=uninstrumented\nfun:yp_next=uninstrumented\nfun:yp_order=uninstrumented\nfun:yp_unbind=uninstrumented\nfun:yp_update=uninstrumented\nfun:ypbinderr_string=uninstrumented\nfun:yperr_string=uninstrumented\nfun:ypprot_err=uninstrumented\n"
  },
  {
    "path": "runtime/dfsan/libc_ubuntu1804_abilist.txt",
    "content": "fun:_Exit=uninstrumented\nfun:_IO_adjust_column=uninstrumented\nfun:_IO_adjust_wcolumn=uninstrumented\nfun:_IO_default_doallocate=uninstrumented\nfun:_IO_default_finish=uninstrumented\nfun:_IO_default_pbackfail=uninstrumented\nfun:_IO_default_uflow=uninstrumented\nfun:_IO_default_xsgetn=uninstrumented\nfun:_IO_default_xsputn=uninstrumented\nfun:_IO_do_write=uninstrumented\nfun:_IO_doallocbuf=uninstrumented\nfun:_IO_enable_locks=uninstrumented\nfun:_IO_fclose=uninstrumented\nfun:_IO_fdopen=uninstrumented\nfun:_IO_feof=uninstrumented\nfun:_IO_ferror=uninstrumented\nfun:_IO_fflush=uninstrumented\nfun:_IO_fgetpos=uninstrumented\nfun:_IO_fgetpos64=uninstrumented\nfun:_IO_fgets=uninstrumented\nfun:_IO_file_attach=uninstrumented\nfun:_IO_file_close=uninstrumented\nfun:_IO_file_close_it=uninstrumented\nfun:_IO_file_doallocate=uninstrumented\nfun:_IO_file_finish=uninstrumented\nfun:_IO_file_fopen=uninstrumented\nfun:_IO_file_init=uninstrumented\nfun:_IO_file_open=uninstrumented\nfun:_IO_file_overflow=uninstrumented\nfun:_IO_file_read=uninstrumented\nfun:_IO_file_seek=uninstrumented\nfun:_IO_file_seekoff=uninstrumented\nfun:_IO_file_setbuf=uninstrumented\nfun:_IO_file_stat=uninstrumented\nfun:_IO_file_sync=uninstrumented\nfun:_IO_file_underflow=uninstrumented\nfun:_IO_file_write=uninstrumented\nfun:_IO_file_xsputn=uninstrumented\nfun:_IO_flockfile=uninstrumented\nfun:_IO_flush_all=uninstrumented\nfun:_IO_flush_all_linebuffered=uninstrumented\nfun:_IO_fopen=uninstrumented\nfun:_IO_fprintf=uninstrumented\nfun:_IO_fputs=uninstrumented\nfun:_IO_fread=uninstrumented\nfun:_IO_free_backup_area=uninstrumented\nfun:_IO_free_wbackup_area=uninstrumented\nfun:_IO_fsetpos=uninstrumented\nfun:_IO_fsetpos64=uninstrumented\nfun:_IO_ftell=uninstrumented\nfun:_IO_ftrylockfile=uninstrumented\nfun:_IO_funlockfile=uninstrumented\nfun:_IO_fwrite=uninstrumented\nfun:_IO_getc=uninstrumented\nfun:_IO_getline=uninstrumented\nfun:_IO_getline_info=uninstrumented\nfun:_IO_gets=uninstrumen
ted\nfun:_IO_init=uninstrumented\nfun:_IO_init_marker=uninstrumented\nfun:_IO_init_wmarker=uninstrumented\nfun:_IO_iter_begin=uninstrumented\nfun:_IO_iter_end=uninstrumented\nfun:_IO_iter_file=uninstrumented\nfun:_IO_iter_next=uninstrumented\nfun:_IO_least_wmarker=uninstrumented\nfun:_IO_link_in=uninstrumented\nfun:_IO_list_lock=uninstrumented\nfun:_IO_list_resetlock=uninstrumented\nfun:_IO_list_unlock=uninstrumented\nfun:_IO_marker_delta=uninstrumented\nfun:_IO_marker_difference=uninstrumented\nfun:_IO_padn=uninstrumented\nfun:_IO_peekc_locked=uninstrumented\nfun:_IO_popen=uninstrumented\nfun:_IO_printf=uninstrumented\nfun:_IO_proc_close=uninstrumented\nfun:_IO_proc_open=uninstrumented\nfun:_IO_putc=uninstrumented\nfun:_IO_puts=uninstrumented\nfun:_IO_remove_marker=uninstrumented\nfun:_IO_seekmark=uninstrumented\nfun:_IO_seekoff=uninstrumented\nfun:_IO_seekpos=uninstrumented\nfun:_IO_seekwmark=uninstrumented\nfun:_IO_setb=uninstrumented\nfun:_IO_setbuffer=uninstrumented\nfun:_IO_setvbuf=uninstrumented\nfun:_IO_sgetn=uninstrumented\nfun:_IO_sprintf=uninstrumented\nfun:_IO_sputbackc=uninstrumented\nfun:_IO_sputbackwc=uninstrumented\nfun:_IO_sscanf=uninstrumented\nfun:_IO_str_init_readonly=uninstrumented\nfun:_IO_str_init_static=uninstrumented\nfun:_IO_str_overflow=uninstrumented\nfun:_IO_str_pbackfail=uninstrumented\nfun:_IO_str_seekoff=uninstrumented\nfun:_IO_str_underflow=uninstrumented\nfun:_IO_sungetc=uninstrumented\nfun:_IO_sungetwc=uninstrumented\nfun:_IO_switch_to_get_mode=uninstrumented\nfun:_IO_switch_to_main_wget_area=uninstrumented\nfun:_IO_switch_to_wbackup_area=uninstrumented\nfun:_IO_switch_to_wget_mode=uninstrumented\nfun:_IO_un_link=uninstrumented\nfun:_IO_ungetc=uninstrumented\nfun:_IO_unsave_markers=uninstrumented\nfun:_IO_unsave_wmarkers=uninstrumented\nfun:_IO_vfprintf=uninstrumented\nfun:_IO_vfscanf=uninstrumented\nfun:_IO_vsprintf=uninstrumented\nfun:_IO_wdefault_doallocate=uninstrumented\nfun:_IO_wdefault_finish=uninstrumented\nfun:_IO_wdefault
_pbackfail=uninstrumented\nfun:_IO_wdefault_uflow=uninstrumented\nfun:_IO_wdefault_xsgetn=uninstrumented\nfun:_IO_wdefault_xsputn=uninstrumented\nfun:_IO_wdo_write=uninstrumented\nfun:_IO_wdoallocbuf=uninstrumented\nfun:_IO_wfile_overflow=uninstrumented\nfun:_IO_wfile_seekoff=uninstrumented\nfun:_IO_wfile_sync=uninstrumented\nfun:_IO_wfile_underflow=uninstrumented\nfun:_IO_wfile_xsputn=uninstrumented\nfun:_IO_wmarker_delta=uninstrumented\nfun:_IO_wsetb=uninstrumented\nfun:_Unwind_Backtrace=uninstrumented\nfun:_Unwind_DeleteException=uninstrumented\nfun:_Unwind_FindEnclosingFunction=uninstrumented\nfun:_Unwind_Find_FDE=uninstrumented\nfun:_Unwind_ForcedUnwind=uninstrumented\nfun:_Unwind_GetCFA=uninstrumented\nfun:_Unwind_GetDataRelBase=uninstrumented\nfun:_Unwind_GetGR=uninstrumented\nfun:_Unwind_GetIP=uninstrumented\nfun:_Unwind_GetIPInfo=uninstrumented\nfun:_Unwind_GetLanguageSpecificData=uninstrumented\nfun:_Unwind_GetRegionStart=uninstrumented\nfun:_Unwind_GetTextRelBase=uninstrumented\nfun:_Unwind_RaiseException=uninstrumented\nfun:_Unwind_Resume=uninstrumented\nfun:_Unwind_Resume_or_Rethrow=uninstrumented\nfun:_Unwind_SetGR=uninstrumented\nfun:_Unwind_SetIP=uninstrumented\nfun:__absvdi2=uninstrumented\nfun:__absvsi2=uninstrumented\nfun:__absvti2=uninstrumented\nfun:__acos_finite=uninstrumented\nfun:__acosf128_finite=uninstrumented\nfun:__acosf_finite=uninstrumented\nfun:__acosh_finite=uninstrumented\nfun:__acoshf128_finite=uninstrumented\nfun:__acoshf_finite=uninstrumented\nfun:__acoshl_finite=uninstrumented\nfun:__acosl_finite=uninstrumented\nfun:__addtf3=uninstrumented\nfun:__addvdi3=uninstrumented\nfun:__addvsi3=uninstrumented\nfun:__addvti3=uninstrumented\nfun:__adjtimex=uninstrumented\nfun:__arch_prctl=uninstrumented\nfun:__argz_count=uninstrumented\nfun:__argz_next=uninstrumented\nfun:__argz_stringify=uninstrumented\nfun:__ashlti3=uninstrumented\nfun:__ashrti3=uninstrumented\nfun:__asin_finite=uninstrumented\nfun:__asinf128_finite=uninstrumented\nfun:__as
inf_finite=uninstrumented\nfun:__asinl_finite=uninstrumented\nfun:__asprintf=uninstrumented\nfun:__asprintf_chk=uninstrumented\nfun:__assert=uninstrumented\nfun:__assert_fail=uninstrumented\nfun:__assert_perror_fail=uninstrumented\nfun:__atan2_finite=uninstrumented\nfun:__atan2f128_finite=uninstrumented\nfun:__atan2f_finite=uninstrumented\nfun:__atan2l_finite=uninstrumented\nfun:__atanh_finite=uninstrumented\nfun:__atanhf128_finite=uninstrumented\nfun:__atanhf_finite=uninstrumented\nfun:__atanhl_finite=uninstrumented\nfun:__b64_ntop=uninstrumented\nfun:__b64_pton=uninstrumented\nfun:__backtrace=uninstrumented\nfun:__backtrace_symbols=uninstrumented\nfun:__backtrace_symbols_fd=uninstrumented\nfun:__bid128_abs=uninstrumented\nfun:__bid128_add=uninstrumented\nfun:__bid128_class=uninstrumented\nfun:__bid128_copy=uninstrumented\nfun:__bid128_copySign=uninstrumented\nfun:__bid128_div=uninstrumented\nfun:__bid128_fma=uninstrumented\nfun:__bid128_from_int32=uninstrumented\nfun:__bid128_from_int64=uninstrumented\nfun:__bid128_from_uint32=uninstrumented\nfun:__bid128_from_uint64=uninstrumented\nfun:__bid128_isCanonical=uninstrumented\nfun:__bid128_isFinite=uninstrumented\nfun:__bid128_isInf=uninstrumented\nfun:__bid128_isNaN=uninstrumented\nfun:__bid128_isNormal=uninstrumented\nfun:__bid128_isSignaling=uninstrumented\nfun:__bid128_isSigned=uninstrumented\nfun:__bid128_isSubnormal=uninstrumented\nfun:__bid128_isZero=uninstrumented\nfun:__bid128_mul=uninstrumented\nfun:__bid128_negate=uninstrumented\nfun:__bid128_quiet_equal=uninstrumented\nfun:__bid128_quiet_greater=uninstrumented\nfun:__bid128_quiet_greater_equal=uninstrumented\nfun:__bid128_quiet_greater_unordered=uninstrumented\nfun:__bid128_quiet_less=uninstrumented\nfun:__bid128_quiet_less_equal=uninstrumented\nfun:__bid128_quiet_less_unordered=uninstrumented\nfun:__bid128_quiet_not_equal=uninstrumented\nfun:__bid128_quiet_not_greater=uninstrumented\nfun:__bid128_quiet_not_less=uninstrumented\nfun:__bid128_quiet_ordered=u
ninstrumented\nfun:__bid128_quiet_unordered=uninstrumented\nfun:__bid128_radix=uninstrumented\nfun:__bid128_sameQuantum=uninstrumented\nfun:__bid128_signaling_greater=uninstrumented\nfun:__bid128_signaling_greater_equal=uninstrumented\nfun:__bid128_signaling_greater_unordered=uninstrumented\nfun:__bid128_signaling_less=uninstrumented\nfun:__bid128_signaling_less_equal=uninstrumented\nfun:__bid128_signaling_less_unordered=uninstrumented\nfun:__bid128_signaling_not_greater=uninstrumented\nfun:__bid128_signaling_not_less=uninstrumented\nfun:__bid128_sub=uninstrumented\nfun:__bid128_to_bid32=uninstrumented\nfun:__bid128_to_bid64=uninstrumented\nfun:__bid128_to_binary128=uninstrumented\nfun:__bid128_to_binary32=uninstrumented\nfun:__bid128_to_binary64=uninstrumented\nfun:__bid128_to_binary80=uninstrumented\nfun:__bid128_to_int32_ceil=uninstrumented\nfun:__bid128_to_int32_floor=uninstrumented\nfun:__bid128_to_int32_int=uninstrumented\nfun:__bid128_to_int32_rnint=uninstrumented\nfun:__bid128_to_int32_rninta=uninstrumented\nfun:__bid128_to_int32_xceil=uninstrumented\nfun:__bid128_to_int32_xfloor=uninstrumented\nfun:__bid128_to_int32_xint=uninstrumented\nfun:__bid128_to_int32_xrnint=uninstrumented\nfun:__bid128_to_int32_xrninta=uninstrumented\nfun:__bid128_to_int64_ceil=uninstrumented\nfun:__bid128_to_int64_floor=uninstrumented\nfun:__bid128_to_int64_int=uninstrumented\nfun:__bid128_to_int64_rnint=uninstrumented\nfun:__bid128_to_int64_rninta=uninstrumented\nfun:__bid128_to_int64_xceil=uninstrumented\nfun:__bid128_to_int64_xfloor=uninstrumented\nfun:__bid128_to_int64_xint=uninstrumented\nfun:__bid128_to_int64_xrnint=uninstrumented\nfun:__bid128_to_int64_xrninta=uninstrumented\nfun:__bid128_to_uint32_ceil=uninstrumented\nfun:__bid128_to_uint32_floor=uninstrumented\nfun:__bid128_to_uint32_int=uninstrumented\nfun:__bid128_to_uint32_rnint=uninstrumented\nfun:__bid128_to_uint32_rninta=uninstrumented\nfun:__bid128_to_uint32_xceil=uninstrumented\nfun:__bid128_to_uint32_xfloor=uninst
rumented\nfun:__bid128_to_uint32_xint=uninstrumented\nfun:__bid128_to_uint32_xrnint=uninstrumented\nfun:__bid128_to_uint32_xrninta=uninstrumented\nfun:__bid128_to_uint64_ceil=uninstrumented\nfun:__bid128_to_uint64_floor=uninstrumented\nfun:__bid128_to_uint64_int=uninstrumented\nfun:__bid128_to_uint64_rnint=uninstrumented\nfun:__bid128_to_uint64_rninta=uninstrumented\nfun:__bid128_to_uint64_xceil=uninstrumented\nfun:__bid128_to_uint64_xfloor=uninstrumented\nfun:__bid128_to_uint64_xint=uninstrumented\nfun:__bid128_to_uint64_xrnint=uninstrumented\nfun:__bid128_to_uint64_xrninta=uninstrumented\nfun:__bid128_totalOrder=uninstrumented\nfun:__bid128_totalOrderMag=uninstrumented\nfun:__bid128dd_add=uninstrumented\nfun:__bid128dd_div=uninstrumented\nfun:__bid128dd_mul=uninstrumented\nfun:__bid128dd_sub=uninstrumented\nfun:__bid128ddd_fma=uninstrumented\nfun:__bid128ddq_fma=uninstrumented\nfun:__bid128dq_add=uninstrumented\nfun:__bid128dq_div=uninstrumented\nfun:__bid128dq_mul=uninstrumented\nfun:__bid128dq_sub=uninstrumented\nfun:__bid128dqd_fma=uninstrumented\nfun:__bid128dqq_fma=uninstrumented\nfun:__bid128qd_add=uninstrumented\nfun:__bid128qd_div=uninstrumented\nfun:__bid128qd_mul=uninstrumented\nfun:__bid128qd_sub=uninstrumented\nfun:__bid128qdd_fma=uninstrumented\nfun:__bid128qdq_fma=uninstrumented\nfun:__bid128qqd_fma=uninstrumented\nfun:__bid32_to_bid128=uninstrumented\nfun:__bid32_to_bid64=uninstrumented\nfun:__bid32_to_binary128=uninstrumented\nfun:__bid32_to_binary32=uninstrumented\nfun:__bid32_to_binary64=uninstrumented\nfun:__bid32_to_binary80=uninstrumented\nfun:__bid64_abs=uninstrumented\nfun:__bid64_add=uninstrumented\nfun:__bid64_class=uninstrumented\nfun:__bid64_copy=uninstrumented\nfun:__bid64_copySign=uninstrumented\nfun:__bid64_div=uninstrumented\nfun:__bid64_from_int32=uninstrumented\nfun:__bid64_from_int64=uninstrumented\nfun:__bid64_from_uint32=uninstrumented\nfun:__bid64_from_uint64=uninstrumented\nfun:__bid64_isCanonical=uninstrumented\nfun:__bid64_i
sFinite=uninstrumented\nfun:__bid64_isInf=uninstrumented\nfun:__bid64_isNaN=uninstrumented\nfun:__bid64_isNormal=uninstrumented\nfun:__bid64_isSignaling=uninstrumented\nfun:__bid64_isSigned=uninstrumented\nfun:__bid64_isSubnormal=uninstrumented\nfun:__bid64_isZero=uninstrumented\nfun:__bid64_mul=uninstrumented\nfun:__bid64_negate=uninstrumented\nfun:__bid64_quiet_equal=uninstrumented\nfun:__bid64_quiet_greater=uninstrumented\nfun:__bid64_quiet_greater_equal=uninstrumented\nfun:__bid64_quiet_greater_unordered=uninstrumented\nfun:__bid64_quiet_less=uninstrumented\nfun:__bid64_quiet_less_equal=uninstrumented\nfun:__bid64_quiet_less_unordered=uninstrumented\nfun:__bid64_quiet_not_equal=uninstrumented\nfun:__bid64_quiet_not_greater=uninstrumented\nfun:__bid64_quiet_not_less=uninstrumented\nfun:__bid64_quiet_ordered=uninstrumented\nfun:__bid64_quiet_unordered=uninstrumented\nfun:__bid64_radix=uninstrumented\nfun:__bid64_sameQuantum=uninstrumented\nfun:__bid64_signaling_greater=uninstrumented\nfun:__bid64_signaling_greater_equal=uninstrumented\nfun:__bid64_signaling_greater_unordered=uninstrumented\nfun:__bid64_signaling_less=uninstrumented\nfun:__bid64_signaling_less_equal=uninstrumented\nfun:__bid64_signaling_less_unordered=uninstrumented\nfun:__bid64_signaling_not_greater=uninstrumented\nfun:__bid64_signaling_not_less=uninstrumented\nfun:__bid64_sub=uninstrumented\nfun:__bid64_to_bid128=uninstrumented\nfun:__bid64_to_bid32=uninstrumented\nfun:__bid64_to_binary128=uninstrumented\nfun:__bid64_to_binary32=uninstrumented\nfun:__bid64_to_binary64=uninstrumented\nfun:__bid64_to_binary80=uninstrumented\nfun:__bid64_to_int32_ceil=uninstrumented\nfun:__bid64_to_int32_floor=uninstrumented\nfun:__bid64_to_int32_int=uninstrumented\nfun:__bid64_to_int32_rnint=uninstrumented\nfun:__bid64_to_int32_rninta=uninstrumented\nfun:__bid64_to_int32_xceil=uninstrumented\nfun:__bid64_to_int32_xfloor=uninstrumented\nfun:__bid64_to_int32_xint=uninstrumented\nfun:__bid64_to_int32_xrnint=uninstrume
nted\nfun:__bid64_to_int32_xrninta=uninstrumented\nfun:__bid64_to_int64_ceil=uninstrumented\nfun:__bid64_to_int64_floor=uninstrumented\nfun:__bid64_to_int64_int=uninstrumented\nfun:__bid64_to_int64_rnint=uninstrumented\nfun:__bid64_to_int64_rninta=uninstrumented\nfun:__bid64_to_int64_xceil=uninstrumented\nfun:__bid64_to_int64_xfloor=uninstrumented\nfun:__bid64_to_int64_xint=uninstrumented\nfun:__bid64_to_int64_xrnint=uninstrumented\nfun:__bid64_to_int64_xrninta=uninstrumented\nfun:__bid64_to_uint32_ceil=uninstrumented\nfun:__bid64_to_uint32_floor=uninstrumented\nfun:__bid64_to_uint32_int=uninstrumented\nfun:__bid64_to_uint32_rnint=uninstrumented\nfun:__bid64_to_uint32_rninta=uninstrumented\nfun:__bid64_to_uint32_xceil=uninstrumented\nfun:__bid64_to_uint32_xfloor=uninstrumented\nfun:__bid64_to_uint32_xint=uninstrumented\nfun:__bid64_to_uint32_xrnint=uninstrumented\nfun:__bid64_to_uint32_xrninta=uninstrumented\nfun:__bid64_to_uint64_ceil=uninstrumented\nfun:__bid64_to_uint64_floor=uninstrumented\nfun:__bid64_to_uint64_int=uninstrumented\nfun:__bid64_to_uint64_rnint=uninstrumented\nfun:__bid64_to_uint64_rninta=uninstrumented\nfun:__bid64_to_uint64_xceil=uninstrumented\nfun:__bid64_to_uint64_xfloor=uninstrumented\nfun:__bid64_to_uint64_xint=uninstrumented\nfun:__bid64_to_uint64_xrnint=uninstrumented\nfun:__bid64_to_uint64_xrninta=uninstrumented\nfun:__bid64_totalOrder=uninstrumented\nfun:__bid64_totalOrderMag=uninstrumented\nfun:__bid64ddq_fma=uninstrumented\nfun:__bid64dq_add=uninstrumented\nfun:__bid64dq_div=uninstrumented\nfun:__bid64dq_mul=uninstrumented\nfun:__bid64dq_sub=uninstrumented\nfun:__bid64dqd_fma=uninstrumented\nfun:__bid64dqq_fma=uninstrumented\nfun:__bid64qd_add=uninstrumented\nfun:__bid64qd_div=uninstrumented\nfun:__bid64qd_mul=uninstrumented\nfun:__bid64qd_sub=uninstrumented\nfun:__bid64qdd_fma=uninstrumented\nfun:__bid64qdq_fma=uninstrumented\nfun:__bid64qq_add=uninstrumented\nfun:__bid64qq_div=uninstrumented\nfun:__bid64qq_mul=uninstrumented\nfun:__
bid64qq_sub=uninstrumented\nfun:__bid64qqd_fma=uninstrumented\nfun:__bid64qqq_fma=uninstrumented\nfun:__bid_adddd3=uninstrumented\nfun:__bid_addsd3=uninstrumented\nfun:__bid_addtd3=uninstrumented\nfun:__bid_divdd3=uninstrumented\nfun:__bid_divsd3=uninstrumented\nfun:__bid_divtd3=uninstrumented\nfun:__bid_eqdd2=uninstrumented\nfun:__bid_eqsd2=uninstrumented\nfun:__bid_eqtd2=uninstrumented\nfun:__bid_extendddtd2=uninstrumented\nfun:__bid_extendddtf=uninstrumented\nfun:__bid_extendddxf=uninstrumented\nfun:__bid_extenddfdd=uninstrumented\nfun:__bid_extenddftd=uninstrumented\nfun:__bid_extendsddd2=uninstrumented\nfun:__bid_extendsddf=uninstrumented\nfun:__bid_extendsdtd2=uninstrumented\nfun:__bid_extendsdtf=uninstrumented\nfun:__bid_extendsdxf=uninstrumented\nfun:__bid_extendsfdd=uninstrumented\nfun:__bid_extendsfsd=uninstrumented\nfun:__bid_extendsftd=uninstrumented\nfun:__bid_extendtftd=uninstrumented\nfun:__bid_extendxftd=uninstrumented\nfun:__bid_fixdddi=uninstrumented\nfun:__bid_fixddsi=uninstrumented\nfun:__bid_fixsddi=uninstrumented\nfun:__bid_fixsdsi=uninstrumented\nfun:__bid_fixtddi=uninstrumented\nfun:__bid_fixtdsi=uninstrumented\nfun:__bid_fixunsdddi=uninstrumented\nfun:__bid_fixunsddsi=uninstrumented\nfun:__bid_fixunssddi=uninstrumented\nfun:__bid_fixunssdsi=uninstrumented\nfun:__bid_fixunstddi=uninstrumented\nfun:__bid_fixunstdsi=uninstrumented\nfun:__bid_floatdidd=uninstrumented\nfun:__bid_floatdisd=uninstrumented\nfun:__bid_floatditd=uninstrumented\nfun:__bid_floatsidd=uninstrumented\nfun:__bid_floatsisd=uninstrumented\nfun:__bid_floatsitd=uninstrumented\nfun:__bid_floatunsdidd=uninstrumented\nfun:__bid_floatunsdisd=uninstrumented\nfun:__bid_floatunsditd=uninstrumented\nfun:__bid_floatunssidd=uninstrumented\nfun:__bid_floatunssisd=uninstrumented\nfun:__bid_floatunssitd=uninstrumented\nfun:__bid_gedd2=uninstrumented\nfun:__bid_gesd2=uninstrumented\nfun:__bid_getd2=uninstrumented\nfun:__bid_gtdd2=uninstrumented\nfun:__bid_gtsd2=uninstrumented\nfun:__bid_gttd
2=uninstrumented\nfun:__bid_ledd2=uninstrumented\nfun:__bid_lesd2=uninstrumented\nfun:__bid_letd2=uninstrumented\nfun:__bid_ltdd2=uninstrumented\nfun:__bid_ltsd2=uninstrumented\nfun:__bid_lttd2=uninstrumented\nfun:__bid_muldd3=uninstrumented\nfun:__bid_mulsd3=uninstrumented\nfun:__bid_multd3=uninstrumented\nfun:__bid_nedd2=uninstrumented\nfun:__bid_nesd2=uninstrumented\nfun:__bid_netd2=uninstrumented\nfun:__bid_round128_19_38=uninstrumented\nfun:__bid_round192_39_57=uninstrumented\nfun:__bid_round256_58_76=uninstrumented\nfun:__bid_round64_2_18=uninstrumented\nfun:__bid_subdd3=uninstrumented\nfun:__bid_subsd3=uninstrumented\nfun:__bid_subtd3=uninstrumented\nfun:__bid_truncdddf=uninstrumented\nfun:__bid_truncddsd2=uninstrumented\nfun:__bid_truncddsf=uninstrumented\nfun:__bid_truncdfsd=uninstrumented\nfun:__bid_truncsdsf=uninstrumented\nfun:__bid_trunctddd2=uninstrumented\nfun:__bid_trunctddf=uninstrumented\nfun:__bid_trunctdsd2=uninstrumented\nfun:__bid_trunctdsf=uninstrumented\nfun:__bid_trunctdtf=uninstrumented\nfun:__bid_trunctdxf=uninstrumented\nfun:__bid_trunctfdd=uninstrumented\nfun:__bid_trunctfsd=uninstrumented\nfun:__bid_truncxfdd=uninstrumented\nfun:__bid_truncxfsd=uninstrumented\nfun:__bid_unorddd2=uninstrumented\nfun:__bid_unordsd2=uninstrumented\nfun:__bid_unordtd2=uninstrumented\nfun:__binary128_to_bid128=uninstrumented\nfun:__binary128_to_bid32=uninstrumented\nfun:__binary128_to_bid64=uninstrumented\nfun:__binary32_to_bid128=uninstrumented\nfun:__binary32_to_bid32=uninstrumented\nfun:__binary32_to_bid64=uninstrumented\nfun:__binary64_to_bid128=uninstrumented\nfun:__binary64_to_bid32=uninstrumented\nfun:__binary64_to_bid64=uninstrumented\nfun:__binary80_to_bid128=uninstrumented\nfun:__binary80_to_bid32=uninstrumented\nfun:__binary80_to_bid64=uninstrumented\nfun:__bsd_getpgrp=uninstrumented\nfun:__bswapdi2=uninstrumented\nfun:__bswapsi2=uninstrumented\nfun:__bzero=uninstrumented\nfun:__call_tls_dtors=uninstrumented\nfun:__chk_fail=uninstrumented\nfun:__c
lear_cache=uninstrumented\nfun:__clock_getcpuclockid=uninstrumented\nfun:__clock_getres=uninstrumented\nfun:__clock_gettime=uninstrumented\nfun:__clock_nanosleep=uninstrumented\nfun:__clock_settime=uninstrumented\nfun:__clog10=uninstrumented\nfun:__clog10f=uninstrumented\nfun:__clog10l=uninstrumented\nfun:__clone=uninstrumented\nfun:__close=uninstrumented\nfun:__close_nocancel=uninstrumented\nfun:__clrsbdi2=uninstrumented\nfun:__clrsbti2=uninstrumented\nfun:__clzdi2=uninstrumented\nfun:__clzti2=uninstrumented\nfun:__cmpti2=uninstrumented\nfun:__cmsg_nxthdr=uninstrumented\nfun:__confstr_chk=uninstrumented\nfun:__connect=uninstrumented\nfun:__copy_grp=uninstrumented\nfun:__cosh_finite=uninstrumented\nfun:__coshf128_finite=uninstrumented\nfun:__coshf_finite=uninstrumented\nfun:__coshl_finite=uninstrumented\nfun:__cpu_indicator_init=uninstrumented\nfun:__create_ib_request=uninstrumented\nfun:__ctype_b_loc=uninstrumented\nfun:__ctype_get_mb_cur_max=uninstrumented\nfun:__ctype_init=uninstrumented\nfun:__ctype_tolower_loc=uninstrumented\nfun:__ctype_toupper_loc=uninstrumented\nfun:__ctzdi2=uninstrumented\nfun:__ctzti2=uninstrumented\nfun:__cxa_at_quick_exit=uninstrumented\nfun:__cxa_atexit=uninstrumented\nfun:__cxa_finalize=uninstrumented\nfun:__cxa_thread_atexit_impl=uninstrumented\nfun:__cyg_profile_func_enter=uninstrumented\nfun:__cyg_profile_func_exit=uninstrumented\nfun:__dcgettext=uninstrumented\nfun:__default_morecore=uninstrumented\nfun:__deregister_frame=uninstrumented\nfun:__deregister_frame_info=uninstrumented\nfun:__deregister_frame_info_bases=uninstrumented\nfun:__dfp_clear_except=uninstrumented\nfun:__dfp_get_round=uninstrumented\nfun:__dfp_raise_except=uninstrumented\nfun:__dfp_set_round=uninstrumented\nfun:__dfp_test_except=uninstrumented\nfun:__dgettext=uninstrumented\nfun:__divdc3=uninstrumented\nfun:__divmodti4=uninstrumented\nfun:__divsc3=uninstrumented\nfun:__divtc3=uninstrumented\nfun:__divtf3=uninstrumented\nfun:__divti3=uninstrumented\nfun:__divxc3=
uninstrumented\nfun:__dn_comp=uninstrumented\nfun:__dn_count_labels=uninstrumented\nfun:__dn_expand=uninstrumented\nfun:__dn_skipname=uninstrumented\nfun:__do_niscall3=uninstrumented\nfun:__dprintf_chk=uninstrumented\nfun:__dup2=uninstrumented\nfun:__duplocale=uninstrumented\nfun:__emutls_get_address=uninstrumented\nfun:__emutls_register_common=uninstrumented\nfun:__enable_execute_stack=uninstrumented\nfun:__endmntent=uninstrumented\nfun:__eprintf=uninstrumented\nfun:__eqtf2=uninstrumented\nfun:__errno_location=uninstrumented\nfun:__exp10_finite=uninstrumented\nfun:__exp10f128_finite=uninstrumented\nfun:__exp10f_finite=uninstrumented\nfun:__exp10l_finite=uninstrumented\nfun:__exp2_finite=uninstrumented\nfun:__exp2f128_finite=uninstrumented\nfun:__exp2f_finite=uninstrumented\nfun:__exp2l_finite=uninstrumented\nfun:__exp_finite=uninstrumented\nfun:__expf128_finite=uninstrumented\nfun:__expf_finite=uninstrumented\nfun:__expl_finite=uninstrumented\nfun:__explicit_bzero_chk=uninstrumented\nfun:__extenddftf2=uninstrumented\nfun:__extendsftf2=uninstrumented\nfun:__extendxftf2=uninstrumented\nfun:__fbufsize=uninstrumented\nfun:__fcntl=uninstrumented\nfun:__fdelt_chk=uninstrumented\nfun:__fdelt_warn=uninstrumented\nfun:__fentry__=uninstrumented\nfun:__ffs=uninstrumented\nfun:__ffsdi2=uninstrumented\nfun:__ffsti2=uninstrumented\nfun:__fgets_chk=uninstrumented\nfun:__fgets_unlocked_chk=uninstrumented\nfun:__fgetws_chk=uninstrumented\nfun:__fgetws_unlocked_chk=uninstrumented\nfun:__finite=uninstrumented\nfun:__finitef=uninstrumented\nfun:__finitef128=uninstrumented\nfun:__finitel=uninstrumented\nfun:__fixdfti=uninstrumented\nfun:__fixsfti=uninstrumented\nfun:__fixtfdi=uninstrumented\nfun:__fixtfsi=uninstrumented\nfun:__fixtfti=uninstrumented\nfun:__fixunsdfdi=uninstrumented\nfun:__fixunsdfti=uninstrumented\nfun:__fixunssfdi=uninstrumented\nfun:__fixunssfti=uninstrumented\nfun:__fixunstfdi=uninstrumented\nfun:__fixunstfsi=uninstrumented\nfun:__fixunstfti=uninstrumented\nfun:__fi
xunsxfdi=uninstrumented\nfun:__fixunsxfti=uninstrumented\nfun:__fixxfti=uninstrumented\nfun:__flbf=uninstrumented\nfun:__floatditf=uninstrumented\nfun:__floatsitf=uninstrumented\nfun:__floattidf=uninstrumented\nfun:__floattisf=uninstrumented\nfun:__floattitf=uninstrumented\nfun:__floattixf=uninstrumented\nfun:__floatunditf=uninstrumented\nfun:__floatunsitf=uninstrumented\nfun:__floatuntidf=uninstrumented\nfun:__floatuntisf=uninstrumented\nfun:__floatuntitf=uninstrumented\nfun:__floatuntixf=uninstrumented\nfun:__fmod_finite=uninstrumented\nfun:__fmodf128_finite=uninstrumented\nfun:__fmodf_finite=uninstrumented\nfun:__fmodl_finite=uninstrumented\nfun:__follow_path=uninstrumented\nfun:__fork=uninstrumented\nfun:__fortify_fail=uninstrumented\nfun:__fp_nquery=uninstrumented\nfun:__fp_query=uninstrumented\nfun:__fp_resstat=uninstrumented\nfun:__fpclassify=uninstrumented\nfun:__fpclassifyf=uninstrumented\nfun:__fpclassifyf128=uninstrumented\nfun:__fpclassifyl=uninstrumented\nfun:__fpending=uninstrumented\nfun:__fprintf_chk=uninstrumented\nfun:__fpurge=uninstrumented\nfun:__fread_chk=uninstrumented\nfun:__fread_unlocked_chk=uninstrumented\nfun:__freadable=uninstrumented\nfun:__freading=uninstrumented\nfun:__free_fdresult=uninstrumented\nfun:__freelocale=uninstrumented\nfun:__fsetlocking=uninstrumented\nfun:__fstat=uninstrumented\nfun:__fwprintf_chk=uninstrumented\nfun:__fwritable=uninstrumented\nfun:__fwriting=uninstrumented\nfun:__fxstat=uninstrumented\nfun:__fxstat64=uninstrumented\nfun:__fxstatat=uninstrumented\nfun:__fxstatat64=uninstrumented\nfun:__gai_sigqueue=uninstrumented\nfun:__gamma_r_finite=uninstrumented\nfun:__gammaf128_r_finite=uninstrumented\nfun:__gammaf_r_finite=uninstrumented\nfun:__gammal_r_finite=uninstrumented\nfun:__gcc_bcmp=uninstrumented\nfun:__gcc_personality_v0=uninstrumented\nfun:__gconv_get_alias_db=uninstrumented\nfun:__gconv_get_cache=uninstrumented\nfun:__gconv_get_modules_db=uninstrumented\nfun:__gconv_transliterate=uninstrumented\nfun:__gen
eric_findstack=uninstrumented\nfun:__generic_morestack=uninstrumented\nfun:__generic_morestack_set_initial_sp=uninstrumented\nfun:__generic_releasestack=uninstrumented\nfun:__get_cpu_features=uninstrumented\nfun:__getauxval=uninstrumented\nfun:__getcwd_chk=uninstrumented\nfun:__getdelim=uninstrumented\nfun:__getdomainname_chk=uninstrumented\nfun:__getf2=uninstrumented\nfun:__getgroups_chk=uninstrumented\nfun:__gethostname_chk=uninstrumented\nfun:__getlogin_r_chk=uninstrumented\nfun:__getmntent_r=uninstrumented\nfun:__getpagesize=uninstrumented\nfun:__getpgid=uninstrumented\nfun:__getpid=uninstrumented\nfun:__getrlimit=uninstrumented\nfun:__gets_chk=uninstrumented\nfun:__gettimeofday=uninstrumented\nfun:__getwd_chk=uninstrumented\nfun:__gmtime_r=uninstrumented\nfun:__gttf2=uninstrumented\nfun:__h_errno_location=uninstrumented\nfun:__hostalias=uninstrumented\nfun:__hypot_finite=uninstrumented\nfun:__hypotf128_finite=uninstrumented\nfun:__hypotf_finite=uninstrumented\nfun:__hypotl_finite=uninstrumented\nfun:__inet6_scopeid_pton=uninstrumented\nfun:__inet_pton_length=uninstrumented\nfun:__internal_endnetgrent=uninstrumented\nfun:__internal_getnetgrent_r=uninstrumented\nfun:__internal_setnetgrent=uninstrumented\nfun:__isalnum_l=uninstrumented\nfun:__isalpha_l=uninstrumented\nfun:__isascii_l=uninstrumented\nfun:__isblank_l=uninstrumented\nfun:__iscanonicall=uninstrumented\nfun:__iscntrl_l=uninstrumented\nfun:__isctype=uninstrumented\nfun:__isdigit_l=uninstrumented\nfun:__iseqsig=uninstrumented\nfun:__iseqsigf=uninstrumented\nfun:__iseqsigf128=uninstrumented\nfun:__iseqsigl=uninstrumented\nfun:__isgraph_l=uninstrumented\nfun:__isinf=uninstrumented\nfun:__isinff=uninstrumented\nfun:__isinff128=uninstrumented\nfun:__isinfl=uninstrumented\nfun:__islower_l=uninstrumented\nfun:__isnan=uninstrumented\nfun:__isnanf=uninstrumented\nfun:__isnanf128=uninstrumented\nfun:__isnanl=uninstrumented\nfun:__isoc99_fscanf=uninstrumented\nfun:__isoc99_fwscanf=uninstrumented\nfun:__isoc99_scan
f=uninstrumented\nfun:__isoc99_sscanf=uninstrumented\nfun:__isoc99_swscanf=uninstrumented\nfun:__isoc99_vfscanf=uninstrumented\nfun:__isoc99_vfwscanf=uninstrumented\nfun:__isoc99_vscanf=uninstrumented\nfun:__isoc99_vsscanf=uninstrumented\nfun:__isoc99_vswscanf=uninstrumented\nfun:__isoc99_vwscanf=uninstrumented\nfun:__isoc99_wscanf=uninstrumented\nfun:__isprint_l=uninstrumented\nfun:__ispunct_l=uninstrumented\nfun:__issignaling=uninstrumented\nfun:__issignalingf=uninstrumented\nfun:__issignalingf128=uninstrumented\nfun:__issignalingl=uninstrumented\nfun:__isspace_l=uninstrumented\nfun:__isupper_l=uninstrumented\nfun:__iswalnum_l=uninstrumented\nfun:__iswalpha_l=uninstrumented\nfun:__iswblank_l=uninstrumented\nfun:__iswcntrl_l=uninstrumented\nfun:__iswctype=uninstrumented\nfun:__iswctype_l=uninstrumented\nfun:__iswdigit_l=uninstrumented\nfun:__iswgraph_l=uninstrumented\nfun:__iswlower_l=uninstrumented\nfun:__iswprint_l=uninstrumented\nfun:__iswpunct_l=uninstrumented\nfun:__iswspace_l=uninstrumented\nfun:__iswupper_l=uninstrumented\nfun:__iswxdigit_l=uninstrumented\nfun:__isxdigit_l=uninstrumented\nfun:__ivaliduser=uninstrumented\nfun:__j0_finite=uninstrumented\nfun:__j0f128_finite=uninstrumented\nfun:__j0f_finite=uninstrumented\nfun:__j0l_finite=uninstrumented\nfun:__j1_finite=uninstrumented\nfun:__j1f128_finite=uninstrumented\nfun:__j1f_finite=uninstrumented\nfun:__j1l_finite=uninstrumented\nfun:__jn_finite=uninstrumented\nfun:__jnf128_finite=uninstrumented\nfun:__jnf_finite=uninstrumented\nfun:__jnl_finite=uninstrumented\nfun:__letf2=uninstrumented\nfun:__lgamma_r_finite=uninstrumented\nfun:__lgammaf128_r_finite=uninstrumented\nfun:__lgammaf_r_finite=uninstrumented\nfun:__lgammal_r_finite=uninstrumented\nfun:__libc_alloc_buffer_alloc_array=uninstrumented\nfun:__libc_alloc_buffer_allocate=uninstrumented\nfun:__libc_alloc_buffer_copy_bytes=uninstrumented\nfun:__libc_alloc_buffer_copy_string=uninstrumented\nfun:__libc_alloc_buffer_create_failure=uninstrumented\nfun:__
libc_alloca_cutoff=uninstrumented\nfun:__libc_allocate_rtsig=uninstrumented\nfun:__libc_allocate_rtsig_private=uninstrumented\nfun:__libc_calloc=uninstrumented\nfun:__libc_clntudp_bufcreate=uninstrumented\nfun:__libc_csu_fini=uninstrumented\nfun:__libc_csu_init=uninstrumented\nfun:__libc_current_sigrtmax=uninstrumented\nfun:__libc_current_sigrtmax_private=uninstrumented\nfun:__libc_current_sigrtmin=uninstrumented\nfun:__libc_current_sigrtmin_private=uninstrumented\nfun:__libc_dlclose=uninstrumented\nfun:__libc_dlopen_mode=uninstrumented\nfun:__libc_dlsym=uninstrumented\nfun:__libc_dlvsym=uninstrumented\nfun:__libc_dynarray_at_failure=uninstrumented\nfun:__libc_dynarray_emplace_enlarge=uninstrumented\nfun:__libc_dynarray_finalize=uninstrumented\nfun:__libc_dynarray_resize=uninstrumented\nfun:__libc_dynarray_resize_clear=uninstrumented\nfun:__libc_fatal=uninstrumented\nfun:__libc_fork=uninstrumented\nfun:__libc_free=uninstrumented\nfun:__libc_freeres=uninstrumented\nfun:__libc_ifunc_impl_list=uninstrumented\nfun:__libc_init_first=uninstrumented\nfun:__libc_longjmp=uninstrumented\nfun:__libc_mallinfo=uninstrumented\nfun:__libc_malloc=uninstrumented\nfun:__libc_mallopt=uninstrumented\nfun:__libc_memalign=uninstrumented\nfun:__libc_msgrcv=uninstrumented\nfun:__libc_msgsnd=uninstrumented\nfun:__libc_pread=uninstrumented\nfun:__libc_pthread_init=uninstrumented\nfun:__libc_pvalloc=uninstrumented\nfun:__libc_pwrite=uninstrumented\nfun:__libc_realloc=uninstrumented\nfun:__libc_reallocarray=uninstrumented\nfun:__libc_rpc_getport=uninstrumented\nfun:__libc_sa_len=uninstrumented\nfun:__libc_scratch_buffer_grow=uninstrumented\nfun:__libc_scratch_buffer_grow_preserve=uninstrumented\nfun:__libc_scratch_buffer_set_array_size=uninstrumented\nfun:__libc_secure_getenv=uninstrumented\nfun:__libc_siglongjmp=uninstrumented\nfun:__libc_start_main=uninstrumented\nfun:__libc_system=uninstrumented\nfun:__libc_thread_freeres=uninstrumented\nfun:__libc_valloc=uninstrumented\nfun:__libc_vfork=un
instrumented\nfun:__loc_aton=uninstrumented\nfun:__loc_ntoa=uninstrumented\nfun:__log10_finite=uninstrumented\nfun:__log10f128_finite=uninstrumented\nfun:__log10f_finite=uninstrumented\nfun:__log10l_finite=uninstrumented\nfun:__log2_finite=uninstrumented\nfun:__log2f128_finite=uninstrumented\nfun:__log2f_finite=uninstrumented\nfun:__log2l_finite=uninstrumented\nfun:__log_finite=uninstrumented\nfun:__logf128_finite=uninstrumented\nfun:__logf_finite=uninstrumented\nfun:__logl_finite=uninstrumented\nfun:__longjmp_chk=uninstrumented\nfun:__lseek=uninstrumented\nfun:__lshrti3=uninstrumented\nfun:__lstat=uninstrumented\nfun:__lttf2=uninstrumented\nfun:__lxstat=uninstrumented\nfun:__lxstat64=uninstrumented\nfun:__madvise=uninstrumented\nfun:__mbrlen=uninstrumented\nfun:__mbrtowc=uninstrumented\nfun:__mbsnrtowcs_chk=uninstrumented\nfun:__mbsrtowcs_chk=uninstrumented\nfun:__mbstowcs_chk=uninstrumented\nfun:__memcpy_chk=uninstrumented\nfun:__memmove_chk=uninstrumented\nfun:__mempcpy=uninstrumented\nfun:__mempcpy_chk=uninstrumented\nfun:__mempcpy_small=uninstrumented\nfun:__memset_chk=uninstrumented\nfun:__merge_grp=uninstrumented\nfun:__mknod=uninstrumented\nfun:__mktemp=uninstrumented\nfun:__mmap=uninstrumented\nfun:__modti3=uninstrumented\nfun:__monstartup=uninstrumented\nfun:__morestack=uninstrumented\nfun:__morestack_allocate_stack_space=uninstrumented\nfun:__morestack_block_signals=uninstrumented\nfun:__morestack_fail=uninstrumented\nfun:__morestack_get_guard=uninstrumented\nfun:__morestack_large_model=uninstrumented\nfun:__morestack_load_mmap=uninstrumented\nfun:__morestack_make_guard=uninstrumented\nfun:__morestack_non_split=uninstrumented\nfun:__morestack_release_segments=uninstrumented\nfun:__morestack_set_guard=uninstrumented\nfun:__morestack_unblock_signals=uninstrumented\nfun:__mprotect=uninstrumented\nfun:__mq_open_2=uninstrumented\nfun:__muldc3=uninstrumented\nfun:__mulsc3=uninstrumented\nfun:__multc3=uninstrumented\nfun:__multf3=uninstrumented\nfun:__multi3=uni
nstrumented\nfun:__mulvdi3=uninstrumented\nfun:__mulvsi3=uninstrumented\nfun:__mulvti3=uninstrumented\nfun:__mulxc3=uninstrumented\nfun:__munmap=uninstrumented\nfun:__nanosleep=uninstrumented\nfun:__negtf2=uninstrumented\nfun:__negti2=uninstrumented\nfun:__negvdi2=uninstrumented\nfun:__negvsi2=uninstrumented\nfun:__negvti2=uninstrumented\nfun:__netf2=uninstrumented\nfun:__netlink_assert_response=uninstrumented\nfun:__newlocale=uninstrumented\nfun:__nis_default_access=uninstrumented\nfun:__nis_default_group=uninstrumented\nfun:__nis_default_owner=uninstrumented\nfun:__nis_default_ttl=uninstrumented\nfun:__nis_finddirectory=uninstrumented\nfun:__nis_hash=uninstrumented\nfun:__nisbind_connect=uninstrumented\nfun:__nisbind_create=uninstrumented\nfun:__nisbind_destroy=uninstrumented\nfun:__nisbind_next=uninstrumented\nfun:__nl_langinfo_l=uninstrumented\nfun:__ns_get16=uninstrumented\nfun:__ns_get32=uninstrumented\nfun:__ns_name_ntop=uninstrumented\nfun:__ns_name_unpack=uninstrumented\nfun:__nss_configure_lookup=uninstrumented\nfun:__nss_database_lookup=uninstrumented\nfun:__nss_disable_nscd=uninstrumented\nfun:__nss_group_lookup=uninstrumented\nfun:__nss_group_lookup2=uninstrumented\nfun:__nss_hash=uninstrumented\nfun:__nss_hostname_digits_dots=uninstrumented\nfun:__nss_hosts_lookup=uninstrumented\nfun:__nss_hosts_lookup2=uninstrumented\nfun:__nss_lookup=uninstrumented\nfun:__nss_lookup_function=uninstrumented\nfun:__nss_next=uninstrumented\nfun:__nss_next2=uninstrumented\nfun:__nss_passwd_lookup=uninstrumented\nfun:__nss_passwd_lookup2=uninstrumented\nfun:__nss_services_lookup2=uninstrumented\nfun:__obstack_printf_chk=uninstrumented\nfun:__obstack_vprintf_chk=uninstrumented\nfun:__open=uninstrumented\nfun:__open64=uninstrumented\nfun:__open64_2=uninstrumented\nfun:__open_2=uninstrumented\nfun:__open_catalog=uninstrumented\nfun:__open_nocancel=uninstrumented\nfun:__openat64_2=uninstrumented\nfun:__openat_2=uninstrumented\nfun:__overflow=uninstrumented\nfun:__p_cdname=uni
nstrumented\nfun:__p_cdnname=uninstrumented\nfun:__p_class=uninstrumented\nfun:__p_fqname=uninstrumented\nfun:__p_fqnname=uninstrumented\nfun:__p_option=uninstrumented\nfun:__p_query=uninstrumented\nfun:__p_rcode=uninstrumented\nfun:__p_secstodate=uninstrumented\nfun:__p_time=uninstrumented\nfun:__p_type=uninstrumented\nfun:__paritydi2=uninstrumented\nfun:__parityti2=uninstrumented\nfun:__pipe=uninstrumented\nfun:__poll=uninstrumented\nfun:__poll_chk=uninstrumented\nfun:__popcountdi2=uninstrumented\nfun:__popcountti2=uninstrumented\nfun:__posix_getopt=uninstrumented\nfun:__pow_finite=uninstrumented\nfun:__powf128_finite=uninstrumented\nfun:__powf_finite=uninstrumented\nfun:__powidf2=uninstrumented\nfun:__powisf2=uninstrumented\nfun:__powitf2=uninstrumented\nfun:__powixf2=uninstrumented\nfun:__powl_finite=uninstrumented\nfun:__ppoll_chk=uninstrumented\nfun:__pread64=uninstrumented\nfun:__pread64_chk=uninstrumented\nfun:__pread_chk=uninstrumented\nfun:__prepare_niscall=uninstrumented\nfun:__printf_chk=uninstrumented\nfun:__printf_fp=uninstrumented\nfun:__profile_frequency=uninstrumented\nfun:__pthread_atfork=uninstrumented\nfun:__pthread_barrier_init=uninstrumented\nfun:__pthread_barrier_wait=uninstrumented\nfun:__pthread_cleanup_routine=uninstrumented\nfun:__pthread_clock_gettime=uninstrumented\nfun:__pthread_clock_settime=uninstrumented\nfun:__pthread_get_minstack=uninstrumented\nfun:__pthread_getspecific=uninstrumented\nfun:__pthread_initialize_minimal=uninstrumented\nfun:__pthread_key_create=uninstrumented\nfun:__pthread_mutex_destroy=uninstrumented\nfun:__pthread_mutex_init=uninstrumented\nfun:__pthread_mutex_lock=uninstrumented\nfun:__pthread_mutex_trylock=uninstrumented\nfun:__pthread_mutex_unlock=uninstrumented\nfun:__pthread_mutexattr_destroy=uninstrumented\nfun:__pthread_mutexattr_init=uninstrumented\nfun:__pthread_mutexattr_settype=uninstrumented\nfun:__pthread_once=uninstrumented\nfun:__pthread_register_cancel=uninstrumented\nfun:__pthread_register_cancel_
defer=uninstrumented\nfun:__pthread_rwlock_destroy=uninstrumented\nfun:__pthread_rwlock_init=uninstrumented\nfun:__pthread_rwlock_rdlock=uninstrumented\nfun:__pthread_rwlock_tryrdlock=uninstrumented\nfun:__pthread_rwlock_trywrlock=uninstrumented\nfun:__pthread_rwlock_unlock=uninstrumented\nfun:__pthread_rwlock_wrlock=uninstrumented\nfun:__pthread_setspecific=uninstrumented\nfun:__pthread_unregister_cancel=uninstrumented\nfun:__pthread_unregister_cancel_restore=uninstrumented\nfun:__pthread_unwind=uninstrumented\nfun:__pthread_unwind_next=uninstrumented\nfun:__ptsname_r_chk=uninstrumented\nfun:__putlong=uninstrumented\nfun:__putshort=uninstrumented\nfun:__pwrite64=uninstrumented\nfun:__rawmemchr=uninstrumented\nfun:__read=uninstrumented\nfun:__read_chk=uninstrumented\nfun:__read_nocancel=uninstrumented\nfun:__readlink_chk=uninstrumented\nfun:__readlinkat_chk=uninstrumented\nfun:__realpath_chk=uninstrumented\nfun:__recv=uninstrumented\nfun:__recv_chk=uninstrumented\nfun:__recvfrom_chk=uninstrumented\nfun:__register_atfork=uninstrumented\nfun:__register_frame=uninstrumented\nfun:__register_frame_info=uninstrumented\nfun:__register_frame_info_bases=uninstrumented\nfun:__register_frame_info_table=uninstrumented\nfun:__register_frame_info_table_bases=uninstrumented\nfun:__register_frame_table=uninstrumented\nfun:__remainder_finite=uninstrumented\nfun:__remainderf128_finite=uninstrumented\nfun:__remainderf_finite=uninstrumented\nfun:__remainderl_finite=uninstrumented\nfun:__res_close=uninstrumented\nfun:__res_context_hostalias=uninstrumented\nfun:__res_context_query=uninstrumented\nfun:__res_context_search=uninstrumented\nfun:__res_dnok=uninstrumented\nfun:__res_hnok=uninstrumented\nfun:__res_hostalias=uninstrumented\nfun:__res_iclose=uninstrumented\nfun:__res_init=uninstrumented\nfun:__res_isourserver=uninstrumented\nfun:__res_mailok=uninstrumented\nfun:__res_mkquery=uninstrumented\nfun:__res_nameinquery=uninstrumented\nfun:__res_nclose=uninstrumented\nfun:__res_ninit=uni
nstrumented\nfun:__res_nmkquery=uninstrumented\nfun:__res_nquery=uninstrumented\nfun:__res_nquerydomain=uninstrumented\nfun:__res_nsearch=uninstrumented\nfun:__res_nsend=uninstrumented\nfun:__res_ownok=uninstrumented\nfun:__res_queriesmatch=uninstrumented\nfun:__res_query=uninstrumented\nfun:__res_querydomain=uninstrumented\nfun:__res_randomid=uninstrumented\nfun:__res_search=uninstrumented\nfun:__res_send=uninstrumented\nfun:__res_state=uninstrumented\nfun:__resolv_context_get=uninstrumented\nfun:__resolv_context_get_override=uninstrumented\nfun:__resolv_context_get_preinit=uninstrumented\nfun:__resolv_context_put=uninstrumented\nfun:__rpc_thread_createerr=uninstrumented\nfun:__rpc_thread_svc_fdset=uninstrumented\nfun:__rpc_thread_svc_max_pollfd=uninstrumented\nfun:__rpc_thread_svc_pollfd=uninstrumented\nfun:__sbrk=uninstrumented\nfun:__scalb_finite=uninstrumented\nfun:__scalbf_finite=uninstrumented\nfun:__scalbl_finite=uninstrumented\nfun:__sched_cpualloc=uninstrumented\nfun:__sched_cpucount=uninstrumented\nfun:__sched_cpufree=uninstrumented\nfun:__sched_get_priority_max=uninstrumented\nfun:__sched_get_priority_min=uninstrumented\nfun:__sched_getparam=uninstrumented\nfun:__sched_getscheduler=uninstrumented\nfun:__sched_setscheduler=uninstrumented\nfun:__sched_yield=uninstrumented\nfun:__secure_getenv=uninstrumented\nfun:__select=uninstrumented\nfun:__send=uninstrumented\nfun:__sendmmsg=uninstrumented\nfun:__setmntent=uninstrumented\nfun:__setpgid=uninstrumented\nfun:__sfp_handle_exceptions=uninstrumented\nfun:__shm_directory=uninstrumented\nfun:__sigaction=uninstrumented\nfun:__sigaddset=uninstrumented\nfun:__sigdelset=uninstrumented\nfun:__sigismember=uninstrumented\nfun:__signbit=uninstrumented\nfun:__signbitf=uninstrumented\nfun:__signbitf128=uninstrumented\nfun:__signbitl=uninstrumented\nfun:__sigpause=uninstrumented\nfun:__sigsetjmp=uninstrumented\nfun:__sigsuspend=uninstrumented\nfun:__sigtimedwait=uninstrumented\nfun:__sinh_finite=uninstrumented\nfun:__sinh
f128_finite=uninstrumented\nfun:__sinhf_finite=uninstrumented\nfun:__sinhl_finite=uninstrumented\nfun:__snprintf=uninstrumented\nfun:__snprintf_chk=uninstrumented\nfun:__socket=uninstrumented\nfun:__splitstack_block_signals=uninstrumented\nfun:__splitstack_block_signals_context=uninstrumented\nfun:__splitstack_find=uninstrumented\nfun:__splitstack_find_context=uninstrumented\nfun:__splitstack_getcontext=uninstrumented\nfun:__splitstack_makecontext=uninstrumented\nfun:__splitstack_releasecontext=uninstrumented\nfun:__splitstack_resetcontext=uninstrumented\nfun:__splitstack_setcontext=uninstrumented\nfun:__sprintf_chk=uninstrumented\nfun:__sqrt_finite=uninstrumented\nfun:__sqrtf128_finite=uninstrumented\nfun:__sqrtf_finite=uninstrumented\nfun:__sqrtl_finite=uninstrumented\nfun:__stack_chk_fail=uninstrumented\nfun:__stack_chk_fail_local=uninstrumented\nfun:__stack_split_initialize=uninstrumented\nfun:__stat=uninstrumented\nfun:__statfs=uninstrumented\nfun:__stpcpy=uninstrumented\nfun:__stpcpy_chk=uninstrumented\nfun:__stpcpy_small=uninstrumented\nfun:__stpncpy=uninstrumented\nfun:__stpncpy_chk=uninstrumented\nfun:__strcasecmp=uninstrumented\nfun:__strcasecmp_l=uninstrumented\nfun:__strcasestr=uninstrumented\nfun:__strcat_chk=uninstrumented\nfun:__strcoll_l=uninstrumented\nfun:__strcpy_chk=uninstrumented\nfun:__strcpy_small=uninstrumented\nfun:__strcspn_c1=uninstrumented\nfun:__strcspn_c2=uninstrumented\nfun:__strcspn_c3=uninstrumented\nfun:__strdup=uninstrumented\nfun:__strerror_r=uninstrumented\nfun:__strfmon_l=uninstrumented\nfun:__strftime_l=uninstrumented\nfun:__strncasecmp_l=uninstrumented\nfun:__strncat_chk=uninstrumented\nfun:__strncpy_chk=uninstrumented\nfun:__strndup=uninstrumented\nfun:__strpbrk_c2=uninstrumented\nfun:__strpbrk_c3=uninstrumented\nfun:__strsep_1c=uninstrumented\nfun:__strsep_2c=uninstrumented\nfun:__strsep_3c=uninstrumented\nfun:__strsep_g=uninstrumented\nfun:__strspn_c1=uninstrumented\nfun:__strspn_c2=uninstrumented\nfun:__strspn_c3=uninstrum
ented\nfun:__strtod_internal=uninstrumented\nfun:__strtod_l=uninstrumented\nfun:__strtod_nan=uninstrumented\nfun:__strtof128_internal=uninstrumented\nfun:__strtof128_nan=uninstrumented\nfun:__strtof_internal=uninstrumented\nfun:__strtof_l=uninstrumented\nfun:__strtof_nan=uninstrumented\nfun:__strtok_r=uninstrumented\nfun:__strtok_r_1c=uninstrumented\nfun:__strtol_internal=uninstrumented\nfun:__strtol_l=uninstrumented\nfun:__strtold_internal=uninstrumented\nfun:__strtold_l=uninstrumented\nfun:__strtold_nan=uninstrumented\nfun:__strtoll_internal=uninstrumented\nfun:__strtoll_l=uninstrumented\nfun:__strtoul_internal=uninstrumented\nfun:__strtoul_l=uninstrumented\nfun:__strtoull_internal=uninstrumented\nfun:__strtoull_l=uninstrumented\nfun:__strverscmp=uninstrumented\nfun:__strxfrm_l=uninstrumented\nfun:__subtf3=uninstrumented\nfun:__subvdi3=uninstrumented\nfun:__subvsi3=uninstrumented\nfun:__subvti3=uninstrumented\nfun:__swprintf_chk=uninstrumented\nfun:__sym_ntop=uninstrumented\nfun:__sym_ntos=uninstrumented\nfun:__sym_ston=uninstrumented\nfun:__sysconf=uninstrumented\nfun:__sysctl=uninstrumented\nfun:__syslog_chk=uninstrumented\nfun:__sysv_signal=uninstrumented\nfun:__tdelete=uninstrumented\nfun:__tfind=uninstrumented\nfun:__tls_get_addr=uninstrumented\nfun:__toascii_l=uninstrumented\nfun:__tolower_l=uninstrumented\nfun:__toupper_l=uninstrumented\nfun:__towctrans=uninstrumented\nfun:__towctrans_l=uninstrumented\nfun:__towlower_l=uninstrumented\nfun:__towupper_l=uninstrumented\nfun:__trunctfdf2=uninstrumented\nfun:__trunctfsf2=uninstrumented\nfun:__trunctfxf2=uninstrumented\nfun:__tsearch=uninstrumented\nfun:__ttyname_r_chk=uninstrumented\nfun:__tunable_get_val=uninstrumented\nfun:__twalk=uninstrumented\nfun:__ucmpti2=uninstrumented\nfun:__udiv_w_sdiv=uninstrumented\nfun:__udivmodti4=uninstrumented\nfun:__udivti3=uninstrumented\nfun:__uflow=uninstrumented\nfun:__umodti3=uninstrumented\nfun:__underflow=uninstrumented\nfun:__unordtf2=uninstrumented\nfun:__uselocale=unin
strumented\nfun:__vasprintf_chk=uninstrumented\nfun:__vdprintf_chk=uninstrumented\nfun:__vfork=uninstrumented\nfun:__vfprintf_chk=uninstrumented\nfun:__vfscanf=uninstrumented\nfun:__vfwprintf_chk=uninstrumented\nfun:__vprintf_chk=uninstrumented\nfun:__vsnprintf=uninstrumented\nfun:__vsnprintf_chk=uninstrumented\nfun:__vsprintf_chk=uninstrumented\nfun:__vsscanf=uninstrumented\nfun:__vswprintf_chk=uninstrumented\nfun:__vsyslog_chk=uninstrumented\nfun:__vwprintf_chk=uninstrumented\nfun:__wait=uninstrumented\nfun:__waitpid=uninstrumented\nfun:__wcpcpy_chk=uninstrumented\nfun:__wcpncpy_chk=uninstrumented\nfun:__wcrtomb_chk=uninstrumented\nfun:__wcscasecmp_l=uninstrumented\nfun:__wcscat_chk=uninstrumented\nfun:__wcscoll_l=uninstrumented\nfun:__wcscpy_chk=uninstrumented\nfun:__wcsftime_l=uninstrumented\nfun:__wcsncasecmp_l=uninstrumented\nfun:__wcsncat_chk=uninstrumented\nfun:__wcsncpy_chk=uninstrumented\nfun:__wcsnrtombs_chk=uninstrumented\nfun:__wcsrtombs_chk=uninstrumented\nfun:__wcstod_internal=uninstrumented\nfun:__wcstod_l=uninstrumented\nfun:__wcstof128_internal=uninstrumented\nfun:__wcstof_internal=uninstrumented\nfun:__wcstof_l=uninstrumented\nfun:__wcstol_internal=uninstrumented\nfun:__wcstol_l=uninstrumented\nfun:__wcstold_internal=uninstrumented\nfun:__wcstold_l=uninstrumented\nfun:__wcstoll_internal=uninstrumented\nfun:__wcstoll_l=uninstrumented\nfun:__wcstombs_chk=uninstrumented\nfun:__wcstoul_internal=uninstrumented\nfun:__wcstoul_l=uninstrumented\nfun:__wcstoull_internal=uninstrumented\nfun:__wcstoull_l=uninstrumented\nfun:__wcsxfrm_l=uninstrumented\nfun:__wctomb_chk=uninstrumented\nfun:__wctrans_l=uninstrumented\nfun:__wctype_l=uninstrumented\nfun:__wmemcpy_chk=uninstrumented\nfun:__wmemmove_chk=uninstrumented\nfun:__wmempcpy_chk=uninstrumented\nfun:__wmemset_chk=uninstrumented\nfun:__woverflow=uninstrumented\nfun:__wprintf_chk=uninstrumented\nfun:__wrap_pthread_create=uninstrumented\nfun:__write=uninstrumented\nfun:__wuflow=uninstrumented\nfun:__wunderflo
w=uninstrumented\nfun:__xmknod=uninstrumented\nfun:__xmknodat=uninstrumented\nfun:__xpg_basename=uninstrumented\nfun:__xpg_sigpause=uninstrumented\nfun:__xpg_strerror_r=uninstrumented\nfun:__xstat=uninstrumented\nfun:__xstat64=uninstrumented\nfun:__y0_finite=uninstrumented\nfun:__y0f128_finite=uninstrumented\nfun:__y0f_finite=uninstrumented\nfun:__y0l_finite=uninstrumented\nfun:__y1_finite=uninstrumented\nfun:__y1f128_finite=uninstrumented\nfun:__y1f_finite=uninstrumented\nfun:__y1l_finite=uninstrumented\nfun:__yn_finite=uninstrumented\nfun:__ynf128_finite=uninstrumented\nfun:__ynf_finite=uninstrumented\nfun:__ynl_finite=uninstrumented\nfun:__yp_check=uninstrumented\nfun:_authenticate=uninstrumented\nfun:_dl_addr=uninstrumented\nfun:_dl_allocate_tls=uninstrumented\nfun:_dl_allocate_tls_init=uninstrumented\nfun:_dl_catch_error=uninstrumented\nfun:_dl_catch_exception=uninstrumented\nfun:_dl_deallocate_tls=uninstrumented\nfun:_dl_debug_state=uninstrumented\nfun:_dl_exception_create=uninstrumented\nfun:_dl_exception_create_format=uninstrumented\nfun:_dl_exception_free=uninstrumented\nfun:_dl_find_dso_for_object=uninstrumented\nfun:_dl_get_tls_static_info=uninstrumented\nfun:_dl_make_stack_executable=uninstrumented\nfun:_dl_mcount=uninstrumented\nfun:_dl_mcount_wrapper=uninstrumented\nfun:_dl_mcount_wrapper_check=uninstrumented\nfun:_dl_rtld_di_serinfo=uninstrumented\nfun:_dl_signal_error=uninstrumented\nfun:_dl_signal_exception=uninstrumented\nfun:_dl_sym=uninstrumented\nfun:_dl_vsym=uninstrumented\nfun:_exit=uninstrumented\nfun:_flushlbf=uninstrumented\nfun:_gethtbyaddr=uninstrumented\nfun:_gethtbyname=uninstrumented\nfun:_gethtbyname2=uninstrumented\nfun:_gethtent=uninstrumented\nfun:_getlong=uninstrumented\nfun:_getshort=uninstrumented\nfun:_longjmp=uninstrumented\nfun:_mcleanup=uninstrumented\nfun:_mcount=uninstrumented\nfun:_nsl_default_nss=uninstrumented\nfun:_nss_files_parse_grent=uninstrumented\nfun:_nss_files_parse_pwent=uninstrumented\nfun:_nss_files_parse_sge
nt=uninstrumented\nfun:_nss_files_parse_spent=uninstrumented\nfun:_obstack_allocated_p=uninstrumented\nfun:_obstack_begin=uninstrumented\nfun:_obstack_begin_1=uninstrumented\nfun:_obstack_free=uninstrumented\nfun:_obstack_memory_used=uninstrumented\nfun:_obstack_newchunk=uninstrumented\nfun:_pthread_cleanup_pop=uninstrumented\nfun:_pthread_cleanup_pop_restore=uninstrumented\nfun:_pthread_cleanup_push=uninstrumented\nfun:_pthread_cleanup_push_defer=uninstrumented\nfun:_rpc_dtablesize=uninstrumented\nfun:_seterr_reply=uninstrumented\nfun:_sethtent=uninstrumented\nfun:_setjmp=uninstrumented\nfun:_tolower=uninstrumented\nfun:_toupper=uninstrumented\nfun:_xdr_ib_request=uninstrumented\nfun:_xdr_nis_result=uninstrumented\nfun:a64l=uninstrumented\nfun:abort=uninstrumented\nfun:abs=uninstrumented\nfun:accept=uninstrumented\nfun:accept4=uninstrumented\nfun:access=uninstrumented\nfun:acct=uninstrumented\nfun:acos=uninstrumented\nfun:acosf=uninstrumented\nfun:acosf128=uninstrumented\nfun:acosf32=uninstrumented\nfun:acosf32x=uninstrumented\nfun:acosf64=uninstrumented\nfun:acosf64x=uninstrumented\nfun:acosh=uninstrumented\nfun:acoshf=uninstrumented\nfun:acoshf128=uninstrumented\nfun:acoshf32=uninstrumented\nfun:acoshf32x=uninstrumented\nfun:acoshf64=uninstrumented\nfun:acoshf64x=uninstrumented\nfun:acoshl=uninstrumented\nfun:acosl=uninstrumented\nfun:addmntent=uninstrumented\nfun:addseverity=uninstrumented\nfun:adjtime=uninstrumented\nfun:adjtimex=uninstrumented\nfun:advance=uninstrumented\nfun:aio_cancel=uninstrumented\nfun:aio_cancel64=uninstrumented\nfun:aio_error=uninstrumented\nfun:aio_error64=uninstrumented\nfun:aio_fsync=uninstrumented\nfun:aio_fsync64=uninstrumented\nfun:aio_init=uninstrumented\nfun:aio_read=uninstrumented\nfun:aio_read64=uninstrumented\nfun:aio_return=uninstrumented\nfun:aio_return64=uninstrumented\nfun:aio_suspend=uninstrumented\nfun:aio_suspend64=uninstrumented\nfun:aio_write=uninstrumented\nfun:aio_write64=uninstrumented\nfun:alarm=uninstrumented\nfu
n:aligned_alloc=uninstrumented\nfun:alphasort=uninstrumented\nfun:alphasort64=uninstrumented\nfun:arch_prctl=uninstrumented\nfun:argp_error=uninstrumented\nfun:argp_failure=uninstrumented\nfun:argp_help=uninstrumented\nfun:argp_parse=uninstrumented\nfun:argp_state_help=uninstrumented\nfun:argp_usage=uninstrumented\nfun:argz_add=uninstrumented\nfun:argz_add_sep=uninstrumented\nfun:argz_append=uninstrumented\nfun:argz_count=uninstrumented\nfun:argz_create=uninstrumented\nfun:argz_create_sep=uninstrumented\nfun:argz_delete=uninstrumented\nfun:argz_extract=uninstrumented\nfun:argz_insert=uninstrumented\nfun:argz_next=uninstrumented\nfun:argz_replace=uninstrumented\nfun:argz_stringify=uninstrumented\nfun:asctime=uninstrumented\nfun:asctime_r=uninstrumented\nfun:asin=uninstrumented\nfun:asinf=uninstrumented\nfun:asinf128=uninstrumented\nfun:asinf32=uninstrumented\nfun:asinf32x=uninstrumented\nfun:asinf64=uninstrumented\nfun:asinf64x=uninstrumented\nfun:asinh=uninstrumented\nfun:asinhf=uninstrumented\nfun:asinhf128=uninstrumented\nfun:asinhf32=uninstrumented\nfun:asinhf32x=uninstrumented\nfun:asinhf64=uninstrumented\nfun:asinhf64x=uninstrumented\nfun:asinhl=uninstrumented\nfun:asinl=uninstrumented\nfun:asprintf=uninstrumented\nfun:at_quick_exit=uninstrumented\nfun:atan=uninstrumented\nfun:atan2=uninstrumented\nfun:atan2f=uninstrumented\nfun:atan2f128=uninstrumented\nfun:atan2f32=uninstrumented\nfun:atan2f32x=uninstrumented\nfun:atan2f64=uninstrumented\nfun:atan2f64x=uninstrumented\nfun:atan2l=uninstrumented\nfun:atanf=uninstrumented\nfun:atanf128=uninstrumented\nfun:atanf32=uninstrumented\nfun:atanf32x=uninstrumented\nfun:atanf64=uninstrumented\nfun:atanf64x=uninstrumented\nfun:atanh=uninstrumented\nfun:atanhf=uninstrumented\nfun:atanhf128=uninstrumented\nfun:atanhf32=uninstrumented\nfun:atanhf32x=uninstrumented\nfun:atanhf64=uninstrumented\nfun:atanhf64x=uninstrumented\nfun:atanhl=uninstrumented\nfun:atanl=uninstrumented\nfun:atexit=uninstrumented\nfun:atof=uninstrumented
\nfun:atoi=uninstrumented\nfun:atol=uninstrumented\nfun:atoll=uninstrumented\nfun:authdes_create=uninstrumented\nfun:authdes_getucred=uninstrumented\nfun:authdes_pk_create=uninstrumented\nfun:authnone_create=uninstrumented\nfun:authunix_create=uninstrumented\nfun:authunix_create_default=uninstrumented\nfun:backtrace=uninstrumented\nfun:backtrace_symbols=uninstrumented\nfun:backtrace_symbols_fd=uninstrumented\nfun:basename=uninstrumented\nfun:bcmp=uninstrumented\nfun:bcopy=uninstrumented\nfun:bdflush=uninstrumented\nfun:bind=uninstrumented\nfun:bind_textdomain_codeset=uninstrumented\nfun:bindresvport=uninstrumented\nfun:bindtextdomain=uninstrumented\nfun:brk=uninstrumented\nfun:bsd_signal=uninstrumented\nfun:bsearch=uninstrumented\nfun:btowc=uninstrumented\nfun:bzero=uninstrumented\nfun:c16rtomb=uninstrumented\nfun:c32rtomb=uninstrumented\nfun:cabs=uninstrumented\nfun:cabsf=uninstrumented\nfun:cabsf128=uninstrumented\nfun:cabsf32=uninstrumented\nfun:cabsf32x=uninstrumented\nfun:cabsf64=uninstrumented\nfun:cabsf64x=uninstrumented\nfun:cabsl=uninstrumented\nfun:cacos=uninstrumented\nfun:cacosf=uninstrumented\nfun:cacosf128=uninstrumented\nfun:cacosf32=uninstrumented\nfun:cacosf32x=uninstrumented\nfun:cacosf64=uninstrumented\nfun:cacosf64x=uninstrumented\nfun:cacosh=uninstrumented\nfun:cacoshf=uninstrumented\nfun:cacoshf128=uninstrumented\nfun:cacoshf32=uninstrumented\nfun:cacoshf32x=uninstrumented\nfun:cacoshf64=uninstrumented\nfun:cacoshf64x=uninstrumented\nfun:cacoshl=uninstrumented\nfun:cacosl=uninstrumented\nfun:calloc=uninstrumented\nfun:callrpc=uninstrumented\nfun:canonicalize=uninstrumented\nfun:canonicalize_file_name=uninstrumented\nfun:canonicalizef=uninstrumented\nfun:canonicalizef128=uninstrumented\nfun:canonicalizef32=uninstrumented\nfun:canonicalizef32x=uninstrumented\nfun:canonicalizef64=uninstrumented\nfun:canonicalizef64x=uninstrumented\nfun:canonicalizel=uninstrumented\nfun:capget=uninstrumented\nfun:capset=uninstrumented\nfun:carg=uninstrumented\nfun:
cargf=uninstrumented\nfun:cargf128=uninstrumented\nfun:cargf32=uninstrumented\nfun:cargf32x=uninstrumented\nfun:cargf64=uninstrumented\nfun:cargf64x=uninstrumented\nfun:cargl=uninstrumented\nfun:casin=uninstrumented\nfun:casinf=uninstrumented\nfun:casinf128=uninstrumented\nfun:casinf32=uninstrumented\nfun:casinf32x=uninstrumented\nfun:casinf64=uninstrumented\nfun:casinf64x=uninstrumented\nfun:casinh=uninstrumented\nfun:casinhf=uninstrumented\nfun:casinhf128=uninstrumented\nfun:casinhf32=uninstrumented\nfun:casinhf32x=uninstrumented\nfun:casinhf64=uninstrumented\nfun:casinhf64x=uninstrumented\nfun:casinhl=uninstrumented\nfun:casinl=uninstrumented\nfun:catan=uninstrumented\nfun:catanf=uninstrumented\nfun:catanf128=uninstrumented\nfun:catanf32=uninstrumented\nfun:catanf32x=uninstrumented\nfun:catanf64=uninstrumented\nfun:catanf64x=uninstrumented\nfun:catanh=uninstrumented\nfun:catanhf=uninstrumented\nfun:catanhf128=uninstrumented\nfun:catanhf32=uninstrumented\nfun:catanhf32x=uninstrumented\nfun:catanhf64=uninstrumented\nfun:catanhf64x=uninstrumented\nfun:catanhl=uninstrumented\nfun:catanl=uninstrumented\nfun:catclose=uninstrumented\nfun:catgets=uninstrumented\nfun:catopen=uninstrumented\nfun:cbc_crypt=uninstrumented\nfun:cbrt=uninstrumented\nfun:cbrtf=uninstrumented\nfun:cbrtf128=uninstrumented\nfun:cbrtf32=uninstrumented\nfun:cbrtf32x=uninstrumented\nfun:cbrtf64=uninstrumented\nfun:cbrtf64x=uninstrumented\nfun:cbrtl=uninstrumented\nfun:ccos=uninstrumented\nfun:ccosf=uninstrumented\nfun:ccosf128=uninstrumented\nfun:ccosf32=uninstrumented\nfun:ccosf32x=uninstrumented\nfun:ccosf64=uninstrumented\nfun:ccosf64x=uninstrumented\nfun:ccosh=uninstrumented\nfun:ccoshf=uninstrumented\nfun:ccoshf128=uninstrumented\nfun:ccoshf32=uninstrumented\nfun:ccoshf32x=uninstrumented\nfun:ccoshf64=uninstrumented\nfun:ccoshf64x=uninstrumented\nfun:ccoshl=uninstrumented\nfun:ccosl=uninstrumented\nfun:ceil=uninstrumented\nfun:ceilf=uninstrumented\nfun:ceilf128=uninstrumented\nfun:ceilf32=uninst
rumented\nfun:ceilf32x=uninstrumented\nfun:ceilf64=uninstrumented\nfun:ceilf64x=uninstrumented\nfun:ceill=uninstrumented\nfun:cexp=uninstrumented\nfun:cexpf=uninstrumented\nfun:cexpf128=uninstrumented\nfun:cexpf32=uninstrumented\nfun:cexpf32x=uninstrumented\nfun:cexpf64=uninstrumented\nfun:cexpf64x=uninstrumented\nfun:cexpl=uninstrumented\nfun:cfgetispeed=uninstrumented\nfun:cfgetospeed=uninstrumented\nfun:cfmakeraw=uninstrumented\nfun:cfree=uninstrumented\nfun:cfsetispeed=uninstrumented\nfun:cfsetospeed=uninstrumented\nfun:cfsetspeed=uninstrumented\nfun:chdir=uninstrumented\nfun:chflags=uninstrumented\nfun:chmod=uninstrumented\nfun:chown=uninstrumented\nfun:chroot=uninstrumented\nfun:cimag=uninstrumented\nfun:cimagf=uninstrumented\nfun:cimagf128=uninstrumented\nfun:cimagf32=uninstrumented\nfun:cimagf32x=uninstrumented\nfun:cimagf64=uninstrumented\nfun:cimagf64x=uninstrumented\nfun:cimagl=uninstrumented\nfun:clearenv=uninstrumented\nfun:clearerr=uninstrumented\nfun:clearerr_unlocked=uninstrumented\nfun:clnt_broadcast=uninstrumented\nfun:clnt_create=uninstrumented\nfun:clnt_pcreateerror=uninstrumented\nfun:clnt_perrno=uninstrumented\nfun:clnt_perror=uninstrumented\nfun:clnt_spcreateerror=uninstrumented\nfun:clnt_sperrno=uninstrumented\nfun:clnt_sperror=uninstrumented\nfun:clntraw_create=uninstrumented\nfun:clnttcp_create=uninstrumented\nfun:clntudp_bufcreate=uninstrumented\nfun:clntudp_create=uninstrumented\nfun:clntunix_create=uninstrumented\nfun:clock=uninstrumented\nfun:clock_adjtime=uninstrumented\nfun:clock_getcpuclockid=uninstrumented\nfun:clock_getres=uninstrumented\nfun:clock_gettime=uninstrumented\nfun:clock_nanosleep=uninstrumented\nfun:clock_settime=uninstrumented\nfun:clog=uninstrumented\nfun:clog10=uninstrumented\nfun:clog10f=uninstrumented\nfun:clog10f128=uninstrumented\nfun:clog10f32=uninstrumented\nfun:clog10f32x=uninstrumented\nfun:clog10f64=uninstrumented\nfun:clog10f64x=uninstrumented\nfun:clog10l=uninstrumented\nfun:clogf=uninstrumented\nfun:clogf
128=uninstrumented\nfun:clogf32=uninstrumented\nfun:clogf32x=uninstrumented\nfun:clogf64=uninstrumented\nfun:clogf64x=uninstrumented\nfun:clogl=uninstrumented\nfun:clone=uninstrumented\nfun:close=uninstrumented\nfun:closedir=uninstrumented\nfun:closelog=uninstrumented\nfun:confstr=uninstrumented\nfun:conj=uninstrumented\nfun:conjf=uninstrumented\nfun:conjf128=uninstrumented\nfun:conjf32=uninstrumented\nfun:conjf32x=uninstrumented\nfun:conjf64=uninstrumented\nfun:conjf64x=uninstrumented\nfun:conjl=uninstrumented\nfun:connect=uninstrumented\nfun:copy_file_range=uninstrumented\nfun:copysign=uninstrumented\nfun:copysignf=uninstrumented\nfun:copysignf128=uninstrumented\nfun:copysignf32=uninstrumented\nfun:copysignf32x=uninstrumented\nfun:copysignf64=uninstrumented\nfun:copysignf64x=uninstrumented\nfun:copysignl=uninstrumented\nfun:cos=uninstrumented\nfun:cosf=uninstrumented\nfun:cosf128=uninstrumented\nfun:cosf32=uninstrumented\nfun:cosf32x=uninstrumented\nfun:cosf64=uninstrumented\nfun:cosf64x=uninstrumented\nfun:cosh=uninstrumented\nfun:coshf=uninstrumented\nfun:coshf128=uninstrumented\nfun:coshf32=uninstrumented\nfun:coshf32x=uninstrumented\nfun:coshf64=uninstrumented\nfun:coshf64x=uninstrumented\nfun:coshl=uninstrumented\nfun:cosl=uninstrumented\nfun:cpow=uninstrumented\nfun:cpowf=uninstrumented\nfun:cpowf128=uninstrumented\nfun:cpowf32=uninstrumented\nfun:cpowf32x=uninstrumented\nfun:cpowf64=uninstrumented\nfun:cpowf64x=uninstrumented\nfun:cpowl=uninstrumented\nfun:cproj=uninstrumented\nfun:cprojf=uninstrumented\nfun:cprojf128=uninstrumented\nfun:cprojf32=uninstrumented\nfun:cprojf32x=uninstrumented\nfun:cprojf64=uninstrumented\nfun:cprojf64x=uninstrumented\nfun:cprojl=uninstrumented\nfun:creal=uninstrumented\nfun:crealf=uninstrumented\nfun:crealf128=uninstrumented\nfun:crealf32=uninstrumented\nfun:crealf32x=uninstrumented\nfun:crealf64=uninstrumented\nfun:crealf64x=uninstrumented\nfun:creall=uninstrumented\nfun:creat=uninstrumented\nfun:creat64=uninstrumented\nfun:
create_module=uninstrumented\nfun:crypt=uninstrumented\nfun:crypt_r=uninstrumented\nfun:csin=uninstrumented\nfun:csinf=uninstrumented\nfun:csinf128=uninstrumented\nfun:csinf32=uninstrumented\nfun:csinf32x=uninstrumented\nfun:csinf64=uninstrumented\nfun:csinf64x=uninstrumented\nfun:csinh=uninstrumented\nfun:csinhf=uninstrumented\nfun:csinhf128=uninstrumented\nfun:csinhf32=uninstrumented\nfun:csinhf32x=uninstrumented\nfun:csinhf64=uninstrumented\nfun:csinhf64x=uninstrumented\nfun:csinhl=uninstrumented\nfun:csinl=uninstrumented\nfun:csqrt=uninstrumented\nfun:csqrtf=uninstrumented\nfun:csqrtf128=uninstrumented\nfun:csqrtf32=uninstrumented\nfun:csqrtf32x=uninstrumented\nfun:csqrtf64=uninstrumented\nfun:csqrtf64x=uninstrumented\nfun:csqrtl=uninstrumented\nfun:ctan=uninstrumented\nfun:ctanf=uninstrumented\nfun:ctanf128=uninstrumented\nfun:ctanf32=uninstrumented\nfun:ctanf32x=uninstrumented\nfun:ctanf64=uninstrumented\nfun:ctanf64x=uninstrumented\nfun:ctanh=uninstrumented\nfun:ctanhf=uninstrumented\nfun:ctanhf128=uninstrumented\nfun:ctanhf32=uninstrumented\nfun:ctanhf32x=uninstrumented\nfun:ctanhf64=uninstrumented\nfun:ctanhf64x=uninstrumented\nfun:ctanhl=uninstrumented\nfun:ctanl=uninstrumented\nfun:ctermid=uninstrumented\nfun:ctime=uninstrumented\nfun:ctime_r=uninstrumented\nfun:cuserid=uninstrumented\nfun:daemon=uninstrumented\nfun:dcgettext=uninstrumented\nfun:dcngettext=uninstrumented\nfun:delete_module=uninstrumented\nfun:des_setparity=uninstrumented\nfun:dgettext=uninstrumented\nfun:difftime=uninstrumented\nfun:dirfd=uninstrumented\nfun:dirname=uninstrumented\nfun:div=uninstrumented\nfun:dl_iterate_phdr=uninstrumented\nfun:dladdr=uninstrumented\nfun:dladdr1=uninstrumented\nfun:dlclose=uninstrumented\nfun:dlerror=uninstrumented\nfun:dlinfo=uninstrumented\nfun:dlmopen=uninstrumented\nfun:dlopen=uninstrumented\nfun:dlsym=uninstrumented\nfun:dlvsym=uninstrumented\nfun:dngettext=uninstrumented\nfun:dprintf=uninstrumented\nfun:drand48=uninstrumented\nfun:drand48_r=uninstru
mented\nfun:drem=uninstrumented\nfun:dremf=uninstrumented\nfun:dreml=uninstrumented\nfun:dup=uninstrumented\nfun:dup2=uninstrumented\nfun:dup3=uninstrumented\nfun:duplocale=uninstrumented\nfun:dysize=uninstrumented\nfun:eaccess=uninstrumented\nfun:ecb_crypt=uninstrumented\nfun:ecvt=uninstrumented\nfun:ecvt_r=uninstrumented\nfun:encrypt=uninstrumented\nfun:encrypt_r=uninstrumented\nfun:endaliasent=uninstrumented\nfun:endfsent=uninstrumented\nfun:endgrent=uninstrumented\nfun:endhostent=uninstrumented\nfun:endmntent=uninstrumented\nfun:endnetent=uninstrumented\nfun:endnetgrent=uninstrumented\nfun:endprotoent=uninstrumented\nfun:endpwent=uninstrumented\nfun:endrpcent=uninstrumented\nfun:endservent=uninstrumented\nfun:endsgent=uninstrumented\nfun:endspent=uninstrumented\nfun:endttyent=uninstrumented\nfun:endusershell=uninstrumented\nfun:endutent=uninstrumented\nfun:endutxent=uninstrumented\nfun:envz_add=uninstrumented\nfun:envz_entry=uninstrumented\nfun:envz_get=uninstrumented\nfun:envz_merge=uninstrumented\nfun:envz_remove=uninstrumented\nfun:envz_strip=uninstrumented\nfun:epoll_create=uninstrumented\nfun:epoll_create1=uninstrumented\nfun:epoll_ctl=uninstrumented\nfun:epoll_pwait=uninstrumented\nfun:epoll_wait=uninstrumented\nfun:erand48=uninstrumented\nfun:erand48_r=uninstrumented\nfun:erf=uninstrumented\nfun:erfc=uninstrumented\nfun:erfcf=uninstrumented\nfun:erfcf128=uninstrumented\nfun:erfcf32=uninstrumented\nfun:erfcf32x=uninstrumented\nfun:erfcf64=uninstrumented\nfun:erfcf64x=uninstrumented\nfun:erfcl=uninstrumented\nfun:erff=uninstrumented\nfun:erff128=uninstrumented\nfun:erff32=uninstrumented\nfun:erff32x=uninstrumented\nfun:erff64=uninstrumented\nfun:erff64x=uninstrumented\nfun:erfl=uninstrumented\nfun:err=uninstrumented\nfun:error=uninstrumented\nfun:error_at_line=uninstrumented\nfun:errx=uninstrumented\nfun:ether_aton=uninstrumented\nfun:ether_aton_r=uninstrumented\nfun:ether_hostton=uninstrumented\nfun:ether_line=uninstrumented\nfun:ether_ntoa=uninstrumented\
nfun:ether_ntoa_r=uninstrumented\nfun:ether_ntohost=uninstrumented\nfun:euidaccess=uninstrumented\nfun:eventfd=uninstrumented\nfun:eventfd_read=uninstrumented\nfun:eventfd_write=uninstrumented\nfun:execl=uninstrumented\nfun:execle=uninstrumented\nfun:execlp=uninstrumented\nfun:execv=uninstrumented\nfun:execve=uninstrumented\nfun:execvp=uninstrumented\nfun:execvpe=uninstrumented\nfun:exit=uninstrumented\nfun:exp=uninstrumented\nfun:exp10=uninstrumented\nfun:exp10f=uninstrumented\nfun:exp10f128=uninstrumented\nfun:exp10f32=uninstrumented\nfun:exp10f32x=uninstrumented\nfun:exp10f64=uninstrumented\nfun:exp10f64x=uninstrumented\nfun:exp10l=uninstrumented\nfun:exp2=uninstrumented\nfun:exp2f=uninstrumented\nfun:exp2f128=uninstrumented\nfun:exp2f32=uninstrumented\nfun:exp2f32x=uninstrumented\nfun:exp2f64=uninstrumented\nfun:exp2f64x=uninstrumented\nfun:exp2l=uninstrumented\nfun:expf=uninstrumented\nfun:expf128=uninstrumented\nfun:expf32=uninstrumented\nfun:expf32x=uninstrumented\nfun:expf64=uninstrumented\nfun:expf64x=uninstrumented\nfun:expl=uninstrumented\nfun:explicit_bzero=uninstrumented\nfun:expm1=uninstrumented\nfun:expm1f=uninstrumented\nfun:expm1f128=uninstrumented\nfun:expm1f32=uninstrumented\nfun:expm1f32x=uninstrumented\nfun:expm1f64=uninstrumented\nfun:expm1f64x=uninstrumented\nfun:expm1l=uninstrumented\nfun:fabs=uninstrumented\nfun:fabsf=uninstrumented\nfun:fabsf128=uninstrumented\nfun:fabsf32=uninstrumented\nfun:fabsf32x=uninstrumented\nfun:fabsf64=uninstrumented\nfun:fabsf64x=uninstrumented\nfun:fabsl=uninstrumented\nfun:faccessat=uninstrumented\nfun:fallocate=uninstrumented\nfun:fallocate64=uninstrumented\nfun:fanotify_init=uninstrumented\nfun:fanotify_mark=uninstrumented\nfun:fattach=uninstrumented\nfun:fchdir=uninstrumented\nfun:fchflags=uninstrumented\nfun:fchmod=uninstrumented\nfun:fchmodat=uninstrumented\nfun:fchown=uninstrumented\nfun:fchownat=uninstrumented\nfun:fclose=uninstrumented\nfun:fcloseall=uninstrumented\nfun:fcntl=uninstrumented\nfun:fcrypt=
uninstrumented\nfun:fcvt=uninstrumented\nfun:fcvt_r=uninstrumented\nfun:fdatasync=uninstrumented\nfun:fdetach=uninstrumented\nfun:fdim=uninstrumented\nfun:fdimf=uninstrumented\nfun:fdimf128=uninstrumented\nfun:fdimf32=uninstrumented\nfun:fdimf32x=uninstrumented\nfun:fdimf64=uninstrumented\nfun:fdimf64x=uninstrumented\nfun:fdiml=uninstrumented\nfun:fdopen=uninstrumented\nfun:fdopendir=uninstrumented\nfun:feclearexcept=uninstrumented\nfun:fedisableexcept=uninstrumented\nfun:feenableexcept=uninstrumented\nfun:fegetenv=uninstrumented\nfun:fegetexcept=uninstrumented\nfun:fegetexceptflag=uninstrumented\nfun:fegetmode=uninstrumented\nfun:fegetround=uninstrumented\nfun:feholdexcept=uninstrumented\nfun:feof=uninstrumented\nfun:feof_unlocked=uninstrumented\nfun:feraiseexcept=uninstrumented\nfun:ferror=uninstrumented\nfun:ferror_unlocked=uninstrumented\nfun:fesetenv=uninstrumented\nfun:fesetexcept=uninstrumented\nfun:fesetexceptflag=uninstrumented\nfun:fesetmode=uninstrumented\nfun:fesetround=uninstrumented\nfun:fetestexcept=uninstrumented\nfun:fetestexceptflag=uninstrumented\nfun:feupdateenv=uninstrumented\nfun:fexecve=uninstrumented\nfun:fflush=uninstrumented\nfun:fflush_unlocked=uninstrumented\nfun:ffs=uninstrumented\nfun:ffsl=uninstrumented\nfun:ffsll=uninstrumented\nfun:fgetc=uninstrumented\nfun:fgetc_unlocked=uninstrumented\nfun:fgetgrent=uninstrumented\nfun:fgetgrent_r=uninstrumented\nfun:fgetpos=uninstrumented\nfun:fgetpos64=uninstrumented\nfun:fgetpwent=uninstrumented\nfun:fgetpwent_r=uninstrumented\nfun:fgets=uninstrumented\nfun:fgets_unlocked=uninstrumented\nfun:fgetsgent=uninstrumented\nfun:fgetsgent_r=uninstrumented\nfun:fgetspent=uninstrumented\nfun:fgetspent_r=uninstrumented\nfun:fgetwc=uninstrumented\nfun:fgetwc_unlocked=uninstrumented\nfun:fgetws=uninstrumented\nfun:fgetws_unlocked=uninstrumented\nfun:fgetxattr=uninstrumented\nfun:fileno=uninstrumented\nfun:fileno_unlocked=uninstrumented\nfun:finite=uninstrumented\nfun:finitef=uninstrumented\nfun:finitel=unins
trumented\nfun:flistxattr=uninstrumented\nfun:flock=uninstrumented\nfun:flockfile=uninstrumented\nfun:floor=uninstrumented\nfun:floorf=uninstrumented\nfun:floorf128=uninstrumented\nfun:floorf32=uninstrumented\nfun:floorf32x=uninstrumented\nfun:floorf64=uninstrumented\nfun:floorf64x=uninstrumented\nfun:floorl=uninstrumented\nfun:fma=uninstrumented\nfun:fmaf=uninstrumented\nfun:fmaf128=uninstrumented\nfun:fmaf32=uninstrumented\nfun:fmaf32x=uninstrumented\nfun:fmaf64=uninstrumented\nfun:fmaf64x=uninstrumented\nfun:fmal=uninstrumented\nfun:fmax=uninstrumented\nfun:fmaxf=uninstrumented\nfun:fmaxf128=uninstrumented\nfun:fmaxf32=uninstrumented\nfun:fmaxf32x=uninstrumented\nfun:fmaxf64=uninstrumented\nfun:fmaxf64x=uninstrumented\nfun:fmaxl=uninstrumented\nfun:fmaxmag=uninstrumented\nfun:fmaxmagf=uninstrumented\nfun:fmaxmagf128=uninstrumented\nfun:fmaxmagf32=uninstrumented\nfun:fmaxmagf32x=uninstrumented\nfun:fmaxmagf64=uninstrumented\nfun:fmaxmagf64x=uninstrumented\nfun:fmaxmagl=uninstrumented\nfun:fmemopen=uninstrumented\nfun:fmin=uninstrumented\nfun:fminf=uninstrumented\nfun:fminf128=uninstrumented\nfun:fminf32=uninstrumented\nfun:fminf32x=uninstrumented\nfun:fminf64=uninstrumented\nfun:fminf64x=uninstrumented\nfun:fminl=uninstrumented\nfun:fminmag=uninstrumented\nfun:fminmagf=uninstrumented\nfun:fminmagf128=uninstrumented\nfun:fminmagf32=uninstrumented\nfun:fminmagf32x=uninstrumented\nfun:fminmagf64=uninstrumented\nfun:fminmagf64x=uninstrumented\nfun:fminmagl=uninstrumented\nfun:fmod=uninstrumented\nfun:fmodf=uninstrumented\nfun:fmodf128=uninstrumented\nfun:fmodf32=uninstrumented\nfun:fmodf32x=uninstrumented\nfun:fmodf64=uninstrumented\nfun:fmodf64x=uninstrumented\nfun:fmodl=uninstrumented\nfun:fmtmsg=uninstrumented\nfun:fnmatch=uninstrumented\nfun:fopen=uninstrumented\nfun:fopen64=uninstrumented\nfun:fopencookie=uninstrumented\nfun:fork=uninstrumented\nfun:forkpty=uninstrumented\nfun:fpathconf=uninstrumented\nfun:fprintf=uninstrumented\nfun:fputc=uninstrumented\nfun:fpu
tc_unlocked=uninstrumented\nfun:fputs=uninstrumented\nfun:fputs_unlocked=uninstrumented\nfun:fputwc=uninstrumented\nfun:fputwc_unlocked=uninstrumented\nfun:fputws=uninstrumented\nfun:fputws_unlocked=uninstrumented\nfun:fread=uninstrumented\nfun:fread_unlocked=uninstrumented\nfun:free=uninstrumented\nfun:freeaddrinfo=uninstrumented\nfun:freeifaddrs=uninstrumented\nfun:freelocale=uninstrumented\nfun:fremovexattr=uninstrumented\nfun:freopen=uninstrumented\nfun:freopen64=uninstrumented\nfun:frexp=uninstrumented\nfun:frexpf=uninstrumented\nfun:frexpf128=uninstrumented\nfun:frexpf32=uninstrumented\nfun:frexpf32x=uninstrumented\nfun:frexpf64=uninstrumented\nfun:frexpf64x=uninstrumented\nfun:frexpl=uninstrumented\nfun:fromfp=uninstrumented\nfun:fromfpf=uninstrumented\nfun:fromfpf128=uninstrumented\nfun:fromfpf32=uninstrumented\nfun:fromfpf32x=uninstrumented\nfun:fromfpf64=uninstrumented\nfun:fromfpf64x=uninstrumented\nfun:fromfpl=uninstrumented\nfun:fromfpx=uninstrumented\nfun:fromfpxf=uninstrumented\nfun:fromfpxf128=uninstrumented\nfun:fromfpxf32=uninstrumented\nfun:fromfpxf32x=uninstrumented\nfun:fromfpxf64=uninstrumented\nfun:fromfpxf64x=uninstrumented\nfun:fromfpxl=uninstrumented\nfun:fscanf=uninstrumented\nfun:fseek=uninstrumented\nfun:fseeko=uninstrumented\nfun:fseeko64=uninstrumented\nfun:fsetpos=uninstrumented\nfun:fsetpos64=uninstrumented\nfun:fsetxattr=uninstrumented\nfun:fstat=uninstrumented\nfun:fstat64=uninstrumented\nfun:fstatat=uninstrumented\nfun:fstatat64=uninstrumented\nfun:fstatfs=uninstrumented\nfun:fstatfs64=uninstrumented\nfun:fstatvfs=uninstrumented\nfun:fstatvfs64=uninstrumented\nfun:fsync=uninstrumented\nfun:ftell=uninstrumented\nfun:ftello=uninstrumented\nfun:ftello64=uninstrumented\nfun:ftime=uninstrumented\nfun:ftok=uninstrumented\nfun:ftruncate=uninstrumented\nfun:ftruncate64=uninstrumented\nfun:ftrylockfile=uninstrumented\nfun:fts64_children=uninstrumented\nfun:fts64_close=uninstrumented\nfun:fts64_open=uninstrumented\nfun:fts64_read=uninstrume
nted\nfun:fts64_set=uninstrumented\nfun:fts_children=uninstrumented\nfun:fts_close=uninstrumented\nfun:fts_open=uninstrumented\nfun:fts_read=uninstrumented\nfun:fts_set=uninstrumented\nfun:ftw=uninstrumented\nfun:ftw64=uninstrumented\nfun:funlockfile=uninstrumented\nfun:futimens=uninstrumented\nfun:futimes=uninstrumented\nfun:futimesat=uninstrumented\nfun:fwide=uninstrumented\nfun:fwprintf=uninstrumented\nfun:fwrite=uninstrumented\nfun:fwrite_unlocked=uninstrumented\nfun:fwscanf=uninstrumented\nfun:gai_cancel=uninstrumented\nfun:gai_error=uninstrumented\nfun:gai_strerror=uninstrumented\nfun:gai_suspend=uninstrumented\nfun:gamma=uninstrumented\nfun:gammaf=uninstrumented\nfun:gammal=uninstrumented\nfun:gcvt=uninstrumented\nfun:get_avphys_pages=uninstrumented\nfun:get_current_dir_name=uninstrumented\nfun:get_kernel_syms=uninstrumented\nfun:get_myaddress=uninstrumented\nfun:get_nprocs=uninstrumented\nfun:get_nprocs_conf=uninstrumented\nfun:get_phys_pages=uninstrumented\nfun:getaddrinfo=uninstrumented\nfun:getaddrinfo_a=uninstrumented\nfun:getaliasbyname=uninstrumented\nfun:getaliasbyname_r=uninstrumented\nfun:getaliasent=uninstrumented\nfun:getaliasent_r=uninstrumented\nfun:getauxval=uninstrumented\nfun:getc=uninstrumented\nfun:getc_unlocked=uninstrumented\nfun:getchar=uninstrumented\nfun:getchar_unlocked=uninstrumented\nfun:getcontext=uninstrumented\nfun:getcwd=uninstrumented\nfun:getdate=uninstrumented\nfun:getdate_r=uninstrumented\nfun:getdelim=uninstrumented\nfun:getdirentries=uninstrumented\nfun:getdirentries64=uninstrumented\nfun:getdomainname=uninstrumented\nfun:getdtablesize=uninstrumented\nfun:getegid=uninstrumented\nfun:getentropy=uninstrumented\nfun:getenv=uninstrumented\nfun:geteuid=uninstrumented\nfun:getfsent=uninstrumented\nfun:getfsfile=uninstrumented\nfun:getfsspec=uninstrumented\nfun:getgid=uninstrumented\nfun:getgrent=uninstrumented\nfun:getgrent_r=uninstrumented\nfun:getgrgid=uninstrumented\nfun:getgrgid_r=uninstrumented\nfun:getgrnam=uninstrumented\
nfun:getgrnam_r=uninstrumented\nfun:getgrouplist=uninstrumented\nfun:getgroups=uninstrumented\nfun:gethostbyaddr=uninstrumented\nfun:gethostbyaddr_r=uninstrumented\nfun:gethostbyname=uninstrumented\nfun:gethostbyname2=uninstrumented\nfun:gethostbyname2_r=uninstrumented\nfun:gethostbyname_r=uninstrumented\nfun:gethostent=uninstrumented\nfun:gethostent_r=uninstrumented\nfun:gethostid=uninstrumented\nfun:gethostname=uninstrumented\nfun:getifaddrs=uninstrumented\nfun:getipv4sourcefilter=uninstrumented\nfun:getitimer=uninstrumented\nfun:getline=uninstrumented\nfun:getloadavg=uninstrumented\nfun:getlogin=uninstrumented\nfun:getlogin_r=uninstrumented\nfun:getmntent=uninstrumented\nfun:getmntent_r=uninstrumented\nfun:getmsg=uninstrumented\nfun:getnameinfo=uninstrumented\nfun:getnetbyaddr=uninstrumented\nfun:getnetbyaddr_r=uninstrumented\nfun:getnetbyname=uninstrumented\nfun:getnetbyname_r=uninstrumented\nfun:getnetent=uninstrumented\nfun:getnetent_r=uninstrumented\nfun:getnetgrent=uninstrumented\nfun:getnetgrent_r=uninstrumented\nfun:getnetname=uninstrumented\nfun:getopt=uninstrumented\nfun:getopt_long=uninstrumented\nfun:getopt_long_only=uninstrumented\nfun:getpagesize=uninstrumented\nfun:getpass=uninstrumented\nfun:getpayload=uninstrumented\nfun:getpayloadf=uninstrumented\nfun:getpayloadf128=uninstrumented\nfun:getpayloadf32=uninstrumented\nfun:getpayloadf32x=uninstrumented\nfun:getpayloadf64=uninstrumented\nfun:getpayloadf64x=uninstrumented\nfun:getpayloadl=uninstrumented\nfun:getpeername=uninstrumented\nfun:getpgid=uninstrumented\nfun:getpgrp=uninstrumented\nfun:getpid=uninstrumented\nfun:getpmsg=uninstrumented\nfun:getppid=uninstrumented\nfun:getpriority=uninstrumented\nfun:getprotobyname=uninstrumented\nfun:getprotobyname_r=uninstrumented\nfun:getprotobynumber=uninstrumented\nfun:getprotobynumber_r=uninstrumented\nfun:getprotoent=uninstrumented\nfun:getprotoent_r=uninstrumented\nfun:getpt=uninstrumented\nfun:getpublickey=uninstrumented\nfun:getpw=uninstrumented\nfun:g
etpwent=uninstrumented\nfun:getpwent_r=uninstrumented\nfun:getpwnam=uninstrumented\nfun:getpwnam_r=uninstrumented\nfun:getpwuid=uninstrumented\nfun:getpwuid_r=uninstrumented\nfun:getrandom=uninstrumented\nfun:getresgid=uninstrumented\nfun:getresuid=uninstrumented\nfun:getrlimit=uninstrumented\nfun:getrlimit64=uninstrumented\nfun:getrpcbyname=uninstrumented\nfun:getrpcbyname_r=uninstrumented\nfun:getrpcbynumber=uninstrumented\nfun:getrpcbynumber_r=uninstrumented\nfun:getrpcent=uninstrumented\nfun:getrpcent_r=uninstrumented\nfun:getrpcport=uninstrumented\nfun:getrusage=uninstrumented\nfun:gets=uninstrumented\nfun:getsecretkey=uninstrumented\nfun:getservbyname=uninstrumented\nfun:getservbyname_r=uninstrumented\nfun:getservbyport=uninstrumented\nfun:getservbyport_r=uninstrumented\nfun:getservent=uninstrumented\nfun:getservent_r=uninstrumented\nfun:getsgent=uninstrumented\nfun:getsgent_r=uninstrumented\nfun:getsgnam=uninstrumented\nfun:getsgnam_r=uninstrumented\nfun:getsid=uninstrumented\nfun:getsockname=uninstrumented\nfun:getsockopt=uninstrumented\nfun:getsourcefilter=uninstrumented\nfun:getspent=uninstrumented\nfun:getspent_r=uninstrumented\nfun:getspnam=uninstrumented\nfun:getspnam_r=uninstrumented\nfun:getsubopt=uninstrumented\nfun:gettext=uninstrumented\nfun:gettimeofday=uninstrumented\nfun:getttyent=uninstrumented\nfun:getttynam=uninstrumented\nfun:getuid=uninstrumented\nfun:getusershell=uninstrumented\nfun:getutent=uninstrumented\nfun:getutent_r=uninstrumented\nfun:getutid=uninstrumented\nfun:getutid_r=uninstrumented\nfun:getutline=uninstrumented\nfun:getutline_r=uninstrumented\nfun:getutmp=uninstrumented\nfun:getutmpx=uninstrumented\nfun:getutxent=uninstrumented\nfun:getutxid=uninstrumented\nfun:getutxline=uninstrumented\nfun:getw=uninstrumented\nfun:getwc=uninstrumented\nfun:getwc_unlocked=uninstrumented\nfun:getwchar=uninstrumented\nfun:getwchar_unlocked=uninstrumented\nfun:getwd=uninstrumented\nfun:getxattr=uninstrumented\nfun:glob=uninstrumented\nfun:glob64=
uninstrumented\nfun:glob_pattern_p=uninstrumented\nfun:globfree=uninstrumented\nfun:globfree64=uninstrumented\nfun:gmtime=uninstrumented\nfun:gmtime_r=uninstrumented\nfun:gnu_dev_major=uninstrumented\nfun:gnu_dev_makedev=uninstrumented\nfun:gnu_dev_minor=uninstrumented\nfun:gnu_get_libc_release=uninstrumented\nfun:gnu_get_libc_version=uninstrumented\nfun:grantpt=uninstrumented\nfun:group_member=uninstrumented\nfun:gsignal=uninstrumented\nfun:gtty=uninstrumented\nfun:hasmntopt=uninstrumented\nfun:hcreate=uninstrumented\nfun:hcreate_r=uninstrumented\nfun:hdestroy=uninstrumented\nfun:hdestroy_r=uninstrumented\nfun:herror=uninstrumented\nfun:host2netname=uninstrumented\nfun:hsearch=uninstrumented\nfun:hsearch_r=uninstrumented\nfun:hstrerror=uninstrumented\nfun:htonl=uninstrumented\nfun:htons=uninstrumented\nfun:hypot=uninstrumented\nfun:hypotf=uninstrumented\nfun:hypotf128=uninstrumented\nfun:hypotf32=uninstrumented\nfun:hypotf32x=uninstrumented\nfun:hypotf64=uninstrumented\nfun:hypotf64x=uninstrumented\nfun:hypotl=uninstrumented\nfun:iconv=uninstrumented\nfun:iconv_close=uninstrumented\nfun:iconv_open=uninstrumented\nfun:idna_to_ascii_lz=uninstrumented\nfun:idna_to_unicode_lzlz=uninstrumented\nfun:if_freenameindex=uninstrumented\nfun:if_indextoname=uninstrumented\nfun:if_nameindex=uninstrumented\nfun:if_nametoindex=uninstrumented\nfun:ilogb=uninstrumented\nfun:ilogbf=uninstrumented\nfun:ilogbf128=uninstrumented\nfun:ilogbf32=uninstrumented\nfun:ilogbf32x=uninstrumented\nfun:ilogbf64=uninstrumented\nfun:ilogbf64x=uninstrumented\nfun:ilogbl=uninstrumented\nfun:imaxabs=uninstrumented\nfun:imaxdiv=uninstrumented\nfun:index=uninstrumented\nfun:inet6_opt_append=uninstrumented\nfun:inet6_opt_find=uninstrumented\nfun:inet6_opt_finish=uninstrumented\nfun:inet6_opt_get_val=uninstrumented\nfun:inet6_opt_init=uninstrumented\nfun:inet6_opt_next=uninstrumented\nfun:inet6_opt_set_val=uninstrumented\nfun:inet6_option_alloc=uninstrumented\nfun:inet6_option_append=uninstrumented\nfun:in
et6_option_find=uninstrumented\nfun:inet6_option_init=uninstrumented\nfun:inet6_option_next=uninstrumented\nfun:inet6_option_space=uninstrumented\nfun:inet6_rth_add=uninstrumented\nfun:inet6_rth_getaddr=uninstrumented\nfun:inet6_rth_init=uninstrumented\nfun:inet6_rth_reverse=uninstrumented\nfun:inet6_rth_segments=uninstrumented\nfun:inet6_rth_space=uninstrumented\nfun:inet_addr=uninstrumented\nfun:inet_aton=uninstrumented\nfun:inet_lnaof=uninstrumented\nfun:inet_makeaddr=uninstrumented\nfun:inet_net_ntop=uninstrumented\nfun:inet_net_pton=uninstrumented\nfun:inet_neta=uninstrumented\nfun:inet_netof=uninstrumented\nfun:inet_network=uninstrumented\nfun:inet_nsap_addr=uninstrumented\nfun:inet_nsap_ntoa=uninstrumented\nfun:inet_ntoa=uninstrumented\nfun:inet_ntop=uninstrumented\nfun:inet_pton=uninstrumented\nfun:init_module=uninstrumented\nfun:initgroups=uninstrumented\nfun:initstate=uninstrumented\nfun:initstate_r=uninstrumented\nfun:innetgr=uninstrumented\nfun:inotify_add_watch=uninstrumented\nfun:inotify_init=uninstrumented\nfun:inotify_init1=uninstrumented\nfun:inotify_rm_watch=uninstrumented\nfun:insque=uninstrumented\nfun:ioctl=uninstrumented\nfun:ioperm=uninstrumented\nfun:iopl=uninstrumented\nfun:iruserok=uninstrumented\nfun:iruserok_af=uninstrumented\nfun:isalnum=uninstrumented\nfun:isalnum_l=uninstrumented\nfun:isalpha=uninstrumented\nfun:isalpha_l=uninstrumented\nfun:isascii=uninstrumented\nfun:isastream=uninstrumented\nfun:isatty=uninstrumented\nfun:isblank=uninstrumented\nfun:isblank_l=uninstrumented\nfun:iscntrl=uninstrumented\nfun:iscntrl_l=uninstrumented\nfun:isctype=uninstrumented\nfun:isdigit=uninstrumented\nfun:isdigit_l=uninstrumented\nfun:isfdtype=uninstrumented\nfun:isgraph=uninstrumented\nfun:isgraph_l=uninstrumented\nfun:isinf=uninstrumented\nfun:isinfd128=uninstrumented\nfun:isinfd32=uninstrumented\nfun:isinfd64=uninstrumented\nfun:isinff=uninstrumented\nfun:isinfl=uninstrumented\nfun:islower=uninstrumented\nfun:islower_l=uninstrumented\nfun:isnan
=uninstrumented\nfun:isnanf=uninstrumented\nfun:isnanl=uninstrumented\nfun:isprint=uninstrumented\nfun:isprint_l=uninstrumented\nfun:ispunct=uninstrumented\nfun:ispunct_l=uninstrumented\nfun:isspace=uninstrumented\nfun:isspace_l=uninstrumented\nfun:isupper=uninstrumented\nfun:isupper_l=uninstrumented\nfun:iswalnum=uninstrumented\nfun:iswalnum_l=uninstrumented\nfun:iswalpha=uninstrumented\nfun:iswalpha_l=uninstrumented\nfun:iswblank=uninstrumented\nfun:iswblank_l=uninstrumented\nfun:iswcntrl=uninstrumented\nfun:iswcntrl_l=uninstrumented\nfun:iswctype=uninstrumented\nfun:iswctype_l=uninstrumented\nfun:iswdigit=uninstrumented\nfun:iswdigit_l=uninstrumented\nfun:iswgraph=uninstrumented\nfun:iswgraph_l=uninstrumented\nfun:iswlower=uninstrumented\nfun:iswlower_l=uninstrumented\nfun:iswprint=uninstrumented\nfun:iswprint_l=uninstrumented\nfun:iswpunct=uninstrumented\nfun:iswpunct_l=uninstrumented\nfun:iswspace=uninstrumented\nfun:iswspace_l=uninstrumented\nfun:iswupper=uninstrumented\nfun:iswupper_l=uninstrumented\nfun:iswxdigit=uninstrumented\nfun:iswxdigit_l=uninstrumented\nfun:isxdigit=uninstrumented\nfun:isxdigit_l=uninstrumented\nfun:j0=uninstrumented\nfun:j0f=uninstrumented\nfun:j0f128=uninstrumented\nfun:j0f32=uninstrumented\nfun:j0f32x=uninstrumented\nfun:j0f64=uninstrumented\nfun:j0f64x=uninstrumented\nfun:j0l=uninstrumented\nfun:j1=uninstrumented\nfun:j1f=uninstrumented\nfun:j1f128=uninstrumented\nfun:j1f32=uninstrumented\nfun:j1f32x=uninstrumented\nfun:j1f64=uninstrumented\nfun:j1f64x=uninstrumented\nfun:j1l=uninstrumented\nfun:jn=uninstrumented\nfun:jnf=uninstrumented\nfun:jnf128=uninstrumented\nfun:jnf32=uninstrumented\nfun:jnf32x=uninstrumented\nfun:jnf64=uninstrumented\nfun:jnf64x=uninstrumented\nfun:jnl=uninstrumented\nfun:jrand48=uninstrumented\nfun:jrand48_r=uninstrumented\nfun:key_decryptsession=uninstrumented\nfun:key_decryptsession_pk=uninstrumented\nfun:key_encryptsession=uninstrumented\nfun:key_encryptsession_pk=uninstrumented\nfun:key_gendes=uninstru
mented\nfun:key_get_conv=uninstrumented\nfun:key_secretkey_is_set=uninstrumented\nfun:key_setnet=uninstrumented\nfun:key_setsecret=uninstrumented\nfun:kill=uninstrumented\nfun:killpg=uninstrumented\nfun:klogctl=uninstrumented\nfun:l64a=uninstrumented\nfun:labs=uninstrumented\nfun:lchmod=uninstrumented\nfun:lchown=uninstrumented\nfun:lckpwdf=uninstrumented\nfun:lcong48=uninstrumented\nfun:lcong48_r=uninstrumented\nfun:ldexp=uninstrumented\nfun:ldexpf=uninstrumented\nfun:ldexpf128=uninstrumented\nfun:ldexpf32=uninstrumented\nfun:ldexpf32x=uninstrumented\nfun:ldexpf64=uninstrumented\nfun:ldexpf64x=uninstrumented\nfun:ldexpl=uninstrumented\nfun:ldiv=uninstrumented\nfun:lfind=uninstrumented\nfun:lgamma=uninstrumented\nfun:lgamma_r=uninstrumented\nfun:lgammaf=uninstrumented\nfun:lgammaf128=uninstrumented\nfun:lgammaf128_r=uninstrumented\nfun:lgammaf32=uninstrumented\nfun:lgammaf32_r=uninstrumented\nfun:lgammaf32x=uninstrumented\nfun:lgammaf32x_r=uninstrumented\nfun:lgammaf64=uninstrumented\nfun:lgammaf64_r=uninstrumented\nfun:lgammaf64x=uninstrumented\nfun:lgammaf64x_r=uninstrumented\nfun:lgammaf_r=uninstrumented\nfun:lgammal=uninstrumented\nfun:lgammal_r=uninstrumented\nfun:lgetxattr=uninstrumented\nfun:link=uninstrumented\nfun:linkat=uninstrumented\nfun:lio_listio=uninstrumented\nfun:lio_listio64=uninstrumented\nfun:listen=uninstrumented\nfun:listxattr=uninstrumented\nfun:llabs=uninstrumented\nfun:lldiv=uninstrumented\nfun:llistxattr=uninstrumented\nfun:llogb=uninstrumented\nfun:llogbf=uninstrumented\nfun:llogbf128=uninstrumented\nfun:llogbf32=uninstrumented\nfun:llogbf32x=uninstrumented\nfun:llogbf64=uninstrumented\nfun:llogbf64x=uninstrumented\nfun:llogbl=uninstrumented\nfun:llrint=uninstrumented\nfun:llrintf=uninstrumented\nfun:llrintf128=uninstrumented\nfun:llrintf32=uninstrumented\nfun:llrintf32x=uninstrumented\nfun:llrintf64=uninstrumented\nfun:llrintf64x=uninstrumented\nfun:llrintl=uninstrumented\nfun:llround=uninstrumented\nfun:llroundf=uninstrumented\nfun:llrou
ndf128=uninstrumented\nfun:llroundf32=uninstrumented\nfun:llroundf32x=uninstrumented\nfun:llroundf64=uninstrumented\nfun:llroundf64x=uninstrumented\nfun:llroundl=uninstrumented\nfun:llseek=uninstrumented\nfun:localeconv=uninstrumented\nfun:localtime=uninstrumented\nfun:localtime_r=uninstrumented\nfun:lockf=uninstrumented\nfun:lockf64=uninstrumented\nfun:log=uninstrumented\nfun:log10=uninstrumented\nfun:log10f=uninstrumented\nfun:log10f128=uninstrumented\nfun:log10f32=uninstrumented\nfun:log10f32x=uninstrumented\nfun:log10f64=uninstrumented\nfun:log10f64x=uninstrumented\nfun:log10l=uninstrumented\nfun:log1p=uninstrumented\nfun:log1pf=uninstrumented\nfun:log1pf128=uninstrumented\nfun:log1pf32=uninstrumented\nfun:log1pf32x=uninstrumented\nfun:log1pf64=uninstrumented\nfun:log1pf64x=uninstrumented\nfun:log1pl=uninstrumented\nfun:log2=uninstrumented\nfun:log2f=uninstrumented\nfun:log2f128=uninstrumented\nfun:log2f32=uninstrumented\nfun:log2f32x=uninstrumented\nfun:log2f64=uninstrumented\nfun:log2f64x=uninstrumented\nfun:log2l=uninstrumented\nfun:logb=uninstrumented\nfun:logbf=uninstrumented\nfun:logbf128=uninstrumented\nfun:logbf32=uninstrumented\nfun:logbf32x=uninstrumented\nfun:logbf64=uninstrumented\nfun:logbf64x=uninstrumented\nfun:logbl=uninstrumented\nfun:logf=uninstrumented\nfun:logf128=uninstrumented\nfun:logf32=uninstrumented\nfun:logf32x=uninstrumented\nfun:logf64=uninstrumented\nfun:logf64x=uninstrumented\nfun:login=uninstrumented\nfun:login_tty=uninstrumented\nfun:logl=uninstrumented\nfun:logout=uninstrumented\nfun:logwtmp=uninstrumented\nfun:longjmp=uninstrumented\nfun:lrand48=uninstrumented\nfun:lrand48_r=uninstrumented\nfun:lremovexattr=uninstrumented\nfun:lrint=uninstrumented\nfun:lrintf=uninstrumented\nfun:lrintf128=uninstrumented\nfun:lrintf32=uninstrumented\nfun:lrintf32x=uninstrumented\nfun:lrintf64=uninstrumented\nfun:lrintf64x=uninstrumented\nfun:lrintl=uninstrumented\nfun:lround=uninstrumented\nfun:lroundf=uninstrumented\nfun:lroundf128=uninstrument
ed\nfun:lroundf32=uninstrumented\nfun:lroundf32x=uninstrumented\nfun:lroundf64=uninstrumented\nfun:lroundf64x=uninstrumented\nfun:lroundl=uninstrumented\nfun:lsearch=uninstrumented\nfun:lseek=uninstrumented\nfun:lseek64=uninstrumented\nfun:lsetxattr=uninstrumented\nfun:lstat=uninstrumented\nfun:lstat64=uninstrumented\nfun:lutimes=uninstrumented\nfun:madvise=uninstrumented\nfun:makecontext=uninstrumented\nfun:mallinfo=uninstrumented\nfun:malloc=uninstrumented\nfun:malloc_get_state=uninstrumented\nfun:malloc_info=uninstrumented\nfun:malloc_set_state=uninstrumented\nfun:malloc_stats=uninstrumented\nfun:malloc_trim=uninstrumented\nfun:malloc_usable_size=uninstrumented\nfun:mallopt=uninstrumented\nfun:matherr=uninstrumented\nfun:mblen=uninstrumented\nfun:mbrlen=uninstrumented\nfun:mbrtoc16=uninstrumented\nfun:mbrtoc32=uninstrumented\nfun:mbrtowc=uninstrumented\nfun:mbsinit=uninstrumented\nfun:mbsnrtowcs=uninstrumented\nfun:mbsrtowcs=uninstrumented\nfun:mbstowcs=uninstrumented\nfun:mbtowc=uninstrumented\nfun:mcheck=uninstrumented\nfun:mcheck_check_all=uninstrumented\nfun:mcheck_pedantic=uninstrumented\nfun:mcount=uninstrumented\nfun:memalign=uninstrumented\nfun:memccpy=uninstrumented\nfun:memchr=uninstrumented\nfun:memcmp=uninstrumented\nfun:memcpy=uninstrumented\nfun:memfd_create=uninstrumented\nfun:memfrob=uninstrumented\nfun:memmem=uninstrumented\nfun:memmove=uninstrumented\nfun:mempcpy=uninstrumented\nfun:memrchr=uninstrumented\nfun:memset=uninstrumented\nfun:mincore=uninstrumented\nfun:mkdir=uninstrumented\nfun:mkdirat=uninstrumented\nfun:mkdtemp=uninstrumented\nfun:mkfifo=uninstrumented\nfun:mkfifoat=uninstrumented\nfun:mknod=uninstrumented\nfun:mknodat=uninstrumented\nfun:mkostemp=uninstrumented\nfun:mkostemp64=uninstrumented\nfun:mkostemps=uninstrumented\nfun:mkostemps64=uninstrumented\nfun:mkstemp=uninstrumented\nfun:mkstemp64=uninstrumented\nfun:mkstemps=uninstrumented\nfun:mkstemps64=uninstrumented\nfun:mktemp=uninstrumented\nfun:mktime=uninstrumented\nfun:mloc
k=uninstrumented\nfun:mlock2=uninstrumented\nfun:mlockall=uninstrumented\nfun:mmap=uninstrumented\nfun:mmap64=uninstrumented\nfun:modf=uninstrumented\nfun:modff=uninstrumented\nfun:modff128=uninstrumented\nfun:modff32=uninstrumented\nfun:modff32x=uninstrumented\nfun:modff64=uninstrumented\nfun:modff64x=uninstrumented\nfun:modfl=uninstrumented\nfun:modify_ldt=uninstrumented\nfun:moncontrol=uninstrumented\nfun:monstartup=uninstrumented\nfun:mount=uninstrumented\nfun:mprobe=uninstrumented\nfun:mprotect=uninstrumented\nfun:mq_close=uninstrumented\nfun:mq_getattr=uninstrumented\nfun:mq_notify=uninstrumented\nfun:mq_open=uninstrumented\nfun:mq_receive=uninstrumented\nfun:mq_send=uninstrumented\nfun:mq_setattr=uninstrumented\nfun:mq_timedreceive=uninstrumented\nfun:mq_timedsend=uninstrumented\nfun:mq_unlink=uninstrumented\nfun:mrand48=uninstrumented\nfun:mrand48_r=uninstrumented\nfun:mremap=uninstrumented\nfun:msgctl=uninstrumented\nfun:msgget=uninstrumented\nfun:msgrcv=uninstrumented\nfun:msgsnd=uninstrumented\nfun:msync=uninstrumented\nfun:mtrace=uninstrumented\nfun:munlock=uninstrumented\nfun:munlockall=uninstrumented\nfun:munmap=uninstrumented\nfun:muntrace=uninstrumented\nfun:name_to_handle_at=uninstrumented\nfun:nan=uninstrumented\nfun:nanf=uninstrumented\nfun:nanf128=uninstrumented\nfun:nanf32=uninstrumented\nfun:nanf32x=uninstrumented\nfun:nanf64=uninstrumented\nfun:nanf64x=uninstrumented\nfun:nanl=uninstrumented\nfun:nanosleep=uninstrumented\nfun:nearbyint=uninstrumented\nfun:nearbyintf=uninstrumented\nfun:nearbyintf128=uninstrumented\nfun:nearbyintf32=uninstrumented\nfun:nearbyintf32x=uninstrumented\nfun:nearbyintf64=uninstrumented\nfun:nearbyintf64x=uninstrumented\nfun:nearbyintl=uninstrumented\nfun:netname2host=uninstrumented\nfun:netname2user=uninstrumented\nfun:newlocale=uninstrumented\nfun:nextafter=uninstrumented\nfun:nextafterf=uninstrumented\nfun:nextafterf128=uninstrumented\nfun:nextafterf32=uninstrumented\nfun:nextafterf32x=uninstrumented\nfun:nextafter
f64=uninstrumented\nfun:nextafterf64x=uninstrumented\nfun:nextafterl=uninstrumented\nfun:nextdown=uninstrumented\nfun:nextdownf=uninstrumented\nfun:nextdownf128=uninstrumented\nfun:nextdownf32=uninstrumented\nfun:nextdownf32x=uninstrumented\nfun:nextdownf64=uninstrumented\nfun:nextdownf64x=uninstrumented\nfun:nextdownl=uninstrumented\nfun:nexttoward=uninstrumented\nfun:nexttowardf=uninstrumented\nfun:nexttowardl=uninstrumented\nfun:nextup=uninstrumented\nfun:nextupf=uninstrumented\nfun:nextupf128=uninstrumented\nfun:nextupf32=uninstrumented\nfun:nextupf32x=uninstrumented\nfun:nextupf64=uninstrumented\nfun:nextupf64x=uninstrumented\nfun:nextupl=uninstrumented\nfun:nfsservctl=uninstrumented\nfun:nftw=uninstrumented\nfun:nftw64=uninstrumented\nfun:ngettext=uninstrumented\nfun:nice=uninstrumented\nfun:nis_add=uninstrumented\nfun:nis_add_entry=uninstrumented\nfun:nis_addmember=uninstrumented\nfun:nis_checkpoint=uninstrumented\nfun:nis_clone_directory=uninstrumented\nfun:nis_clone_object=uninstrumented\nfun:nis_clone_result=uninstrumented\nfun:nis_creategroup=uninstrumented\nfun:nis_destroy_object=uninstrumented\nfun:nis_destroygroup=uninstrumented\nfun:nis_dir_cmp=uninstrumented\nfun:nis_domain_of=uninstrumented\nfun:nis_domain_of_r=uninstrumented\nfun:nis_first_entry=uninstrumented\nfun:nis_free_directory=uninstrumented\nfun:nis_free_object=uninstrumented\nfun:nis_free_request=uninstrumented\nfun:nis_freenames=uninstrumented\nfun:nis_freeresult=uninstrumented\nfun:nis_freeservlist=uninstrumented\nfun:nis_freetags=uninstrumented\nfun:nis_getnames=uninstrumented\nfun:nis_getservlist=uninstrumented\nfun:nis_ismember=uninstrumented\nfun:nis_leaf_of=uninstrumented\nfun:nis_leaf_of_r=uninstrumented\nfun:nis_lerror=uninstrumented\nfun:nis_list=uninstrumented\nfun:nis_local_directory=uninstrumented\nfun:nis_local_group=uninstrumented\nfun:nis_local_host=uninstrumented\nfun:nis_local_principal=uninstrumented\nfun:nis_lookup=uninstrumented\nfun:nis_mkdir=uninstrumented\nfun:nis_m
odify=uninstrumented\nfun:nis_modify_entry=uninstrumented\nfun:nis_name_of=uninstrumented\nfun:nis_name_of_r=uninstrumented\nfun:nis_next_entry=uninstrumented\nfun:nis_perror=uninstrumented\nfun:nis_ping=uninstrumented\nfun:nis_print_directory=uninstrumented\nfun:nis_print_entry=uninstrumented\nfun:nis_print_group=uninstrumented\nfun:nis_print_group_entry=uninstrumented\nfun:nis_print_link=uninstrumented\nfun:nis_print_object=uninstrumented\nfun:nis_print_result=uninstrumented\nfun:nis_print_rights=uninstrumented\nfun:nis_print_table=uninstrumented\nfun:nis_read_obj=uninstrumented\nfun:nis_remove=uninstrumented\nfun:nis_remove_entry=uninstrumented\nfun:nis_removemember=uninstrumented\nfun:nis_rmdir=uninstrumented\nfun:nis_servstate=uninstrumented\nfun:nis_sperrno=uninstrumented\nfun:nis_sperror=uninstrumented\nfun:nis_sperror_r=uninstrumented\nfun:nis_stats=uninstrumented\nfun:nis_verifygroup=uninstrumented\nfun:nis_write_obj=uninstrumented\nfun:nl_langinfo=uninstrumented\nfun:nl_langinfo_l=uninstrumented\nfun:nrand48=uninstrumented\nfun:nrand48_r=uninstrumented\nfun:ns_datetosecs=uninstrumented\nfun:ns_format_ttl=uninstrumented\nfun:ns_get16=uninstrumented\nfun:ns_get32=uninstrumented\nfun:ns_initparse=uninstrumented\nfun:ns_makecanon=uninstrumented\nfun:ns_msg_getflag=uninstrumented\nfun:ns_name_compress=uninstrumented\nfun:ns_name_ntol=uninstrumented\nfun:ns_name_ntop=uninstrumented\nfun:ns_name_pack=uninstrumented\nfun:ns_name_pton=uninstrumented\nfun:ns_name_rollback=uninstrumented\nfun:ns_name_skip=uninstrumented\nfun:ns_name_uncompress=uninstrumented\nfun:ns_name_unpack=uninstrumented\nfun:ns_parse_ttl=uninstrumented\nfun:ns_parserr=uninstrumented\nfun:ns_put16=uninstrumented\nfun:ns_put32=uninstrumented\nfun:ns_samedomain=uninstrumented\nfun:ns_samename=uninstrumented\nfun:ns_skiprr=uninstrumented\nfun:ns_sprintrr=uninstrumented\nfun:ns_sprintrrf=uninstrumented\nfun:ns_subdomain=uninstrumented\nfun:ntohl=uninstrumented\nfun:ntohs=uninstrumented\nfun:ntp_adjt
ime=uninstrumented\nfun:ntp_gettime=uninstrumented\nfun:ntp_gettimex=uninstrumented\nfun:obstack_free=uninstrumented\nfun:obstack_printf=uninstrumented\nfun:obstack_vprintf=uninstrumented\nfun:on_exit=uninstrumented\nfun:open=uninstrumented\nfun:open64=uninstrumented\nfun:open_by_handle_at=uninstrumented\nfun:open_memstream=uninstrumented\nfun:open_wmemstream=uninstrumented\nfun:openat=uninstrumented\nfun:openat64=uninstrumented\nfun:opendir=uninstrumented\nfun:openlog=uninstrumented\nfun:openpty=uninstrumented\nfun:parse_printf_format=uninstrumented\nfun:passwd2des=uninstrumented\nfun:pathconf=uninstrumented\nfun:pause=uninstrumented\nfun:pclose=uninstrumented\nfun:perror=uninstrumented\nfun:personality=uninstrumented\nfun:pipe=uninstrumented\nfun:pipe2=uninstrumented\nfun:pivot_root=uninstrumented\nfun:pkey_alloc=uninstrumented\nfun:pkey_free=uninstrumented\nfun:pkey_get=uninstrumented\nfun:pkey_mprotect=uninstrumented\nfun:pkey_set=uninstrumented\nfun:pmap_getmaps=uninstrumented\nfun:pmap_getport=uninstrumented\nfun:pmap_rmtcall=uninstrumented\nfun:pmap_set=uninstrumented\nfun:pmap_unset=uninstrumented\nfun:poll=uninstrumented\nfun:popen=uninstrumented\nfun:posix_fadvise=uninstrumented\nfun:posix_fadvise64=uninstrumented\nfun:posix_fallocate=uninstrumented\nfun:posix_fallocate64=uninstrumented\nfun:posix_madvise=uninstrumented\nfun:posix_memalign=uninstrumented\nfun:posix_openpt=uninstrumented\nfun:posix_spawn=uninstrumented\nfun:posix_spawn_file_actions_addclose=uninstrumented\nfun:posix_spawn_file_actions_adddup2=uninstrumented\nfun:posix_spawn_file_actions_addopen=uninstrumented\nfun:posix_spawn_file_actions_destroy=uninstrumented\nfun:posix_spawn_file_actions_init=uninstrumented\nfun:posix_spawnattr_destroy=uninstrumented\nfun:posix_spawnattr_getflags=uninstrumented\nfun:posix_spawnattr_getpgroup=uninstrumented\nfun:posix_spawnattr_getschedparam=uninstrumented\nfun:posix_spawnattr_getschedpolicy=uninstrumented\nfun:posix_spawnattr_getsigdefault=uninstrumented
\nfun:posix_spawnattr_getsigmask=uninstrumented\nfun:posix_spawnattr_init=uninstrumented\nfun:posix_spawnattr_setflags=uninstrumented\nfun:posix_spawnattr_setpgroup=uninstrumented\nfun:posix_spawnattr_setschedparam=uninstrumented\nfun:posix_spawnattr_setschedpolicy=uninstrumented\nfun:posix_spawnattr_setsigdefault=uninstrumented\nfun:posix_spawnattr_setsigmask=uninstrumented\nfun:posix_spawnp=uninstrumented\nfun:pow=uninstrumented\nfun:pow10=uninstrumented\nfun:pow10f=uninstrumented\nfun:pow10l=uninstrumented\nfun:powf=uninstrumented\nfun:powf128=uninstrumented\nfun:powf32=uninstrumented\nfun:powf32x=uninstrumented\nfun:powf64=uninstrumented\nfun:powf64x=uninstrumented\nfun:powl=uninstrumented\nfun:ppoll=uninstrumented\nfun:prctl=uninstrumented\nfun:pread=uninstrumented\nfun:pread64=uninstrumented\nfun:preadv=uninstrumented\nfun:preadv2=uninstrumented\nfun:preadv64=uninstrumented\nfun:preadv64v2=uninstrumented\nfun:printf=uninstrumented\nfun:printf_size=uninstrumented\nfun:printf_size_info=uninstrumented\nfun:prlimit=uninstrumented\nfun:prlimit64=uninstrumented\nfun:process_vm_readv=uninstrumented\nfun:process_vm_writev=uninstrumented\nfun:profil=uninstrumented\nfun:pselect=uninstrumented\nfun:psiginfo=uninstrumented\nfun:psignal=uninstrumented\nfun:pthread_atfork=uninstrumented\nfun:pthread_attr_destroy=uninstrumented\nfun:pthread_attr_getaffinity_np=uninstrumented\nfun:pthread_attr_getdetachstate=uninstrumented\nfun:pthread_attr_getguardsize=uninstrumented\nfun:pthread_attr_getinheritsched=uninstrumented\nfun:pthread_attr_getschedparam=uninstrumented\nfun:pthread_attr_getschedpolicy=uninstrumented\nfun:pthread_attr_getscope=uninstrumented\nfun:pthread_attr_getstack=uninstrumented\nfun:pthread_attr_getstackaddr=uninstrumented\nfun:pthread_attr_getstacksize=uninstrumented\nfun:pthread_attr_init=uninstrumented\nfun:pthread_attr_setaffinity_np=uninstrumented\nfun:pthread_attr_setdetachstate=uninstrumented\nfun:pthread_attr_setguardsize=uninstrumented\nfun:pthread_attr
_setinheritsched=uninstrumented\nfun:pthread_attr_setschedparam=uninstrumented\nfun:pthread_attr_setschedpolicy=uninstrumented\nfun:pthread_attr_setscope=uninstrumented\nfun:pthread_attr_setstack=uninstrumented\nfun:pthread_attr_setstackaddr=uninstrumented\nfun:pthread_attr_setstacksize=uninstrumented\nfun:pthread_barrier_destroy=uninstrumented\nfun:pthread_barrier_init=uninstrumented\nfun:pthread_barrier_wait=uninstrumented\nfun:pthread_barrierattr_destroy=uninstrumented\nfun:pthread_barrierattr_getpshared=uninstrumented\nfun:pthread_barrierattr_init=uninstrumented\nfun:pthread_barrierattr_setpshared=uninstrumented\nfun:pthread_cancel=uninstrumented\nfun:pthread_cond_broadcast=uninstrumented\nfun:pthread_cond_destroy=uninstrumented\nfun:pthread_cond_init=uninstrumented\nfun:pthread_cond_signal=uninstrumented\nfun:pthread_cond_timedwait=uninstrumented\nfun:pthread_cond_wait=uninstrumented\nfun:pthread_condattr_destroy=uninstrumented\nfun:pthread_condattr_getclock=uninstrumented\nfun:pthread_condattr_getpshared=uninstrumented\nfun:pthread_condattr_init=uninstrumented\nfun:pthread_condattr_setclock=uninstrumented\nfun:pthread_condattr_setpshared=uninstrumented\nfun:pthread_create=uninstrumented\nfun:pthread_detach=uninstrumented\nfun:pthread_equal=uninstrumented\nfun:pthread_exit=uninstrumented\nfun:pthread_getaffinity_np=uninstrumented\nfun:pthread_getattr_default_np=uninstrumented\nfun:pthread_getattr_np=uninstrumented\nfun:pthread_getconcurrency=uninstrumented\nfun:pthread_getcpuclockid=uninstrumented\nfun:pthread_getname_np=uninstrumented\nfun:pthread_getschedparam=uninstrumented\nfun:pthread_getspecific=uninstrumented\nfun:pthread_join=uninstrumented\nfun:pthread_key_create=uninstrumented\nfun:pthread_key_delete=uninstrumented\nfun:pthread_kill=uninstrumented\nfun:pthread_kill_other_threads_np=uninstrumented\nfun:pthread_mutex_consistent=uninstrumented\nfun:pthread_mutex_consistent_np=uninstrumented\nfun:pthread_mutex_destroy=uninstrumented\nfun:pthread_mutex_get
prioceiling=uninstrumented\nfun:pthread_mutex_init=uninstrumented\nfun:pthread_mutex_lock=uninstrumented\nfun:pthread_mutex_setprioceiling=uninstrumented\nfun:pthread_mutex_timedlock=uninstrumented\nfun:pthread_mutex_trylock=uninstrumented\nfun:pthread_mutex_unlock=uninstrumented\nfun:pthread_mutexattr_destroy=uninstrumented\nfun:pthread_mutexattr_getkind_np=uninstrumented\nfun:pthread_mutexattr_getprioceiling=uninstrumented\nfun:pthread_mutexattr_getprotocol=uninstrumented\nfun:pthread_mutexattr_getpshared=uninstrumented\nfun:pthread_mutexattr_getrobust=uninstrumented\nfun:pthread_mutexattr_getrobust_np=uninstrumented\nfun:pthread_mutexattr_gettype=uninstrumented\nfun:pthread_mutexattr_init=uninstrumented\nfun:pthread_mutexattr_setkind_np=uninstrumented\nfun:pthread_mutexattr_setprioceiling=uninstrumented\nfun:pthread_mutexattr_setprotocol=uninstrumented\nfun:pthread_mutexattr_setpshared=uninstrumented\nfun:pthread_mutexattr_setrobust=uninstrumented\nfun:pthread_mutexattr_setrobust_np=uninstrumented\nfun:pthread_mutexattr_settype=uninstrumented\nfun:pthread_once=uninstrumented\nfun:pthread_rwlock_destroy=uninstrumented\nfun:pthread_rwlock_init=uninstrumented\nfun:pthread_rwlock_rdlock=uninstrumented\nfun:pthread_rwlock_timedrdlock=uninstrumented\nfun:pthread_rwlock_timedwrlock=uninstrumented\nfun:pthread_rwlock_tryrdlock=uninstrumented\nfun:pthread_rwlock_trywrlock=uninstrumented\nfun:pthread_rwlock_unlock=uninstrumented\nfun:pthread_rwlock_wrlock=uninstrumented\nfun:pthread_rwlockattr_destroy=uninstrumented\nfun:pthread_rwlockattr_getkind_np=uninstrumented\nfun:pthread_rwlockattr_getpshared=uninstrumented\nfun:pthread_rwlockattr_init=uninstrumented\nfun:pthread_rwlockattr_setkind_np=uninstrumented\nfun:pthread_rwlockattr_setpshared=uninstrumented\nfun:pthread_self=uninstrumented\nfun:pthread_setaffinity_np=uninstrumented\nfun:pthread_setattr_default_np=uninstrumented\nfun:pthread_setcancelstate=uninstrumented\nfun:pthread_setcanceltype=uninstrumented\nfun:pthread_
setconcurrency=uninstrumented\nfun:pthread_setname_np=uninstrumented\nfun:pthread_setschedparam=uninstrumented\nfun:pthread_setschedprio=uninstrumented\nfun:pthread_setspecific=uninstrumented\nfun:pthread_sigmask=uninstrumented\nfun:pthread_sigqueue=uninstrumented\nfun:pthread_spin_destroy=uninstrumented\nfun:pthread_spin_init=uninstrumented\nfun:pthread_spin_lock=uninstrumented\nfun:pthread_spin_trylock=uninstrumented\nfun:pthread_spin_unlock=uninstrumented\nfun:pthread_testcancel=uninstrumented\nfun:pthread_timedjoin_np=uninstrumented\nfun:pthread_tryjoin_np=uninstrumented\nfun:pthread_yield=uninstrumented\nfun:ptrace=uninstrumented\nfun:ptsname=uninstrumented\nfun:ptsname_r=uninstrumented\nfun:putc=uninstrumented\nfun:putc_unlocked=uninstrumented\nfun:putchar=uninstrumented\nfun:putchar_unlocked=uninstrumented\nfun:putenv=uninstrumented\nfun:putgrent=uninstrumented\nfun:putmsg=uninstrumented\nfun:putpmsg=uninstrumented\nfun:putpwent=uninstrumented\nfun:puts=uninstrumented\nfun:putsgent=uninstrumented\nfun:putspent=uninstrumented\nfun:pututline=uninstrumented\nfun:pututxline=uninstrumented\nfun:putw=uninstrumented\nfun:putwc=uninstrumented\nfun:putwc_unlocked=uninstrumented\nfun:putwchar=uninstrumented\nfun:putwchar_unlocked=uninstrumented\nfun:pvalloc=uninstrumented\nfun:pwrite=uninstrumented\nfun:pwrite64=uninstrumented\nfun:pwritev=uninstrumented\nfun:pwritev2=uninstrumented\nfun:pwritev64=uninstrumented\nfun:pwritev64v2=uninstrumented\nfun:qecvt=uninstrumented\nfun:qecvt_r=uninstrumented\nfun:qfcvt=uninstrumented\nfun:qfcvt_r=uninstrumented\nfun:qgcvt=uninstrumented\nfun:qsort=uninstrumented\nfun:qsort_r=uninstrumented\nfun:query_module=uninstrumented\nfun:quick_exit=uninstrumented\nfun:quotactl=uninstrumented\nfun:raise=uninstrumented\nfun:rand=uninstrumented\nfun:rand_r=uninstrumented\nfun:random=uninstrumented\nfun:random_r=uninstrumented\nfun:rawmemchr=uninstrumented\nfun:rcmd=uninstrumented\nfun:rcmd_af=uninstrumented\nfun:re_comp=uninstrumented\nfun:re_c
ompile_fastmap=uninstrumented\nfun:re_compile_pattern=uninstrumented\nfun:re_exec=uninstrumented\nfun:re_match=uninstrumented\nfun:re_match_2=uninstrumented\nfun:re_search=uninstrumented\nfun:re_search_2=uninstrumented\nfun:re_set_registers=uninstrumented\nfun:re_set_syntax=uninstrumented\nfun:read=uninstrumented\nfun:readColdStartFile=uninstrumented\nfun:readahead=uninstrumented\nfun:readdir=uninstrumented\nfun:readdir64=uninstrumented\nfun:readdir64_r=uninstrumented\nfun:readdir_r=uninstrumented\nfun:readlink=uninstrumented\nfun:readlinkat=uninstrumented\nfun:readv=uninstrumented\nfun:realloc=uninstrumented\nfun:reallocarray=uninstrumented\nfun:realpath=uninstrumented\nfun:reboot=uninstrumented\nfun:recv=uninstrumented\nfun:recvfrom=uninstrumented\nfun:recvmmsg=uninstrumented\nfun:recvmsg=uninstrumented\nfun:regcomp=uninstrumented\nfun:regerror=uninstrumented\nfun:regexec=uninstrumented\nfun:regfree=uninstrumented\nfun:register_printf_function=uninstrumented\nfun:register_printf_modifier=uninstrumented\nfun:register_printf_specifier=uninstrumented\nfun:register_printf_type=uninstrumented\nfun:registerrpc=uninstrumented\nfun:remainder=uninstrumented\nfun:remainderf=uninstrumented\nfun:remainderf128=uninstrumented\nfun:remainderf32=uninstrumented\nfun:remainderf32x=uninstrumented\nfun:remainderf64=uninstrumented\nfun:remainderf64x=uninstrumented\nfun:remainderl=uninstrumented\nfun:remap_file_pages=uninstrumented\nfun:remove=uninstrumented\nfun:removexattr=uninstrumented\nfun:remque=uninstrumented\nfun:remquo=uninstrumented\nfun:remquof=uninstrumented\nfun:remquof128=uninstrumented\nfun:remquof32=uninstrumented\nfun:remquof32x=uninstrumented\nfun:remquof64=uninstrumented\nfun:remquof64x=uninstrumented\nfun:remquol=uninstrumented\nfun:rename=uninstrumented\nfun:renameat=uninstrumented\nfun:res_gethostbyaddr=uninstrumented\nfun:res_gethostbyname=uninstrumented\nfun:res_gethostbyname2=uninstrumented\nfun:res_send_setqhook=uninstrumented\nfun:res_send_setrhook=uninstrume
nted\nfun:revoke=uninstrumented\nfun:rewind=uninstrumented\nfun:rewinddir=uninstrumented\nfun:rexec=uninstrumented\nfun:rexec_af=uninstrumented\nfun:rindex=uninstrumented\nfun:rint=uninstrumented\nfun:rintf=uninstrumented\nfun:rintf128=uninstrumented\nfun:rintf32=uninstrumented\nfun:rintf32x=uninstrumented\nfun:rintf64=uninstrumented\nfun:rintf64x=uninstrumented\nfun:rintl=uninstrumented\nfun:rmdir=uninstrumented\nfun:round=uninstrumented\nfun:roundeven=uninstrumented\nfun:roundevenf=uninstrumented\nfun:roundevenf128=uninstrumented\nfun:roundevenf32=uninstrumented\nfun:roundevenf32x=uninstrumented\nfun:roundevenf64=uninstrumented\nfun:roundevenf64x=uninstrumented\nfun:roundevenl=uninstrumented\nfun:roundf=uninstrumented\nfun:roundf128=uninstrumented\nfun:roundf32=uninstrumented\nfun:roundf32x=uninstrumented\nfun:roundf64=uninstrumented\nfun:roundf64x=uninstrumented\nfun:roundl=uninstrumented\nfun:rpmatch=uninstrumented\nfun:rresvport=uninstrumented\nfun:rresvport_af=uninstrumented\nfun:rtime=uninstrumented\nfun:ruserok=uninstrumented\nfun:ruserok_af=uninstrumented\nfun:ruserpass=uninstrumented\nfun:sbrk=uninstrumented\nfun:scalb=uninstrumented\nfun:scalbf=uninstrumented\nfun:scalbl=uninstrumented\nfun:scalbln=uninstrumented\nfun:scalblnf=uninstrumented\nfun:scalblnf128=uninstrumented\nfun:scalblnf32=uninstrumented\nfun:scalblnf32x=uninstrumented\nfun:scalblnf64=uninstrumented\nfun:scalblnf64x=uninstrumented\nfun:scalblnl=uninstrumented\nfun:scalbn=uninstrumented\nfun:scalbnf=uninstrumented\nfun:scalbnf128=uninstrumented\nfun:scalbnf32=uninstrumented\nfun:scalbnf32x=uninstrumented\nfun:scalbnf64=uninstrumented\nfun:scalbnf64x=uninstrumented\nfun:scalbnl=uninstrumented\nfun:scandir=uninstrumented\nfun:scandir64=uninstrumented\nfun:scandirat=uninstrumented\nfun:scandirat64=uninstrumented\nfun:scanf=uninstrumented\nfun:sched_get_priority_max=uninstrumented\nfun:sched_get_priority_min=uninstrumented\nfun:sched_getaffinity=uninstrumented\nfun:sched_getcpu=uninstrumented\n
fun:sched_getparam=uninstrumented\nfun:sched_getscheduler=uninstrumented\nfun:sched_rr_get_interval=uninstrumented\nfun:sched_setaffinity=uninstrumented\nfun:sched_setparam=uninstrumented\nfun:sched_setscheduler=uninstrumented\nfun:sched_yield=uninstrumented\nfun:secure_getenv=uninstrumented\nfun:seed48=uninstrumented\nfun:seed48_r=uninstrumented\nfun:seekdir=uninstrumented\nfun:select=uninstrumented\nfun:sem_close=uninstrumented\nfun:sem_destroy=uninstrumented\nfun:sem_getvalue=uninstrumented\nfun:sem_init=uninstrumented\nfun:sem_open=uninstrumented\nfun:sem_post=uninstrumented\nfun:sem_timedwait=uninstrumented\nfun:sem_trywait=uninstrumented\nfun:sem_unlink=uninstrumented\nfun:sem_wait=uninstrumented\nfun:semctl=uninstrumented\nfun:semget=uninstrumented\nfun:semop=uninstrumented\nfun:semtimedop=uninstrumented\nfun:send=uninstrumented\nfun:sendfile=uninstrumented\nfun:sendfile64=uninstrumented\nfun:sendmmsg=uninstrumented\nfun:sendmsg=uninstrumented\nfun:sendto=uninstrumented\nfun:setaliasent=uninstrumented\nfun:setbuf=uninstrumented\nfun:setbuffer=uninstrumented\nfun:setcontext=uninstrumented\nfun:setdomainname=uninstrumented\nfun:setegid=uninstrumented\nfun:setenv=uninstrumented\nfun:seteuid=uninstrumented\nfun:setfsent=uninstrumented\nfun:setfsgid=uninstrumented\nfun:setfsuid=uninstrumented\nfun:setgid=uninstrumented\nfun:setgrent=uninstrumented\nfun:setgroups=uninstrumented\nfun:sethostent=uninstrumented\nfun:sethostid=uninstrumented\nfun:sethostname=uninstrumented\nfun:setipv4sourcefilter=uninstrumented\nfun:setitimer=uninstrumented\nfun:setjmp=uninstrumented\nfun:setkey=uninstrumented\nfun:setkey_r=uninstrumented\nfun:setlinebuf=uninstrumented\nfun:setlocale=uninstrumented\nfun:setlogin=uninstrumented\nfun:setlogmask=uninstrumented\nfun:setmntent=uninstrumented\nfun:setnetent=uninstrumented\nfun:setnetgrent=uninstrumented\nfun:setns=uninstrumented\nfun:setpayload=uninstrumented\nfun:setpayloadf=uninstrumented\nfun:setpayloadf128=uninstrumented\nfun:setpayload
f32=uninstrumented\nfun:setpayloadf32x=uninstrumented\nfun:setpayloadf64=uninstrumented\nfun:setpayloadf64x=uninstrumented\nfun:setpayloadl=uninstrumented\nfun:setpayloadsig=uninstrumented\nfun:setpayloadsigf=uninstrumented\nfun:setpayloadsigf128=uninstrumented\nfun:setpayloadsigf32=uninstrumented\nfun:setpayloadsigf32x=uninstrumented\nfun:setpayloadsigf64=uninstrumented\nfun:setpayloadsigf64x=uninstrumented\nfun:setpayloadsigl=uninstrumented\nfun:setpgid=uninstrumented\nfun:setpgrp=uninstrumented\nfun:setpriority=uninstrumented\nfun:setprotoent=uninstrumented\nfun:setpwent=uninstrumented\nfun:setregid=uninstrumented\nfun:setresgid=uninstrumented\nfun:setresuid=uninstrumented\nfun:setreuid=uninstrumented\nfun:setrlimit=uninstrumented\nfun:setrlimit64=uninstrumented\nfun:setrpcent=uninstrumented\nfun:setservent=uninstrumented\nfun:setsgent=uninstrumented\nfun:setsid=uninstrumented\nfun:setsockopt=uninstrumented\nfun:setsourcefilter=uninstrumented\nfun:setspent=uninstrumented\nfun:setstate=uninstrumented\nfun:setstate_r=uninstrumented\nfun:settimeofday=uninstrumented\nfun:setttyent=uninstrumented\nfun:setuid=uninstrumented\nfun:setusershell=uninstrumented\nfun:setutent=uninstrumented\nfun:setutxent=uninstrumented\nfun:setvbuf=uninstrumented\nfun:setxattr=uninstrumented\nfun:sgetsgent=uninstrumented\nfun:sgetsgent_r=uninstrumented\nfun:sgetspent=uninstrumented\nfun:sgetspent_r=uninstrumented\nfun:shm_open=uninstrumented\nfun:shm_unlink=uninstrumented\nfun:shmat=uninstrumented\nfun:shmctl=uninstrumented\nfun:shmdt=uninstrumented\nfun:shmget=uninstrumented\nfun:shutdown=uninstrumented\nfun:sigaction=uninstrumented\nfun:sigaddset=uninstrumented\nfun:sigaltstack=uninstrumented\nfun:sigandset=uninstrumented\nfun:sigblock=uninstrumented\nfun:sigdelset=uninstrumented\nfun:sigemptyset=uninstrumented\nfun:sigfillset=uninstrumented\nfun:siggetmask=uninstrumented\nfun:sighold=uninstrumented\nfun:sigignore=uninstrumented\nfun:siginterrupt=uninstrumented\nfun:sigisemptyset=uninstru
mented\nfun:sigismember=uninstrumented\nfun:siglongjmp=uninstrumented\nfun:signal=uninstrumented\nfun:signalfd=uninstrumented\nfun:significand=uninstrumented\nfun:significandf=uninstrumented\nfun:significandl=uninstrumented\nfun:sigorset=uninstrumented\nfun:sigpause=uninstrumented\nfun:sigpending=uninstrumented\nfun:sigprocmask=uninstrumented\nfun:sigqueue=uninstrumented\nfun:sigrelse=uninstrumented\nfun:sigreturn=uninstrumented\nfun:sigset=uninstrumented\nfun:sigsetmask=uninstrumented\nfun:sigstack=uninstrumented\nfun:sigsuspend=uninstrumented\nfun:sigtimedwait=uninstrumented\nfun:sigvec=uninstrumented\nfun:sigwait=uninstrumented\nfun:sigwaitinfo=uninstrumented\nfun:sin=uninstrumented\nfun:sincos=uninstrumented\nfun:sincosf=uninstrumented\nfun:sincosf128=uninstrumented\nfun:sincosf32=uninstrumented\nfun:sincosf32x=uninstrumented\nfun:sincosf64=uninstrumented\nfun:sincosf64x=uninstrumented\nfun:sincosl=uninstrumented\nfun:sinf=uninstrumented\nfun:sinf128=uninstrumented\nfun:sinf32=uninstrumented\nfun:sinf32x=uninstrumented\nfun:sinf64=uninstrumented\nfun:sinf64x=uninstrumented\nfun:sinh=uninstrumented\nfun:sinhf=uninstrumented\nfun:sinhf128=uninstrumented\nfun:sinhf32=uninstrumented\nfun:sinhf32x=uninstrumented\nfun:sinhf64=uninstrumented\nfun:sinhf64x=uninstrumented\nfun:sinhl=uninstrumented\nfun:sinl=uninstrumented\nfun:sleep=uninstrumented\nfun:snprintf=uninstrumented\nfun:sockatmark=uninstrumented\nfun:socket=uninstrumented\nfun:socketpair=uninstrumented\nfun:splice=uninstrumented\nfun:sprintf=uninstrumented\nfun:sprofil=uninstrumented\nfun:sqrt=uninstrumented\nfun:sqrtf=uninstrumented\nfun:sqrtf128=uninstrumented\nfun:sqrtf32=uninstrumented\nfun:sqrtf32x=uninstrumented\nfun:sqrtf64=uninstrumented\nfun:sqrtf64x=uninstrumented\nfun:sqrtl=uninstrumented\nfun:srand=uninstrumented\nfun:srand48=uninstrumented\nfun:srand48_r=uninstrumented\nfun:srandom=uninstrumented\nfun:srandom_r=uninstrumented\nfun:sscanf=uninstrumented\nfun:ssignal=uninstrumented\nfun:sstk=uninstr
umented\nfun:stat=uninstrumented\nfun:stat64=uninstrumented\nfun:statfs=uninstrumented\nfun:statfs64=uninstrumented\nfun:statvfs=uninstrumented\nfun:statvfs64=uninstrumented\nfun:step=uninstrumented\nfun:stime=uninstrumented\nfun:stpcpy=uninstrumented\nfun:stpncpy=uninstrumented\nfun:strcasecmp=uninstrumented\nfun:strcasecmp_l=uninstrumented\nfun:strcasestr=uninstrumented\nfun:strcat=uninstrumented\nfun:strchr=uninstrumented\nfun:strchrnul=uninstrumented\nfun:strcmp=uninstrumented\nfun:strcoll=uninstrumented\nfun:strcoll_l=uninstrumented\nfun:strcpy=uninstrumented\nfun:strcspn=uninstrumented\nfun:strdup=uninstrumented\nfun:strerror=uninstrumented\nfun:strerror_l=uninstrumented\nfun:strerror_r=uninstrumented\nfun:strfmon=uninstrumented\nfun:strfmon_l=uninstrumented\nfun:strfromd=uninstrumented\nfun:strfromf=uninstrumented\nfun:strfromf128=uninstrumented\nfun:strfromf32=uninstrumented\nfun:strfromf32x=uninstrumented\nfun:strfromf64=uninstrumented\nfun:strfromf64x=uninstrumented\nfun:strfroml=uninstrumented\nfun:strfry=uninstrumented\nfun:strftime=uninstrumented\nfun:strftime_l=uninstrumented\nfun:strlen=uninstrumented\nfun:strncasecmp=uninstrumented\nfun:strncasecmp_l=uninstrumented\nfun:strncat=uninstrumented\nfun:strncmp=uninstrumented\nfun:strncpy=uninstrumented\nfun:strndup=uninstrumented\nfun:strnlen=uninstrumented\nfun:strpbrk=uninstrumented\nfun:strptime=uninstrumented\nfun:strptime_l=uninstrumented\nfun:strrchr=uninstrumented\nfun:strsep=uninstrumented\nfun:strsignal=uninstrumented\nfun:strspn=uninstrumented\nfun:strstr=uninstrumented\nfun:strtod=uninstrumented\nfun:strtod_l=uninstrumented\nfun:strtof=uninstrumented\nfun:strtof128=uninstrumented\nfun:strtof128_l=uninstrumented\nfun:strtof32=uninstrumented\nfun:strtof32_l=uninstrumented\nfun:strtof32x=uninstrumented\nfun:strtof32x_l=uninstrumented\nfun:strtof64=uninstrumented\nfun:strtof64_l=uninstrumented\nfun:strtof64x=uninstrumented\nfun:strtof64x_l=uninstrumented\nfun:strtof_l=uninstrumented\nfun:strtoimax=
uninstrumented\nfun:strtok=uninstrumented\nfun:strtok_r=uninstrumented\nfun:strtol=uninstrumented\nfun:strtol_l=uninstrumented\nfun:strtold=uninstrumented\nfun:strtold_l=uninstrumented\nfun:strtoll=uninstrumented\nfun:strtoll_l=uninstrumented\nfun:strtoq=uninstrumented\nfun:strtoul=uninstrumented\nfun:strtoul_l=uninstrumented\nfun:strtoull=uninstrumented\nfun:strtoull_l=uninstrumented\nfun:strtoumax=uninstrumented\nfun:strtouq=uninstrumented\nfun:strverscmp=uninstrumented\nfun:strxfrm=uninstrumented\nfun:strxfrm_l=uninstrumented\nfun:stty=uninstrumented\nfun:svc_exit=uninstrumented\nfun:svc_getreq=uninstrumented\nfun:svc_getreq_common=uninstrumented\nfun:svc_getreq_poll=uninstrumented\nfun:svc_getreqset=uninstrumented\nfun:svc_register=uninstrumented\nfun:svc_run=uninstrumented\nfun:svc_sendreply=uninstrumented\nfun:svc_unregister=uninstrumented\nfun:svcerr_auth=uninstrumented\nfun:svcerr_decode=uninstrumented\nfun:svcerr_noproc=uninstrumented\nfun:svcerr_noprog=uninstrumented\nfun:svcerr_progvers=uninstrumented\nfun:svcerr_systemerr=uninstrumented\nfun:svcerr_weakauth=uninstrumented\nfun:svcfd_create=uninstrumented\nfun:svcraw_create=uninstrumented\nfun:svctcp_create=uninstrumented\nfun:svcudp_bufcreate=uninstrumented\nfun:svcudp_create=uninstrumented\nfun:svcudp_enablecache=uninstrumented\nfun:svcunix_create=uninstrumented\nfun:svcunixfd_create=uninstrumented\nfun:swab=uninstrumented\nfun:swapcontext=uninstrumented\nfun:swapoff=uninstrumented\nfun:swapon=uninstrumented\nfun:swprintf=uninstrumented\nfun:swscanf=uninstrumented\nfun:symlink=uninstrumented\nfun:symlinkat=uninstrumented\nfun:sync=uninstrumented\nfun:sync_file_range=uninstrumented\nfun:syncfs=uninstrumented\nfun:syscall=uninstrumented\nfun:sysconf=uninstrumented\nfun:sysctl=uninstrumented\nfun:sysinfo=uninstrumented\nfun:syslog=uninstrumented\nfun:system=uninstrumented\nfun:sysv_signal=uninstrumented\nfun:tan=uninstrumented\nfun:tanf=uninstrumented\nfun:tanf128=uninstrumented\nfun:tanf32=uninstrumented\
nfun:tanf32x=uninstrumented\nfun:tanf64=uninstrumented\nfun:tanf64x=uninstrumented\nfun:tanh=uninstrumented\nfun:tanhf=uninstrumented\nfun:tanhf128=uninstrumented\nfun:tanhf32=uninstrumented\nfun:tanhf32x=uninstrumented\nfun:tanhf64=uninstrumented\nfun:tanhf64x=uninstrumented\nfun:tanhl=uninstrumented\nfun:tanl=uninstrumented\nfun:tcdrain=uninstrumented\nfun:tcflow=uninstrumented\nfun:tcflush=uninstrumented\nfun:tcgetattr=uninstrumented\nfun:tcgetpgrp=uninstrumented\nfun:tcgetsid=uninstrumented\nfun:tcsendbreak=uninstrumented\nfun:tcsetattr=uninstrumented\nfun:tcsetpgrp=uninstrumented\nfun:td_init=uninstrumented\nfun:td_log=uninstrumented\nfun:td_symbol_list=uninstrumented\nfun:td_ta_clear_event=uninstrumented\nfun:td_ta_delete=uninstrumented\nfun:td_ta_enable_stats=uninstrumented\nfun:td_ta_event_addr=uninstrumented\nfun:td_ta_event_getmsg=uninstrumented\nfun:td_ta_get_nthreads=uninstrumented\nfun:td_ta_get_ph=uninstrumented\nfun:td_ta_get_stats=uninstrumented\nfun:td_ta_map_id2thr=uninstrumented\nfun:td_ta_map_lwp2thr=uninstrumented\nfun:td_ta_new=uninstrumented\nfun:td_ta_reset_stats=uninstrumented\nfun:td_ta_set_event=uninstrumented\nfun:td_ta_setconcurrency=uninstrumented\nfun:td_ta_thr_iter=uninstrumented\nfun:td_ta_tsd_iter=uninstrumented\nfun:td_thr_clear_event=uninstrumented\nfun:td_thr_dbresume=uninstrumented\nfun:td_thr_dbsuspend=uninstrumented\nfun:td_thr_event_enable=uninstrumented\nfun:td_thr_event_getmsg=uninstrumented\nfun:td_thr_get_info=uninstrumented\nfun:td_thr_getfpregs=uninstrumented\nfun:td_thr_getgregs=uninstrumented\nfun:td_thr_getxregs=uninstrumented\nfun:td_thr_getxregsize=uninstrumented\nfun:td_thr_set_event=uninstrumented\nfun:td_thr_setfpregs=uninstrumented\nfun:td_thr_setgregs=uninstrumented\nfun:td_thr_setprio=uninstrumented\nfun:td_thr_setsigpending=uninstrumented\nfun:td_thr_setxregs=uninstrumented\nfun:td_thr_sigsetmask=uninstrumented\nfun:td_thr_tls_get_addr=uninstrumented\nfun:td_thr_tlsbase=uninstrumented\nfun:td_thr_tsd=uninstr
umented\nfun:td_thr_validate=uninstrumented\nfun:tdelete=uninstrumented\nfun:tdestroy=uninstrumented\nfun:tee=uninstrumented\nfun:telldir=uninstrumented\nfun:tempnam=uninstrumented\nfun:textdomain=uninstrumented\nfun:tfind=uninstrumented\nfun:tgamma=uninstrumented\nfun:tgammaf=uninstrumented\nfun:tgammaf128=uninstrumented\nfun:tgammaf32=uninstrumented\nfun:tgammaf32x=uninstrumented\nfun:tgammaf64=uninstrumented\nfun:tgammaf64x=uninstrumented\nfun:tgammal=uninstrumented\nfun:time=uninstrumented\nfun:timegm=uninstrumented\nfun:timelocal=uninstrumented\nfun:timer_create=uninstrumented\nfun:timer_delete=uninstrumented\nfun:timer_getoverrun=uninstrumented\nfun:timer_gettime=uninstrumented\nfun:timer_settime=uninstrumented\nfun:timerfd_create=uninstrumented\nfun:timerfd_gettime=uninstrumented\nfun:timerfd_settime=uninstrumented\nfun:times=uninstrumented\nfun:timespec_get=uninstrumented\nfun:tmpfile=uninstrumented\nfun:tmpfile64=uninstrumented\nfun:tmpnam=uninstrumented\nfun:tmpnam_r=uninstrumented\nfun:toascii=uninstrumented\nfun:tolower=uninstrumented\nfun:tolower_l=uninstrumented\nfun:totalorder=uninstrumented\nfun:totalorderf=uninstrumented\nfun:totalorderf128=uninstrumented\nfun:totalorderf32=uninstrumented\nfun:totalorderf32x=uninstrumented\nfun:totalorderf64=uninstrumented\nfun:totalorderf64x=uninstrumented\nfun:totalorderl=uninstrumented\nfun:totalordermag=uninstrumented\nfun:totalordermagf=uninstrumented\nfun:totalordermagf128=uninstrumented\nfun:totalordermagf32=uninstrumented\nfun:totalordermagf32x=uninstrumented\nfun:totalordermagf64=uninstrumented\nfun:totalordermagf64x=uninstrumented\nfun:totalordermagl=uninstrumented\nfun:toupper=uninstrumented\nfun:toupper_l=uninstrumented\nfun:towctrans=uninstrumented\nfun:towctrans_l=uninstrumented\nfun:towlower=uninstrumented\nfun:towlower_l=uninstrumented\nfun:towupper=uninstrumented\nfun:towupper_l=uninstrumented\nfun:tr_break=uninstrumented\nfun:trunc=uninstrumented\nfun:truncate=uninstrumented\nfun:truncate64=uninstr
umented\nfun:truncf=uninstrumented\nfun:truncf128=uninstrumented\nfun:truncf32=uninstrumented\nfun:truncf32x=uninstrumented\nfun:truncf64=uninstrumented\nfun:truncf64x=uninstrumented\nfun:truncl=uninstrumented\nfun:tsearch=uninstrumented\nfun:ttyname=uninstrumented\nfun:ttyname_r=uninstrumented\nfun:ttyslot=uninstrumented\nfun:twalk=uninstrumented\nfun:tzset=uninstrumented\nfun:ualarm=uninstrumented\nfun:ufromfp=uninstrumented\nfun:ufromfpf=uninstrumented\nfun:ufromfpf128=uninstrumented\nfun:ufromfpf32=uninstrumented\nfun:ufromfpf32x=uninstrumented\nfun:ufromfpf64=uninstrumented\nfun:ufromfpf64x=uninstrumented\nfun:ufromfpl=uninstrumented\nfun:ufromfpx=uninstrumented\nfun:ufromfpxf=uninstrumented\nfun:ufromfpxf128=uninstrumented\nfun:ufromfpxf32=uninstrumented\nfun:ufromfpxf32x=uninstrumented\nfun:ufromfpxf64=uninstrumented\nfun:ufromfpxf64x=uninstrumented\nfun:ufromfpxl=uninstrumented\nfun:ulckpwdf=uninstrumented\nfun:ulimit=uninstrumented\nfun:umask=uninstrumented\nfun:umount=uninstrumented\nfun:umount2=uninstrumented\nfun:uname=uninstrumented\nfun:ungetc=uninstrumented\nfun:ungetwc=uninstrumented\nfun:unlink=uninstrumented\nfun:unlinkat=uninstrumented\nfun:unlockpt=uninstrumented\nfun:unsetenv=uninstrumented\nfun:unshare=uninstrumented\nfun:updwtmp=uninstrumented\nfun:updwtmpx=uninstrumented\nfun:uselib=uninstrumented\nfun:uselocale=uninstrumented\nfun:user2netname=uninstrumented\nfun:usleep=uninstrumented\nfun:ustat=uninstrumented\nfun:utime=uninstrumented\nfun:utimensat=uninstrumented\nfun:utimes=uninstrumented\nfun:utmpname=uninstrumented\nfun:utmpxname=uninstrumented\nfun:valloc=uninstrumented\nfun:vasprintf=uninstrumented\nfun:vdprintf=uninstrumented\nfun:verr=uninstrumented\nfun:verrx=uninstrumented\nfun:versionsort=uninstrumented\nfun:versionsort64=uninstrumented\nfun:vfork=uninstrumented\nfun:vfprintf=uninstrumented\nfun:vfscanf=uninstrumented\nfun:vfwprintf=uninstrumented\nfun:vfwscanf=uninstrumented\nfun:vhangup=uninstrumented\nfun:vlimit=uninstrumented
\nfun:vmsplice=uninstrumented\nfun:vprintf=uninstrumented\nfun:vscanf=uninstrumented\nfun:vsnprintf=uninstrumented\nfun:vsprintf=uninstrumented\nfun:vsscanf=uninstrumented\nfun:vswprintf=uninstrumented\nfun:vswscanf=uninstrumented\nfun:vsyslog=uninstrumented\nfun:vtimes=uninstrumented\nfun:vwarn=uninstrumented\nfun:vwarnx=uninstrumented\nfun:vwprintf=uninstrumented\nfun:vwscanf=uninstrumented\nfun:wait=uninstrumented\nfun:wait3=uninstrumented\nfun:wait4=uninstrumented\nfun:waitid=uninstrumented\nfun:waitpid=uninstrumented\nfun:warn=uninstrumented\nfun:warnx=uninstrumented\nfun:wcpcpy=uninstrumented\nfun:wcpncpy=uninstrumented\nfun:wcrtomb=uninstrumented\nfun:wcscasecmp=uninstrumented\nfun:wcscasecmp_l=uninstrumented\nfun:wcscat=uninstrumented\nfun:wcschr=uninstrumented\nfun:wcschrnul=uninstrumented\nfun:wcscmp=uninstrumented\nfun:wcscoll=uninstrumented\nfun:wcscoll_l=uninstrumented\nfun:wcscpy=uninstrumented\nfun:wcscspn=uninstrumented\nfun:wcsdup=uninstrumented\nfun:wcsftime=uninstrumented\nfun:wcsftime_l=uninstrumented\nfun:wcslen=uninstrumented\nfun:wcsncasecmp=uninstrumented\nfun:wcsncasecmp_l=uninstrumented\nfun:wcsncat=uninstrumented\nfun:wcsncmp=uninstrumented\nfun:wcsncpy=uninstrumented\nfun:wcsnlen=uninstrumented\nfun:wcsnrtombs=uninstrumented\nfun:wcspbrk=uninstrumented\nfun:wcsrchr=uninstrumented\nfun:wcsrtombs=uninstrumented\nfun:wcsspn=uninstrumented\nfun:wcsstr=uninstrumented\nfun:wcstod=uninstrumented\nfun:wcstod_l=uninstrumented\nfun:wcstof=uninstrumented\nfun:wcstof128=uninstrumented\nfun:wcstof128_l=uninstrumented\nfun:wcstof32=uninstrumented\nfun:wcstof32_l=uninstrumented\nfun:wcstof32x=uninstrumented\nfun:wcstof32x_l=uninstrumented\nfun:wcstof64=uninstrumented\nfun:wcstof64_l=uninstrumented\nfun:wcstof64x=uninstrumented\nfun:wcstof64x_l=uninstrumented\nfun:wcstof_l=uninstrumented\nfun:wcstoimax=uninstrumented\nfun:wcstok=uninstrumented\nfun:wcstol=uninstrumented\nfun:wcstol_l=uninstrumented\nfun:wcstold=uninstrumented\nfun:wcstold_l=uninstrumente
d\nfun:wcstoll=uninstrumented\nfun:wcstoll_l=uninstrumented\nfun:wcstombs=uninstrumented\nfun:wcstoq=uninstrumented\nfun:wcstoul=uninstrumented\nfun:wcstoul_l=uninstrumented\nfun:wcstoull=uninstrumented\nfun:wcstoull_l=uninstrumented\nfun:wcstoumax=uninstrumented\nfun:wcstouq=uninstrumented\nfun:wcswcs=uninstrumented\nfun:wcswidth=uninstrumented\nfun:wcsxfrm=uninstrumented\nfun:wcsxfrm_l=uninstrumented\nfun:wctob=uninstrumented\nfun:wctomb=uninstrumented\nfun:wctrans=uninstrumented\nfun:wctrans_l=uninstrumented\nfun:wctype=uninstrumented\nfun:wctype_l=uninstrumented\nfun:wcwidth=uninstrumented\nfun:wmemchr=uninstrumented\nfun:wmemcmp=uninstrumented\nfun:wmemcpy=uninstrumented\nfun:wmemmove=uninstrumented\nfun:wmempcpy=uninstrumented\nfun:wmemset=uninstrumented\nfun:wordexp=uninstrumented\nfun:wordfree=uninstrumented\nfun:wprintf=uninstrumented\nfun:write=uninstrumented\nfun:writeColdStartFile=uninstrumented\nfun:writev=uninstrumented\nfun:wscanf=uninstrumented\nfun:xdecrypt=uninstrumented\nfun:xdr_accepted_reply=uninstrumented\nfun:xdr_array=uninstrumented\nfun:xdr_authdes_cred=uninstrumented\nfun:xdr_authdes_verf=uninstrumented\nfun:xdr_authunix_parms=uninstrumented\nfun:xdr_bool=uninstrumented\nfun:xdr_bytes=uninstrumented\nfun:xdr_callhdr=uninstrumented\nfun:xdr_callmsg=uninstrumented\nfun:xdr_cback_data=uninstrumented\nfun:xdr_char=uninstrumented\nfun:xdr_cryptkeyarg=uninstrumented\nfun:xdr_cryptkeyarg2=uninstrumented\nfun:xdr_cryptkeyres=uninstrumented\nfun:xdr_des_block=uninstrumented\nfun:xdr_domainname=uninstrumented\nfun:xdr_double=uninstrumented\nfun:xdr_enum=uninstrumented\nfun:xdr_float=uninstrumented\nfun:xdr_free=uninstrumented\nfun:xdr_getcredres=uninstrumented\nfun:xdr_hyper=uninstrumented\nfun:xdr_int=uninstrumented\nfun:xdr_int16_t=uninstrumented\nfun:xdr_int32_t=uninstrumented\nfun:xdr_int64_t=uninstrumented\nfun:xdr_int8_t=uninstrumented\nfun:xdr_key_netstarg=uninstrumented\nfun:xdr_key_netstres=uninstrumented\nfun:xdr_keybuf=uninstrumented\nfun:
xdr_keydat=uninstrumented\nfun:xdr_keystatus=uninstrumented\nfun:xdr_long=uninstrumented\nfun:xdr_longlong_t=uninstrumented\nfun:xdr_mapname=uninstrumented\nfun:xdr_netnamestr=uninstrumented\nfun:xdr_netobj=uninstrumented\nfun:xdr_obj_p=uninstrumented\nfun:xdr_opaque=uninstrumented\nfun:xdr_opaque_auth=uninstrumented\nfun:xdr_peername=uninstrumented\nfun:xdr_pmap=uninstrumented\nfun:xdr_pmaplist=uninstrumented\nfun:xdr_pointer=uninstrumented\nfun:xdr_quad_t=uninstrumented\nfun:xdr_reference=uninstrumented\nfun:xdr_rejected_reply=uninstrumented\nfun:xdr_replymsg=uninstrumented\nfun:xdr_rmtcall_args=uninstrumented\nfun:xdr_rmtcallres=uninstrumented\nfun:xdr_short=uninstrumented\nfun:xdr_sizeof=uninstrumented\nfun:xdr_string=uninstrumented\nfun:xdr_u_char=uninstrumented\nfun:xdr_u_hyper=uninstrumented\nfun:xdr_u_int=uninstrumented\nfun:xdr_u_long=uninstrumented\nfun:xdr_u_longlong_t=uninstrumented\nfun:xdr_u_quad_t=uninstrumented\nfun:xdr_u_short=uninstrumented\nfun:xdr_uint16_t=uninstrumented\nfun:xdr_uint32_t=uninstrumented\nfun:xdr_uint64_t=uninstrumented\nfun:xdr_uint8_t=uninstrumented\nfun:xdr_union=uninstrumented\nfun:xdr_unixcred=uninstrumented\nfun:xdr_valdat=uninstrumented\nfun:xdr_vector=uninstrumented\nfun:xdr_void=uninstrumented\nfun:xdr_wrapstring=uninstrumented\nfun:xdr_yp_buf=uninstrumented\nfun:xdr_ypall=uninstrumented\nfun:xdr_ypbind_binding=uninstrumented\nfun:xdr_ypbind_resp=uninstrumented\nfun:xdr_ypbind_resptype=uninstrumented\nfun:xdr_ypbind_setdom=uninstrumented\nfun:xdr_ypdelete_args=uninstrumented\nfun:xdr_ypmap_parms=uninstrumented\nfun:xdr_ypmaplist=uninstrumented\nfun:xdr_yppush_status=uninstrumented\nfun:xdr_yppushresp_xfr=uninstrumented\nfun:xdr_ypreq_key=uninstrumented\nfun:xdr_ypreq_nokey=uninstrumented\nfun:xdr_ypreq_xfr=uninstrumented\nfun:xdr_ypresp_all=uninstrumented\nfun:xdr_ypresp_key_val=uninstrumented\nfun:xdr_ypresp_maplist=uninstrumented\nfun:xdr_ypresp_master=uninstrumented\nfun:xdr_ypresp_order=uninstrumented\nfun:xdr_ypresp_
val=uninstrumented\nfun:xdr_ypresp_xfr=uninstrumented\nfun:xdr_ypstat=uninstrumented\nfun:xdr_ypupdate_args=uninstrumented\nfun:xdr_ypxfrstat=uninstrumented\nfun:xdrmem_create=uninstrumented\nfun:xdrrec_create=uninstrumented\nfun:xdrrec_endofrecord=uninstrumented\nfun:xdrrec_eof=uninstrumented\nfun:xdrrec_skiprecord=uninstrumented\nfun:xdrstdio_create=uninstrumented\nfun:xencrypt=uninstrumented\nfun:xprt_register=uninstrumented\nfun:xprt_unregister=uninstrumented\nfun:y0=uninstrumented\nfun:y0f=uninstrumented\nfun:y0f128=uninstrumented\nfun:y0f32=uninstrumented\nfun:y0f32x=uninstrumented\nfun:y0f64=uninstrumented\nfun:y0f64x=uninstrumented\nfun:y0l=uninstrumented\nfun:y1=uninstrumented\nfun:y1f=uninstrumented\nfun:y1f128=uninstrumented\nfun:y1f32=uninstrumented\nfun:y1f32x=uninstrumented\nfun:y1f64=uninstrumented\nfun:y1f64x=uninstrumented\nfun:y1l=uninstrumented\nfun:yn=uninstrumented\nfun:ynf=uninstrumented\nfun:ynf128=uninstrumented\nfun:ynf32=uninstrumented\nfun:ynf32x=uninstrumented\nfun:ynf64=uninstrumented\nfun:ynf64x=uninstrumented\nfun:ynl=uninstrumented\nfun:yp_all=uninstrumented\nfun:yp_bind=uninstrumented\nfun:yp_first=uninstrumented\nfun:yp_get_default_domain=uninstrumented\nfun:yp_maplist=uninstrumented\nfun:yp_master=uninstrumented\nfun:yp_match=uninstrumented\nfun:yp_next=uninstrumented\nfun:yp_order=uninstrumented\nfun:yp_unbind=uninstrumented\nfun:yp_update=uninstrumented\nfun:ypbinderr_string=uninstrumented\nfun:yperr_string=uninstrumented\nfun:ypprot_err=uninstrumented\n"
  },
  {
    "path": "runtime/dfsan/libc_ubuntu2204_abilist.txt",
    "content": "fun:_Exit=uninstrumented\nfun:_Fork=uninstrumented\nfun:_IO_adjust_column=uninstrumented\nfun:_IO_adjust_wcolumn=uninstrumented\nfun:_IO_default_doallocate=uninstrumented\nfun:_IO_default_finish=uninstrumented\nfun:_IO_default_pbackfail=uninstrumented\nfun:_IO_default_uflow=uninstrumented\nfun:_IO_default_xsgetn=uninstrumented\nfun:_IO_default_xsputn=uninstrumented\nfun:_IO_do_write=uninstrumented\nfun:_IO_doallocbuf=uninstrumented\nfun:_IO_enable_locks=uninstrumented\nfun:_IO_fclose=uninstrumented\nfun:_IO_fdopen=uninstrumented\nfun:_IO_feof=uninstrumented\nfun:_IO_ferror=uninstrumented\nfun:_IO_fflush=uninstrumented\nfun:_IO_fgetpos=uninstrumented\nfun:_IO_fgetpos64=uninstrumented\nfun:_IO_fgets=uninstrumented\nfun:_IO_file_attach=uninstrumented\nfun:_IO_file_close=uninstrumented\nfun:_IO_file_close_it=uninstrumented\nfun:_IO_file_doallocate=uninstrumented\nfun:_IO_file_finish=uninstrumented\nfun:_IO_file_fopen=uninstrumented\nfun:_IO_file_init=uninstrumented\nfun:_IO_file_open=uninstrumented\nfun:_IO_file_overflow=uninstrumented\nfun:_IO_file_read=uninstrumented\nfun:_IO_file_seek=uninstrumented\nfun:_IO_file_seekoff=uninstrumented\nfun:_IO_file_setbuf=uninstrumented\nfun:_IO_file_stat=uninstrumented\nfun:_IO_file_sync=uninstrumented\nfun:_IO_file_underflow=uninstrumented\nfun:_IO_file_write=uninstrumented\nfun:_IO_file_xsputn=uninstrumented\nfun:_IO_flockfile=uninstrumented\nfun:_IO_flush_all=uninstrumented\nfun:_IO_flush_all_linebuffered=uninstrumented\nfun:_IO_fopen=uninstrumented\nfun:_IO_fprintf=uninstrumented\nfun:_IO_fputs=uninstrumented\nfun:_IO_fread=uninstrumented\nfun:_IO_free_backup_area=uninstrumented\nfun:_IO_free_wbackup_area=uninstrumented\nfun:_IO_fsetpos=uninstrumented\nfun:_IO_fsetpos64=uninstrumented\nfun:_IO_ftell=uninstrumented\nfun:_IO_ftrylockfile=uninstrumented\nfun:_IO_funlockfile=uninstrumented\nfun:_IO_fwrite=uninstrumented\nfun:_IO_getc=uninstrumented\nfun:_IO_getline=uninstrumented\nfun:_IO_getline_info=uninstrumented
\nfun:_IO_gets=uninstrumented\nfun:_IO_init=uninstrumented\nfun:_IO_init_marker=uninstrumented\nfun:_IO_init_wmarker=uninstrumented\nfun:_IO_iter_begin=uninstrumented\nfun:_IO_iter_end=uninstrumented\nfun:_IO_iter_file=uninstrumented\nfun:_IO_iter_next=uninstrumented\nfun:_IO_least_wmarker=uninstrumented\nfun:_IO_link_in=uninstrumented\nfun:_IO_list_lock=uninstrumented\nfun:_IO_list_resetlock=uninstrumented\nfun:_IO_list_unlock=uninstrumented\nfun:_IO_marker_delta=uninstrumented\nfun:_IO_marker_difference=uninstrumented\nfun:_IO_padn=uninstrumented\nfun:_IO_peekc_locked=uninstrumented\nfun:_IO_popen=uninstrumented\nfun:_IO_printf=uninstrumented\nfun:_IO_proc_close=uninstrumented\nfun:_IO_proc_open=uninstrumented\nfun:_IO_putc=uninstrumented\nfun:_IO_puts=uninstrumented\nfun:_IO_remove_marker=uninstrumented\nfun:_IO_seekmark=uninstrumented\nfun:_IO_seekoff=uninstrumented\nfun:_IO_seekpos=uninstrumented\nfun:_IO_seekwmark=uninstrumented\nfun:_IO_setb=uninstrumented\nfun:_IO_setbuffer=uninstrumented\nfun:_IO_setvbuf=uninstrumented\nfun:_IO_sgetn=uninstrumented\nfun:_IO_sprintf=uninstrumented\nfun:_IO_sputbackc=uninstrumented\nfun:_IO_sputbackwc=uninstrumented\nfun:_IO_sscanf=uninstrumented\nfun:_IO_str_init_readonly=uninstrumented\nfun:_IO_str_init_static=uninstrumented\nfun:_IO_str_overflow=uninstrumented\nfun:_IO_str_pbackfail=uninstrumented\nfun:_IO_str_seekoff=uninstrumented\nfun:_IO_str_underflow=uninstrumented\nfun:_IO_sungetc=uninstrumented\nfun:_IO_sungetwc=uninstrumented\nfun:_IO_switch_to_get_mode=uninstrumented\nfun:_IO_switch_to_main_wget_area=uninstrumented\nfun:_IO_switch_to_wbackup_area=uninstrumented\nfun:_IO_switch_to_wget_mode=uninstrumented\nfun:_IO_un_link=uninstrumented\nfun:_IO_ungetc=uninstrumented\nfun:_IO_unsave_markers=uninstrumented\nfun:_IO_unsave_wmarkers=uninstrumented\nfun:_IO_vfprintf=uninstrumented\nfun:_IO_vfscanf=uninstrumented\nfun:_IO_vsprintf=uninstrumented\nfun:_IO_wdefault_doallocate=uninstrumented\nfun:_IO_wdefault_finish=uninst
rumented\nfun:_IO_wdefault_pbackfail=uninstrumented\nfun:_IO_wdefault_uflow=uninstrumented\nfun:_IO_wdefault_xsgetn=uninstrumented\nfun:_IO_wdefault_xsputn=uninstrumented\nfun:_IO_wdo_write=uninstrumented\nfun:_IO_wdoallocbuf=uninstrumented\nfun:_IO_wfile_overflow=uninstrumented\nfun:_IO_wfile_seekoff=uninstrumented\nfun:_IO_wfile_sync=uninstrumented\nfun:_IO_wfile_underflow=uninstrumented\nfun:_IO_wfile_xsputn=uninstrumented\nfun:_IO_wmarker_delta=uninstrumented\nfun:_IO_wsetb=uninstrumented\nfun:_Unwind_Backtrace=uninstrumented\nfun:_Unwind_DeleteException=uninstrumented\nfun:_Unwind_FindEnclosingFunction=uninstrumented\nfun:_Unwind_Find_FDE=uninstrumented\nfun:_Unwind_ForcedUnwind=uninstrumented\nfun:_Unwind_GetCFA=uninstrumented\nfun:_Unwind_GetDataRelBase=uninstrumented\nfun:_Unwind_GetGR=uninstrumented\nfun:_Unwind_GetIP=uninstrumented\nfun:_Unwind_GetIPInfo=uninstrumented\nfun:_Unwind_GetLanguageSpecificData=uninstrumented\nfun:_Unwind_GetRegionStart=uninstrumented\nfun:_Unwind_GetTextRelBase=uninstrumented\nfun:_Unwind_RaiseException=uninstrumented\nfun:_Unwind_Resume=uninstrumented\nfun:_Unwind_Resume_or_Rethrow=uninstrumented\nfun:_Unwind_SetGR=uninstrumented\nfun:_Unwind_SetIP=uninstrumented\nfun:__absvdi2=uninstrumented\nfun:__absvsi2=uninstrumented\nfun:__absvti2=uninstrumented\nfun:__acos_finite=uninstrumented\nfun:__acosf128_finite=uninstrumented\nfun:__acosf_finite=uninstrumented\nfun:__acosh_finite=uninstrumented\nfun:__acoshf128_finite=uninstrumented\nfun:__acoshf_finite=uninstrumented\nfun:__acoshl_finite=uninstrumented\nfun:__acosl_finite=uninstrumented\nfun:__addtf3=uninstrumented\nfun:__addvdi3=uninstrumented\nfun:__addvsi3=uninstrumented\nfun:__addvti3=uninstrumented\nfun:__adjtimex=uninstrumented\nfun:__arch_prctl=uninstrumented\nfun:__argz_count=uninstrumented\nfun:__argz_next=uninstrumented\nfun:__argz_stringify=uninstrumented\nfun:__ashlti3=uninstrumented\nfun:__ashrti3=uninstrumented\nfun:__asin_finite=uninstrumented\nfun:__asinf128_finit
e=uninstrumented\nfun:__asinf_finite=uninstrumented\nfun:__asinl_finite=uninstrumented\nfun:__asprintf=uninstrumented\nfun:__asprintf_chk=uninstrumented\nfun:__assert=uninstrumented\nfun:__assert_fail=uninstrumented\nfun:__assert_perror_fail=uninstrumented\nfun:__atan2_finite=uninstrumented\nfun:__atan2f128_finite=uninstrumented\nfun:__atan2f_finite=uninstrumented\nfun:__atan2l_finite=uninstrumented\nfun:__atanh_finite=uninstrumented\nfun:__atanhf128_finite=uninstrumented\nfun:__atanhf_finite=uninstrumented\nfun:__atanhl_finite=uninstrumented\nfun:__avx_resms64_12=uninstrumented\nfun:__avx_resms64_13=uninstrumented\nfun:__avx_resms64_14=uninstrumented\nfun:__avx_resms64_15=uninstrumented\nfun:__avx_resms64_16=uninstrumented\nfun:__avx_resms64_17=uninstrumented\nfun:__avx_resms64_18=uninstrumented\nfun:__avx_resms64f_12=uninstrumented\nfun:__avx_resms64f_13=uninstrumented\nfun:__avx_resms64f_14=uninstrumented\nfun:__avx_resms64f_15=uninstrumented\nfun:__avx_resms64f_16=uninstrumented\nfun:__avx_resms64f_17=uninstrumented\nfun:__avx_resms64fx_12=uninstrumented\nfun:__avx_resms64fx_13=uninstrumented\nfun:__avx_resms64fx_14=uninstrumented\nfun:__avx_resms64fx_15=uninstrumented\nfun:__avx_resms64fx_16=uninstrumented\nfun:__avx_resms64fx_17=uninstrumented\nfun:__avx_resms64x_12=uninstrumented\nfun:__avx_resms64x_13=uninstrumented\nfun:__avx_resms64x_14=uninstrumented\nfun:__avx_resms64x_15=uninstrumented\nfun:__avx_resms64x_16=uninstrumented\nfun:__avx_resms64x_17=uninstrumented\nfun:__avx_resms64x_18=uninstrumented\nfun:__avx_savms64_12=uninstrumented\nfun:__avx_savms64_13=uninstrumented\nfun:__avx_savms64_14=uninstrumented\nfun:__avx_savms64_15=uninstrumented\nfun:__avx_savms64_16=uninstrumented\nfun:__avx_savms64_17=uninstrumented\nfun:__avx_savms64_18=uninstrumented\nfun:__avx_savms64f_12=uninstrumented\nfun:__avx_savms64f_13=uninstrumented\nfun:__avx_savms64f_14=uninstrumented\nfun:__avx_savms64f_15=uninstrumented\nfun:__avx_savms64f_16=uninstrumented\nfun:__avx_savm
s64f_17=uninstrumented\nfun:__b64_ntop=uninstrumented\nfun:__b64_pton=uninstrumented\nfun:__backtrace=uninstrumented\nfun:__backtrace_symbols=uninstrumented\nfun:__backtrace_symbols_fd=uninstrumented\nfun:__bid128_abs=uninstrumented\nfun:__bid128_add=uninstrumented\nfun:__bid128_class=uninstrumented\nfun:__bid128_copy=uninstrumented\nfun:__bid128_copySign=uninstrumented\nfun:__bid128_div=uninstrumented\nfun:__bid128_fma=uninstrumented\nfun:__bid128_from_int32=uninstrumented\nfun:__bid128_from_int64=uninstrumented\nfun:__bid128_from_uint32=uninstrumented\nfun:__bid128_from_uint64=uninstrumented\nfun:__bid128_isCanonical=uninstrumented\nfun:__bid128_isFinite=uninstrumented\nfun:__bid128_isInf=uninstrumented\nfun:__bid128_isNaN=uninstrumented\nfun:__bid128_isNormal=uninstrumented\nfun:__bid128_isSignaling=uninstrumented\nfun:__bid128_isSigned=uninstrumented\nfun:__bid128_isSubnormal=uninstrumented\nfun:__bid128_isZero=uninstrumented\nfun:__bid128_mul=uninstrumented\nfun:__bid128_negate=uninstrumented\nfun:__bid128_quiet_equal=uninstrumented\nfun:__bid128_quiet_greater=uninstrumented\nfun:__bid128_quiet_greater_equal=uninstrumented\nfun:__bid128_quiet_greater_unordered=uninstrumented\nfun:__bid128_quiet_less=uninstrumented\nfun:__bid128_quiet_less_equal=uninstrumented\nfun:__bid128_quiet_less_unordered=uninstrumented\nfun:__bid128_quiet_not_equal=uninstrumented\nfun:__bid128_quiet_not_greater=uninstrumented\nfun:__bid128_quiet_not_less=uninstrumented\nfun:__bid128_quiet_ordered=uninstrumented\nfun:__bid128_quiet_unordered=uninstrumented\nfun:__bid128_radix=uninstrumented\nfun:__bid128_sameQuantum=uninstrumented\nfun:__bid128_signaling_greater=uninstrumented\nfun:__bid128_signaling_greater_equal=uninstrumented\nfun:__bid128_signaling_greater_unordered=uninstrumented\nfun:__bid128_signaling_less=uninstrumented\nfun:__bid128_signaling_less_equal=uninstrumented\nfun:__bid128_signaling_less_unordered=uninstrumented\nfun:__bid128_signaling_not_greater=uninstrumented\nfun:__bi
d128_signaling_not_less=uninstrumented\nfun:__bid128_sub=uninstrumented\nfun:__bid128_to_bid32=uninstrumented\nfun:__bid128_to_bid64=uninstrumented\nfun:__bid128_to_binary128=uninstrumented\nfun:__bid128_to_binary32=uninstrumented\nfun:__bid128_to_binary64=uninstrumented\nfun:__bid128_to_binary80=uninstrumented\nfun:__bid128_to_int32_ceil=uninstrumented\nfun:__bid128_to_int32_floor=uninstrumented\nfun:__bid128_to_int32_int=uninstrumented\nfun:__bid128_to_int32_rnint=uninstrumented\nfun:__bid128_to_int32_rninta=uninstrumented\nfun:__bid128_to_int32_xceil=uninstrumented\nfun:__bid128_to_int32_xfloor=uninstrumented\nfun:__bid128_to_int32_xint=uninstrumented\nfun:__bid128_to_int32_xrnint=uninstrumented\nfun:__bid128_to_int32_xrninta=uninstrumented\nfun:__bid128_to_int64_ceil=uninstrumented\nfun:__bid128_to_int64_floor=uninstrumented\nfun:__bid128_to_int64_int=uninstrumented\nfun:__bid128_to_int64_rnint=uninstrumented\nfun:__bid128_to_int64_rninta=uninstrumented\nfun:__bid128_to_int64_xceil=uninstrumented\nfun:__bid128_to_int64_xfloor=uninstrumented\nfun:__bid128_to_int64_xint=uninstrumented\nfun:__bid128_to_int64_xrnint=uninstrumented\nfun:__bid128_to_int64_xrninta=uninstrumented\nfun:__bid128_to_uint32_ceil=uninstrumented\nfun:__bid128_to_uint32_floor=uninstrumented\nfun:__bid128_to_uint32_int=uninstrumented\nfun:__bid128_to_uint32_rnint=uninstrumented\nfun:__bid128_to_uint32_rninta=uninstrumented\nfun:__bid128_to_uint32_xceil=uninstrumented\nfun:__bid128_to_uint32_xfloor=uninstrumented\nfun:__bid128_to_uint32_xint=uninstrumented\nfun:__bid128_to_uint32_xrnint=uninstrumented\nfun:__bid128_to_uint32_xrninta=uninstrumented\nfun:__bid128_to_uint64_ceil=uninstrumented\nfun:__bid128_to_uint64_floor=uninstrumented\nfun:__bid128_to_uint64_int=uninstrumented\nfun:__bid128_to_uint64_rnint=uninstrumented\nfun:__bid128_to_uint64_rninta=uninstrumented\nfun:__bid128_to_uint64_xceil=uninstrumented\nfun:__bid128_to_uint64_xfloor=uninstrumented\nfun:__bid128_to_uint64_xint=uninstrumen
ted\nfun:__bid128_to_uint64_xrnint=uninstrumented\nfun:__bid128_to_uint64_xrninta=uninstrumented\nfun:__bid128_totalOrder=uninstrumented\nfun:__bid128_totalOrderMag=uninstrumented\nfun:__bid128dd_add=uninstrumented\nfun:__bid128dd_div=uninstrumented\nfun:__bid128dd_mul=uninstrumented\nfun:__bid128dd_sub=uninstrumented\nfun:__bid128ddd_fma=uninstrumented\nfun:__bid128ddq_fma=uninstrumented\nfun:__bid128dq_add=uninstrumented\nfun:__bid128dq_div=uninstrumented\nfun:__bid128dq_mul=uninstrumented\nfun:__bid128dq_sub=uninstrumented\nfun:__bid128dqd_fma=uninstrumented\nfun:__bid128dqq_fma=uninstrumented\nfun:__bid128qd_add=uninstrumented\nfun:__bid128qd_div=uninstrumented\nfun:__bid128qd_mul=uninstrumented\nfun:__bid128qd_sub=uninstrumented\nfun:__bid128qdd_fma=uninstrumented\nfun:__bid128qdq_fma=uninstrumented\nfun:__bid128qqd_fma=uninstrumented\nfun:__bid32_to_bid128=uninstrumented\nfun:__bid32_to_bid64=uninstrumented\nfun:__bid32_to_binary128=uninstrumented\nfun:__bid32_to_binary32=uninstrumented\nfun:__bid32_to_binary64=uninstrumented\nfun:__bid32_to_binary80=uninstrumented\nfun:__bid64_abs=uninstrumented\nfun:__bid64_add=uninstrumented\nfun:__bid64_class=uninstrumented\nfun:__bid64_copy=uninstrumented\nfun:__bid64_copySign=uninstrumented\nfun:__bid64_div=uninstrumented\nfun:__bid64_from_int32=uninstrumented\nfun:__bid64_from_int64=uninstrumented\nfun:__bid64_from_uint32=uninstrumented\nfun:__bid64_from_uint64=uninstrumented\nfun:__bid64_isCanonical=uninstrumented\nfun:__bid64_isFinite=uninstrumented\nfun:__bid64_isInf=uninstrumented\nfun:__bid64_isNaN=uninstrumented\nfun:__bid64_isNormal=uninstrumented\nfun:__bid64_isSignaling=uninstrumented\nfun:__bid64_isSigned=uninstrumented\nfun:__bid64_isSubnormal=uninstrumented\nfun:__bid64_isZero=uninstrumented\nfun:__bid64_mul=uninstrumented\nfun:__bid64_negate=uninstrumented\nfun:__bid64_quiet_equal=uninstrumented\nfun:__bid64_quiet_greater=uninstrumented\nfun:__bid64_quiet_greater_equal=uninstrumented\nfun:__bid64_quiet_grea
ter_unordered=uninstrumented\nfun:__bid64_quiet_less=uninstrumented\nfun:__bid64_quiet_less_equal=uninstrumented\nfun:__bid64_quiet_less_unordered=uninstrumented\nfun:__bid64_quiet_not_equal=uninstrumented\nfun:__bid64_quiet_not_greater=uninstrumented\nfun:__bid64_quiet_not_less=uninstrumented\nfun:__bid64_quiet_ordered=uninstrumented\nfun:__bid64_quiet_unordered=uninstrumented\nfun:__bid64_radix=uninstrumented\nfun:__bid64_sameQuantum=uninstrumented\nfun:__bid64_signaling_greater=uninstrumented\nfun:__bid64_signaling_greater_equal=uninstrumented\nfun:__bid64_signaling_greater_unordered=uninstrumented\nfun:__bid64_signaling_less=uninstrumented\nfun:__bid64_signaling_less_equal=uninstrumented\nfun:__bid64_signaling_less_unordered=uninstrumented\nfun:__bid64_signaling_not_greater=uninstrumented\nfun:__bid64_signaling_not_less=uninstrumented\nfun:__bid64_sub=uninstrumented\nfun:__bid64_to_bid128=uninstrumented\nfun:__bid64_to_bid32=uninstrumented\nfun:__bid64_to_binary128=uninstrumented\nfun:__bid64_to_binary32=uninstrumented\nfun:__bid64_to_binary64=uninstrumented\nfun:__bid64_to_binary80=uninstrumented\nfun:__bid64_to_int32_ceil=uninstrumented\nfun:__bid64_to_int32_floor=uninstrumented\nfun:__bid64_to_int32_int=uninstrumented\nfun:__bid64_to_int32_rnint=uninstrumented\nfun:__bid64_to_int32_rninta=uninstrumented\nfun:__bid64_to_int32_xceil=uninstrumented\nfun:__bid64_to_int32_xfloor=uninstrumented\nfun:__bid64_to_int32_xint=uninstrumented\nfun:__bid64_to_int32_xrnint=uninstrumented\nfun:__bid64_to_int32_xrninta=uninstrumented\nfun:__bid64_to_int64_ceil=uninstrumented\nfun:__bid64_to_int64_floor=uninstrumented\nfun:__bid64_to_int64_int=uninstrumented\nfun:__bid64_to_int64_rnint=uninstrumented\nfun:__bid64_to_int64_rninta=uninstrumented\nfun:__bid64_to_int64_xceil=uninstrumented\nfun:__bid64_to_int64_xfloor=uninstrumented\nfun:__bid64_to_int64_xint=uninstrumented\nfun:__bid64_to_int64_xrnint=uninstrumented\nfun:__bid64_to_int64_xrninta=uninstrumented\nfun:__bid64_to_uin
t32_ceil=uninstrumented\nfun:__bid64_to_uint32_floor=uninstrumented\nfun:__bid64_to_uint32_int=uninstrumented\nfun:__bid64_to_uint32_rnint=uninstrumented\nfun:__bid64_to_uint32_rninta=uninstrumented\nfun:__bid64_to_uint32_xceil=uninstrumented\nfun:__bid64_to_uint32_xfloor=uninstrumented\nfun:__bid64_to_uint32_xint=uninstrumented\nfun:__bid64_to_uint32_xrnint=uninstrumented\nfun:__bid64_to_uint32_xrninta=uninstrumented\nfun:__bid64_to_uint64_ceil=uninstrumented\nfun:__bid64_to_uint64_floor=uninstrumented\nfun:__bid64_to_uint64_int=uninstrumented\nfun:__bid64_to_uint64_rnint=uninstrumented\nfun:__bid64_to_uint64_rninta=uninstrumented\nfun:__bid64_to_uint64_xceil=uninstrumented\nfun:__bid64_to_uint64_xfloor=uninstrumented\nfun:__bid64_to_uint64_xint=uninstrumented\nfun:__bid64_to_uint64_xrnint=uninstrumented\nfun:__bid64_to_uint64_xrninta=uninstrumented\nfun:__bid64_totalOrder=uninstrumented\nfun:__bid64_totalOrderMag=uninstrumented\nfun:__bid64ddq_fma=uninstrumented\nfun:__bid64dq_add=uninstrumented\nfun:__bid64dq_div=uninstrumented\nfun:__bid64dq_mul=uninstrumented\nfun:__bid64dq_sub=uninstrumented\nfun:__bid64dqd_fma=uninstrumented\nfun:__bid64dqq_fma=uninstrumented\nfun:__bid64qd_add=uninstrumented\nfun:__bid64qd_div=uninstrumented\nfun:__bid64qd_mul=uninstrumented\nfun:__bid64qd_sub=uninstrumented\nfun:__bid64qdd_fma=uninstrumented\nfun:__bid64qdq_fma=uninstrumented\nfun:__bid64qq_add=uninstrumented\nfun:__bid64qq_div=uninstrumented\nfun:__bid64qq_mul=uninstrumented\nfun:__bid64qq_sub=uninstrumented\nfun:__bid64qqd_fma=uninstrumented\nfun:__bid64qqq_fma=uninstrumented\nfun:__bid_adddd3=uninstrumented\nfun:__bid_addsd3=uninstrumented\nfun:__bid_addtd3=uninstrumented\nfun:__bid_divdd3=uninstrumented\nfun:__bid_divsd3=uninstrumented\nfun:__bid_divtd3=uninstrumented\nfun:__bid_eqdd2=uninstrumented\nfun:__bid_eqsd2=uninstrumented\nfun:__bid_eqtd2=uninstrumented\nfun:__bid_extendddtd2=uninstrumented\nfun:__bid_extendddtf=uninstrumented\nfun:__bid_extendddxf=uninstrument
ed\nfun:__bid_extenddfdd=uninstrumented\nfun:__bid_extenddftd=uninstrumented\nfun:__bid_extendsddd2=uninstrumented\nfun:__bid_extendsddf=uninstrumented\nfun:__bid_extendsdtd2=uninstrumented\nfun:__bid_extendsdtf=uninstrumented\nfun:__bid_extendsdxf=uninstrumented\nfun:__bid_extendsfdd=uninstrumented\nfun:__bid_extendsfsd=uninstrumented\nfun:__bid_extendsftd=uninstrumented\nfun:__bid_extendtftd=uninstrumented\nfun:__bid_extendxftd=uninstrumented\nfun:__bid_fixdddi=uninstrumented\nfun:__bid_fixddsi=uninstrumented\nfun:__bid_fixsddi=uninstrumented\nfun:__bid_fixsdsi=uninstrumented\nfun:__bid_fixtddi=uninstrumented\nfun:__bid_fixtdsi=uninstrumented\nfun:__bid_fixunsdddi=uninstrumented\nfun:__bid_fixunsddsi=uninstrumented\nfun:__bid_fixunssddi=uninstrumented\nfun:__bid_fixunssdsi=uninstrumented\nfun:__bid_fixunstddi=uninstrumented\nfun:__bid_fixunstdsi=uninstrumented\nfun:__bid_floatdidd=uninstrumented\nfun:__bid_floatdisd=uninstrumented\nfun:__bid_floatditd=uninstrumented\nfun:__bid_floatsidd=uninstrumented\nfun:__bid_floatsisd=uninstrumented\nfun:__bid_floatsitd=uninstrumented\nfun:__bid_floatunsdidd=uninstrumented\nfun:__bid_floatunsdisd=uninstrumented\nfun:__bid_floatunsditd=uninstrumented\nfun:__bid_floatunssidd=uninstrumented\nfun:__bid_floatunssisd=uninstrumented\nfun:__bid_floatunssitd=uninstrumented\nfun:__bid_gedd2=uninstrumented\nfun:__bid_gesd2=uninstrumented\nfun:__bid_getd2=uninstrumented\nfun:__bid_gtdd2=uninstrumented\nfun:__bid_gtsd2=uninstrumented\nfun:__bid_gttd2=uninstrumented\nfun:__bid_ledd2=uninstrumented\nfun:__bid_lesd2=uninstrumented\nfun:__bid_letd2=uninstrumented\nfun:__bid_ltdd2=uninstrumented\nfun:__bid_ltsd2=uninstrumented\nfun:__bid_lttd2=uninstrumented\nfun:__bid_muldd3=uninstrumented\nfun:__bid_mulsd3=uninstrumented\nfun:__bid_multd3=uninstrumented\nfun:__bid_nedd2=uninstrumented\nfun:__bid_nesd2=uninstrumented\nfun:__bid_netd2=uninstrumented\nfun:__bid_round128_19_38=uninstrumented\nfun:__bid_round192_39_57=uninstrumented\nfun:__bid_rou
nd256_58_76=uninstrumented\nfun:__bid_round64_2_18=uninstrumented\nfun:__bid_subdd3=uninstrumented\nfun:__bid_subsd3=uninstrumented\nfun:__bid_subtd3=uninstrumented\nfun:__bid_truncdddf=uninstrumented\nfun:__bid_truncddsd2=uninstrumented\nfun:__bid_truncddsf=uninstrumented\nfun:__bid_truncdfsd=uninstrumented\nfun:__bid_truncsdsf=uninstrumented\nfun:__bid_trunctddd2=uninstrumented\nfun:__bid_trunctddf=uninstrumented\nfun:__bid_trunctdsd2=uninstrumented\nfun:__bid_trunctdsf=uninstrumented\nfun:__bid_trunctdtf=uninstrumented\nfun:__bid_trunctdxf=uninstrumented\nfun:__bid_trunctfdd=uninstrumented\nfun:__bid_trunctfsd=uninstrumented\nfun:__bid_truncxfdd=uninstrumented\nfun:__bid_truncxfsd=uninstrumented\nfun:__bid_unorddd2=uninstrumented\nfun:__bid_unordsd2=uninstrumented\nfun:__bid_unordtd2=uninstrumented\nfun:__binary128_to_bid128=uninstrumented\nfun:__binary128_to_bid32=uninstrumented\nfun:__binary128_to_bid64=uninstrumented\nfun:__binary32_to_bid128=uninstrumented\nfun:__binary32_to_bid32=uninstrumented\nfun:__binary32_to_bid64=uninstrumented\nfun:__binary64_to_bid128=uninstrumented\nfun:__binary64_to_bid32=uninstrumented\nfun:__binary64_to_bid64=uninstrumented\nfun:__binary80_to_bid128=uninstrumented\nfun:__binary80_to_bid32=uninstrumented\nfun:__binary80_to_bid64=uninstrumented\nfun:__bsd_getpgrp=uninstrumented\nfun:__bswapdi2=uninstrumented\nfun:__bswapsi2=uninstrumented\nfun:__bzero=uninstrumented\nfun:__call_tls_dtors=uninstrumented\nfun:__chk_fail=uninstrumented\nfun:__clear_cache=uninstrumented\nfun:__clock_gettime=uninstrumented\nfun:__clog10=uninstrumented\nfun:__clog10f=uninstrumented\nfun:__clog10l=uninstrumented\nfun:__clone=uninstrumented\nfun:__close=uninstrumented\nfun:__close_nocancel=uninstrumented\nfun:__clrsbdi2=uninstrumented\nfun:__clrsbti2=uninstrumented\nfun:__clzdi2=uninstrumented\nfun:__clzti2=uninstrumented\nfun:__cmpti2=uninstrumented\nfun:__cmsg_nxthdr=uninstrumented\nfun:__confstr_chk=uninstrumented\nfun:__connect=uninstrumented\nfun:__co
py_grp=uninstrumented\nfun:__cosh_finite=uninstrumented\nfun:__coshf128_finite=uninstrumented\nfun:__coshf_finite=uninstrumented\nfun:__coshl_finite=uninstrumented\nfun:__cpu_indicator_init=uninstrumented\nfun:__create_ib_request=uninstrumented\nfun:__ctype_b_loc=uninstrumented\nfun:__ctype_get_mb_cur_max=uninstrumented\nfun:__ctype_init=uninstrumented\nfun:__ctype_tolower_loc=uninstrumented\nfun:__ctype_toupper_loc=uninstrumented\nfun:__ctzdi2=uninstrumented\nfun:__ctzti2=uninstrumented\nfun:__cxa_at_quick_exit=uninstrumented\nfun:__cxa_atexit=uninstrumented\nfun:__cxa_finalize=uninstrumented\nfun:__cxa_thread_atexit_impl=uninstrumented\nfun:__cyg_profile_func_enter=uninstrumented\nfun:__cyg_profile_func_exit=uninstrumented\nfun:__dcgettext=uninstrumented\nfun:__default_morecore=uninstrumented\nfun:__deregister_frame=uninstrumented\nfun:__deregister_frame_info=uninstrumented\nfun:__deregister_frame_info_bases=uninstrumented\nfun:__dfp_clear_except=uninstrumented\nfun:__dfp_get_round=uninstrumented\nfun:__dfp_raise_except=uninstrumented\nfun:__dfp_set_round=uninstrumented\nfun:__dfp_test_except=uninstrumented\nfun:__dgettext=uninstrumented\nfun:__divdc3=uninstrumented\nfun:__divhc3=uninstrumented\nfun:__divmodti4=uninstrumented\nfun:__divsc3=uninstrumented\nfun:__divtc3=uninstrumented\nfun:__divtf3=uninstrumented\nfun:__divti3=uninstrumented\nfun:__divxc3=uninstrumented\nfun:__dn_comp=uninstrumented\nfun:__dn_count_labels=uninstrumented\nfun:__dn_expand=uninstrumented\nfun:__dn_skipname=uninstrumented\nfun:__do_niscall3=uninstrumented\nfun:__dprintf_chk=uninstrumented\nfun:__dup2=uninstrumented\nfun:__duplocale=uninstrumented\nfun:__emutls_get_address=uninstrumented\nfun:__emutls_register_common=uninstrumented\nfun:__enable_execute_stack=uninstrumented\nfun:__endmntent=uninstrumented\nfun:__eprintf=uninstrumented\nfun:__eqhf2=uninstrumented\nfun:__eqtf2=uninstrumented\nfun:__errno_location=uninstrumented\nfun:__exp10_finite=uninstrumented\nfun:__exp10f128_finite=uni
nstrumented\nfun:__exp10f_finite=uninstrumented\nfun:__exp10l_finite=uninstrumented\nfun:__exp2_finite=uninstrumented\nfun:__exp2f128_finite=uninstrumented\nfun:__exp2f_finite=uninstrumented\nfun:__exp2l_finite=uninstrumented\nfun:__exp_finite=uninstrumented\nfun:__expf128_finite=uninstrumented\nfun:__expf_finite=uninstrumented\nfun:__expl_finite=uninstrumented\nfun:__explicit_bzero_chk=uninstrumented\nfun:__extenddftf2=uninstrumented\nfun:__extendhfdf2=uninstrumented\nfun:__extendhfsf2=uninstrumented\nfun:__extendhftf2=uninstrumented\nfun:__extendhfxf2=uninstrumented\nfun:__extendsfdf2=uninstrumented\nfun:__extendsftf2=uninstrumented\nfun:__extendxftf2=uninstrumented\nfun:__fbufsize=uninstrumented\nfun:__fcntl=uninstrumented\nfun:__fdelt_chk=uninstrumented\nfun:__fdelt_warn=uninstrumented\nfun:__fentry__=uninstrumented\nfun:__ffs=uninstrumented\nfun:__ffsdi2=uninstrumented\nfun:__ffsti2=uninstrumented\nfun:__fgets_chk=uninstrumented\nfun:__fgets_unlocked_chk=uninstrumented\nfun:__fgetws_chk=uninstrumented\nfun:__fgetws_unlocked_chk=uninstrumented\nfun:__file_change_detection_for_fp=uninstrumented\nfun:__file_change_detection_for_path=uninstrumented\nfun:__file_change_detection_for_stat=uninstrumented\nfun:__file_is_unchanged=uninstrumented\nfun:__finite=uninstrumented\nfun:__finitef=uninstrumented\nfun:__finitef128=uninstrumented\nfun:__finitel=uninstrumented\nfun:__fixdfti=uninstrumented\nfun:__fixhfti=uninstrumented\nfun:__fixsfti=uninstrumented\nfun:__fixtfdi=uninstrumented\nfun:__fixtfsi=uninstrumented\nfun:__fixtfti=uninstrumented\nfun:__fixunsdfdi=uninstrumented\nfun:__fixunsdfti=uninstrumented\nfun:__fixunshfti=uninstrumented\nfun:__fixunssfdi=uninstrumented\nfun:__fixunssfti=uninstrumented\nfun:__fixunstfdi=uninstrumented\nfun:__fixunstfsi=uninstrumented\nfun:__fixunstfti=uninstrumented\nfun:__fixunsxfdi=uninstrumented\nfun:__fixunsxfti=uninstrumented\nfun:__fixxfti=uninstrumented\nfun:__flbf=uninstrumented\nfun:__floatditf=uninstrumented\nfun:__floatsitf=u
ninstrumented\nfun:__floattidf=uninstrumented\nfun:__floattihf=uninstrumented\nfun:__floattisf=uninstrumented\nfun:__floattitf=uninstrumented\nfun:__floattixf=uninstrumented\nfun:__floatunditf=uninstrumented\nfun:__floatunsitf=uninstrumented\nfun:__floatuntidf=uninstrumented\nfun:__floatuntihf=uninstrumented\nfun:__floatuntisf=uninstrumented\nfun:__floatuntitf=uninstrumented\nfun:__floatuntixf=uninstrumented\nfun:__fmod_finite=uninstrumented\nfun:__fmodf128_finite=uninstrumented\nfun:__fmodf_finite=uninstrumented\nfun:__fmodl_finite=uninstrumented\nfun:__follow_path=uninstrumented\nfun:__fork=uninstrumented\nfun:__fortify_fail=uninstrumented\nfun:__fp_nquery=uninstrumented\nfun:__fp_query=uninstrumented\nfun:__fp_resstat=uninstrumented\nfun:__fpclassify=uninstrumented\nfun:__fpclassifyf=uninstrumented\nfun:__fpclassifyf128=uninstrumented\nfun:__fpclassifyl=uninstrumented\nfun:__fpending=uninstrumented\nfun:__fprintf_chk=uninstrumented\nfun:__fpurge=uninstrumented\nfun:__fread_chk=uninstrumented\nfun:__fread_unlocked_chk=uninstrumented\nfun:__freadable=uninstrumented\nfun:__freading=uninstrumented\nfun:__free_fdresult=uninstrumented\nfun:__freelocale=uninstrumented\nfun:__fseeko64=uninstrumented\nfun:__fsetlocking=uninstrumented\nfun:__fstat64=uninstrumented\nfun:__ftello64=uninstrumented\nfun:__fwprintf_chk=uninstrumented\nfun:__fwritable=uninstrumented\nfun:__fwriting=uninstrumented\nfun:__fxstat=uninstrumented\nfun:__fxstat64=uninstrumented\nfun:__fxstatat=uninstrumented\nfun:__fxstatat64=uninstrumented\nfun:__gamma_r_finite=uninstrumented\nfun:__gammaf128_r_finite=uninstrumented\nfun:__gammaf_r_finite=uninstrumented\nfun:__gammal_r_finite=uninstrumented\nfun:__gcc_bcmp=uninstrumented\nfun:__gcc_personality_v0=uninstrumented\nfun:__gconv_create_spec=uninstrumented\nfun:__gconv_destroy_spec=uninstrumented\nfun:__gconv_get_alias_db=uninstrumented\nfun:__gconv_get_cache=uninstrumented\nfun:__gconv_get_modules_db=uninstrumented\nfun:__gconv_open=uninstrumented\nfun:__
gconv_transliterate=uninstrumented\nfun:__generic_findstack=uninstrumented\nfun:__generic_morestack=uninstrumented\nfun:__generic_morestack_set_initial_sp=uninstrumented\nfun:__generic_releasestack=uninstrumented\nfun:__getauxval=uninstrumented\nfun:__getcwd_chk=uninstrumented\nfun:__getdelim=uninstrumented\nfun:__getdomainname_chk=uninstrumented\nfun:__getf2=uninstrumented\nfun:__getgroups_chk=uninstrumented\nfun:__gethostname_chk=uninstrumented\nfun:__getlogin_r_chk=uninstrumented\nfun:__getmntent_r=uninstrumented\nfun:__getpagesize=uninstrumented\nfun:__getpgid=uninstrumented\nfun:__getpid=uninstrumented\nfun:__getrlimit=uninstrumented\nfun:__gets_chk=uninstrumented\nfun:__gettimeofday=uninstrumented\nfun:__getwd_chk=uninstrumented\nfun:__gmtime_r=uninstrumented\nfun:__gttf2=uninstrumented\nfun:__h_errno_location=uninstrumented\nfun:__hostalias=uninstrumented\nfun:__hypot_finite=uninstrumented\nfun:__hypotf128_finite=uninstrumented\nfun:__hypotf_finite=uninstrumented\nfun:__hypotl_finite=uninstrumented\nfun:__idna_from_dns_encoding=uninstrumented\nfun:__idna_to_dns_encoding=uninstrumented\nfun:__inet6_scopeid_pton=uninstrumented\nfun:__inet_aton_exact=uninstrumented\nfun:__inet_pton_length=uninstrumented\nfun:__internal_endnetgrent=uninstrumented\nfun:__internal_getnetgrent_r=uninstrumented\nfun:__internal_setnetgrent=uninstrumented\nfun:__isalnum_l=uninstrumented\nfun:__isalpha_l=uninstrumented\nfun:__isascii_l=uninstrumented\nfun:__isblank_l=uninstrumented\nfun:__iscanonicall=uninstrumented\nfun:__iscntrl_l=uninstrumented\nfun:__isctype=uninstrumented\nfun:__isdigit_l=uninstrumented\nfun:__iseqsig=uninstrumented\nfun:__iseqsigf=uninstrumented\nfun:__iseqsigf128=uninstrumented\nfun:__iseqsigl=uninstrumented\nfun:__isgraph_l=uninstrumented\nfun:__isinf=uninstrumented\nfun:__isinff=uninstrumented\nfun:__isinff128=uninstrumented\nfun:__isinfl=uninstrumented\nfun:__islower_l=uninstrumented\nfun:__isnan=uninstrumented\nfun:__isnanf=uninstrumented\nfun:__isnanf128=uni
nstrumented\nfun:__isnanl=uninstrumented\nfun:__isoc99_fscanf=uninstrumented\nfun:__isoc99_fwscanf=uninstrumented\nfun:__isoc99_scanf=uninstrumented\nfun:__isoc99_sscanf=uninstrumented\nfun:__isoc99_swscanf=uninstrumented\nfun:__isoc99_vfscanf=uninstrumented\nfun:__isoc99_vfwscanf=uninstrumented\nfun:__isoc99_vscanf=uninstrumented\nfun:__isoc99_vsscanf=uninstrumented\nfun:__isoc99_vswscanf=uninstrumented\nfun:__isoc99_vwscanf=uninstrumented\nfun:__isoc99_wscanf=uninstrumented\nfun:__isprint_l=uninstrumented\nfun:__ispunct_l=uninstrumented\nfun:__issignaling=uninstrumented\nfun:__issignalingf=uninstrumented\nfun:__issignalingf128=uninstrumented\nfun:__issignalingl=uninstrumented\nfun:__isspace_l=uninstrumented\nfun:__isupper_l=uninstrumented\nfun:__iswalnum_l=uninstrumented\nfun:__iswalpha_l=uninstrumented\nfun:__iswblank_l=uninstrumented\nfun:__iswcntrl_l=uninstrumented\nfun:__iswctype=uninstrumented\nfun:__iswctype_l=uninstrumented\nfun:__iswdigit_l=uninstrumented\nfun:__iswgraph_l=uninstrumented\nfun:__iswlower_l=uninstrumented\nfun:__iswprint_l=uninstrumented\nfun:__iswpunct_l=uninstrumented\nfun:__iswspace_l=uninstrumented\nfun:__iswupper_l=uninstrumented\nfun:__iswxdigit_l=uninstrumented\nfun:__isxdigit_l=uninstrumented\nfun:__ivaliduser=uninstrumented\nfun:__j0_finite=uninstrumented\nfun:__j0f128_finite=uninstrumented\nfun:__j0f_finite=uninstrumented\nfun:__j0l_finite=uninstrumented\nfun:__j1_finite=uninstrumented\nfun:__j1f128_finite=uninstrumented\nfun:__j1f_finite=uninstrumented\nfun:__j1l_finite=uninstrumented\nfun:__jn_finite=uninstrumented\nfun:__jnf128_finite=uninstrumented\nfun:__jnf_finite=uninstrumented\nfun:__jnl_finite=uninstrumented\nfun:__letf2=uninstrumented\nfun:__lgamma_r_finite=uninstrumented\nfun:__lgammaf128_r_finite=uninstrumented\nfun:__lgammaf_r_finite=uninstrumented\nfun:__lgammal_r_finite=uninstrumented\nfun:__libanl_version_placeholder=uninstrumented\nfun:__libc_alloc_buffer_alloc_array=uninstrumented\nfun:__libc_alloc_buffer_allocate
=uninstrumented\nfun:__libc_alloc_buffer_copy_bytes=uninstrumented\nfun:__libc_alloc_buffer_copy_string=uninstrumented\nfun:__libc_alloc_buffer_create_failure=uninstrumented\nfun:__libc_alloca_cutoff=uninstrumented\nfun:__libc_allocate_once_slow=uninstrumented\nfun:__libc_allocate_rtsig=uninstrumented\nfun:__libc_calloc=uninstrumented\nfun:__libc_clntudp_bufcreate=uninstrumented\nfun:__libc_current_sigrtmax=uninstrumented\nfun:__libc_current_sigrtmin=uninstrumented\nfun:__libc_dn_expand=uninstrumented\nfun:__libc_dn_skipname=uninstrumented\nfun:__libc_dynarray_at_failure=uninstrumented\nfun:__libc_dynarray_emplace_enlarge=uninstrumented\nfun:__libc_dynarray_finalize=uninstrumented\nfun:__libc_dynarray_resize=uninstrumented\nfun:__libc_dynarray_resize_clear=uninstrumented\nfun:__libc_early_init=uninstrumented\nfun:__libc_fatal=uninstrumented\nfun:__libc_fcntl64=uninstrumented\nfun:__libc_fork=uninstrumented\nfun:__libc_free=uninstrumented\nfun:__libc_freeres=uninstrumented\nfun:__libc_ifunc_impl_list=uninstrumented\nfun:__libc_init_first=uninstrumented\nfun:__libc_mallinfo=uninstrumented\nfun:__libc_malloc=uninstrumented\nfun:__libc_mallopt=uninstrumented\nfun:__libc_memalign=uninstrumented\nfun:__libc_msgrcv=uninstrumented\nfun:__libc_msgsnd=uninstrumented\nfun:__libc_ns_makecanon=uninstrumented\nfun:__libc_ns_samename=uninstrumented\nfun:__libc_pread=uninstrumented\nfun:__libc_pvalloc=uninstrumented\nfun:__libc_pwrite=uninstrumented\nfun:__libc_realloc=uninstrumented\nfun:__libc_reallocarray=uninstrumented\nfun:__libc_res_dnok=uninstrumented\nfun:__libc_res_hnok=uninstrumented\nfun:__libc_res_nameinquery=uninstrumented\nfun:__libc_res_queriesmatch=uninstrumented\nfun:__libc_rpc_getport=uninstrumented\nfun:__libc_sa_len=uninstrumented\nfun:__libc_scratch_buffer_dupfree=uninstrumented\nfun:__libc_scratch_buffer_grow=uninstrumented\nfun:__libc_scratch_buffer_grow_preserve=uninstrumented\nfun:__libc_scratch_buffer_set_array_size=uninstrumented\nfun:__libc_secure_getenv
=uninstrumented\nfun:__libc_sigaction=uninstrumented\nfun:__libc_start_main=uninstrumented\nfun:__libc_system=uninstrumented\nfun:__libc_unwind_link_get=uninstrumented\nfun:__libc_valloc=uninstrumented\nfun:__libdl_version_placeholder=uninstrumented\nfun:__libpthread_version_placeholder=uninstrumented\nfun:__librt_version_placeholder=uninstrumented\nfun:__libutil_version_placeholder=uninstrumented\nfun:__lll_lock_wait_private=uninstrumented\nfun:__lll_lock_wake_private=uninstrumented\nfun:__loc_aton=uninstrumented\nfun:__loc_ntoa=uninstrumented\nfun:__log10_finite=uninstrumented\nfun:__log10f128_finite=uninstrumented\nfun:__log10f_finite=uninstrumented\nfun:__log10l_finite=uninstrumented\nfun:__log2_finite=uninstrumented\nfun:__log2f128_finite=uninstrumented\nfun:__log2f_finite=uninstrumented\nfun:__log2l_finite=uninstrumented\nfun:__log_finite=uninstrumented\nfun:__logf128_finite=uninstrumented\nfun:__logf_finite=uninstrumented\nfun:__logl_finite=uninstrumented\nfun:__longjmp_chk=uninstrumented\nfun:__lseek=uninstrumented\nfun:__lshrti3=uninstrumented\nfun:__lttf2=uninstrumented\nfun:__lxstat=uninstrumented\nfun:__lxstat64=uninstrumented\nfun:__madvise=uninstrumented\nfun:__mbrlen=uninstrumented\nfun:__mbrtowc=uninstrumented\nfun:__mbsnrtowcs_chk=uninstrumented\nfun:__mbsrtowcs_chk=uninstrumented\nfun:__mbstowcs_chk=uninstrumented\nfun:__memcmpeq=uninstrumented\nfun:__memcpy_chk=uninstrumented\nfun:__memmove_chk=uninstrumented\nfun:__mempcpy=uninstrumented\nfun:__mempcpy_chk=uninstrumented\nfun:__mempcpy_small=uninstrumented\nfun:__memset_chk=uninstrumented\nfun:__merge_grp=uninstrumented\nfun:__mktemp=uninstrumented\nfun:__mmap=uninstrumented\nfun:__modti3=uninstrumented\nfun:__monstartup=uninstrumented\nfun:__morestack=uninstrumented\nfun:__morestack_allocate_stack_space=uninstrumented\nfun:__morestack_block_signals=uninstrumented\nfun:__morestack_fail=uninstrumented\nfun:__morestack_get_guard=uninstrumented\nfun:__morestack_large_model=uninstrumented\nfun:__more
stack_load_mmap=uninstrumented\nfun:__morestack_make_guard=uninstrumented\nfun:__morestack_non_split=uninstrumented\nfun:__morestack_release_segments=uninstrumented\nfun:__morestack_set_guard=uninstrumented\nfun:__morestack_unblock_signals=uninstrumented\nfun:__mprotect=uninstrumented\nfun:__mq_open_2=uninstrumented\nfun:__muldc3=uninstrumented\nfun:__mulhc3=uninstrumented\nfun:__mulsc3=uninstrumented\nfun:__multc3=uninstrumented\nfun:__multf3=uninstrumented\nfun:__multi3=uninstrumented\nfun:__mulvdi3=uninstrumented\nfun:__mulvsi3=uninstrumented\nfun:__mulvti3=uninstrumented\nfun:__mulxc3=uninstrumented\nfun:__munmap=uninstrumented\nfun:__nanosleep=uninstrumented\nfun:__negtf2=uninstrumented\nfun:__negti2=uninstrumented\nfun:__negvdi2=uninstrumented\nfun:__negvsi2=uninstrumented\nfun:__negvti2=uninstrumented\nfun:__nehf2=uninstrumented\nfun:__netf2=uninstrumented\nfun:__netlink_assert_response=uninstrumented\nfun:__newlocale=uninstrumented\nfun:__nis_default_access=uninstrumented\nfun:__nis_default_group=uninstrumented\nfun:__nis_default_owner=uninstrumented\nfun:__nis_default_ttl=uninstrumented\nfun:__nis_finddirectory=uninstrumented\nfun:__nis_hash=uninstrumented\nfun:__nisbind_connect=uninstrumented\nfun:__nisbind_create=uninstrumented\nfun:__nisbind_destroy=uninstrumented\nfun:__nisbind_next=uninstrumented\nfun:__nl_langinfo_l=uninstrumented\nfun:__nptl_change_stack_perm=uninstrumented\nfun:__nptl_create_event=uninstrumented\nfun:__nptl_death_event=uninstrumented\nfun:__ns_get16=uninstrumented\nfun:__ns_get32=uninstrumented\nfun:__ns_name_compress=uninstrumented\nfun:__ns_name_ntop=uninstrumented\nfun:__ns_name_pack=uninstrumented\nfun:__ns_name_pton=uninstrumented\nfun:__ns_name_skip=uninstrumented\nfun:__ns_name_uncompress=uninstrumented\nfun:__ns_name_unpack=uninstrumented\nfun:__nss_configure_lookup=uninstrumented\nfun:__nss_database_get=uninstrumented\nfun:__nss_database_lookup=uninstrumented\nfun:__nss_disable_nscd=uninstrumented\nfun:__nss_files_data_ende
nt=uninstrumented\nfun:__nss_files_data_open=uninstrumented\nfun:__nss_files_data_put=uninstrumented\nfun:__nss_files_data_setent=uninstrumented\nfun:__nss_files_fopen=uninstrumented\nfun:__nss_group_lookup=uninstrumented\nfun:__nss_group_lookup2=uninstrumented\nfun:__nss_hash=uninstrumented\nfun:__nss_hostname_digits_dots=uninstrumented\nfun:__nss_hosts_lookup=uninstrumented\nfun:__nss_hosts_lookup2=uninstrumented\nfun:__nss_lookup=uninstrumented\nfun:__nss_lookup_function=uninstrumented\nfun:__nss_next=uninstrumented\nfun:__nss_next2=uninstrumented\nfun:__nss_parse_line_result=uninstrumented\nfun:__nss_passwd_lookup=uninstrumented\nfun:__nss_passwd_lookup2=uninstrumented\nfun:__nss_readline=uninstrumented\nfun:__nss_services_lookup2=uninstrumented\nfun:__obstack_printf_chk=uninstrumented\nfun:__obstack_vprintf_chk=uninstrumented\nfun:__open=uninstrumented\nfun:__open64=uninstrumented\nfun:__open64_2=uninstrumented\nfun:__open64_nocancel=uninstrumented\nfun:__open_2=uninstrumented\nfun:__open_catalog=uninstrumented\nfun:__open_nocancel=uninstrumented\nfun:__openat64_2=uninstrumented\nfun:__openat_2=uninstrumented\nfun:__overflow=uninstrumented\nfun:__p_cdname=uninstrumented\nfun:__p_cdnname=uninstrumented\nfun:__p_class=uninstrumented\nfun:__p_fqname=uninstrumented\nfun:__p_fqnname=uninstrumented\nfun:__p_option=uninstrumented\nfun:__p_query=uninstrumented\nfun:__p_rcode=uninstrumented\nfun:__p_secstodate=uninstrumented\nfun:__p_time=uninstrumented\nfun:__p_type=uninstrumented\nfun:__paritydi2=uninstrumented\nfun:__parityti2=uninstrumented\nfun:__pipe=uninstrumented\nfun:__poll=uninstrumented\nfun:__poll_chk=uninstrumented\nfun:__popcountdi2=uninstrumented\nfun:__popcountti2=uninstrumented\nfun:__posix_getopt=uninstrumented\nfun:__pow_finite=uninstrumented\nfun:__powf128_finite=uninstrumented\nfun:__powf_finite=uninstrumented\nfun:__powidf2=uninstrumented\nfun:__powisf2=uninstrumented\nfun:__powitf2=uninstrumented\nfun:__powixf2=uninstrumented\nfun:__powl_finite=un
instrumented\nfun:__ppoll_chk=uninstrumented\nfun:__pread64=uninstrumented\nfun:__pread64_chk=uninstrumented\nfun:__pread64_nocancel=uninstrumented\nfun:__pread_chk=uninstrumented\nfun:__prepare_niscall=uninstrumented\nfun:__printf_chk=uninstrumented\nfun:__printf_fp=uninstrumented\nfun:__profile_frequency=uninstrumented\nfun:__pthread_atfork=uninstrumented\nfun:__pthread_cleanup_routine=uninstrumented\nfun:__pthread_get_minstack=uninstrumented\nfun:__pthread_getspecific=uninstrumented\nfun:__pthread_key_create=uninstrumented\nfun:__pthread_mutex_destroy=uninstrumented\nfun:__pthread_mutex_init=uninstrumented\nfun:__pthread_mutex_lock=uninstrumented\nfun:__pthread_mutex_trylock=uninstrumented\nfun:__pthread_mutex_unlock=uninstrumented\nfun:__pthread_mutexattr_destroy=uninstrumented\nfun:__pthread_mutexattr_init=uninstrumented\nfun:__pthread_mutexattr_settype=uninstrumented\nfun:__pthread_once=uninstrumented\nfun:__pthread_register_cancel=uninstrumented\nfun:__pthread_register_cancel_defer=uninstrumented\nfun:__pthread_rwlock_destroy=uninstrumented\nfun:__pthread_rwlock_init=uninstrumented\nfun:__pthread_rwlock_rdlock=uninstrumented\nfun:__pthread_rwlock_tryrdlock=uninstrumented\nfun:__pthread_rwlock_trywrlock=uninstrumented\nfun:__pthread_rwlock_unlock=uninstrumented\nfun:__pthread_rwlock_wrlock=uninstrumented\nfun:__pthread_setspecific=uninstrumented\nfun:__pthread_unregister_cancel=uninstrumented\nfun:__pthread_unregister_cancel_restore=uninstrumented\nfun:__pthread_unwind_next=uninstrumented\nfun:__ptsname_r_chk=uninstrumented\nfun:__putlong=uninstrumented\nfun:__putshort=uninstrumented\nfun:__pwrite64=uninstrumented\nfun:__rawmemchr=uninstrumented\nfun:__read=uninstrumented\nfun:__read_chk=uninstrumented\nfun:__read_nocancel=uninstrumented\nfun:__readlink_chk=uninstrumented\nfun:__readlinkat_chk=uninstrumented\nfun:__realpath_chk=uninstrumented\nfun:__recv=uninstrumented\nfun:__recv_chk=uninstrumented\nfun:__recvfrom_chk=uninstrumented\nfun:__register_atfork=uni
nstrumented\nfun:__register_frame=uninstrumented\nfun:__register_frame_info=uninstrumented\nfun:__register_frame_info_bases=uninstrumented\nfun:__register_frame_info_table=uninstrumented\nfun:__register_frame_info_table_bases=uninstrumented\nfun:__register_frame_table=uninstrumented\nfun:__remainder_finite=uninstrumented\nfun:__remainderf128_finite=uninstrumented\nfun:__remainderf_finite=uninstrumented\nfun:__remainderl_finite=uninstrumented\nfun:__res_close=uninstrumented\nfun:__res_context_hostalias=uninstrumented\nfun:__res_context_mkquery=uninstrumented\nfun:__res_context_query=uninstrumented\nfun:__res_context_search=uninstrumented\nfun:__res_context_send=uninstrumented\nfun:__res_dnok=uninstrumented\nfun:__res_get_nsaddr=uninstrumented\nfun:__res_hnok=uninstrumented\nfun:__res_hostalias=uninstrumented\nfun:__res_iclose=uninstrumented\nfun:__res_init=uninstrumented\nfun:__res_isourserver=uninstrumented\nfun:__res_mailok=uninstrumented\nfun:__res_mkquery=uninstrumented\nfun:__res_nameinquery=uninstrumented\nfun:__res_nclose=uninstrumented\nfun:__res_ninit=uninstrumented\nfun:__res_nmkquery=uninstrumented\nfun:__res_nopt=uninstrumented\nfun:__res_nquery=uninstrumented\nfun:__res_nquerydomain=uninstrumented\nfun:__res_nsearch=uninstrumented\nfun:__res_nsend=uninstrumented\nfun:__res_ownok=uninstrumented\nfun:__res_queriesmatch=uninstrumented\nfun:__res_query=uninstrumented\nfun:__res_querydomain=uninstrumented\nfun:__res_randomid=uninstrumented\nfun:__res_search=uninstrumented\nfun:__res_send=uninstrumented\nfun:__res_state=uninstrumented\nfun:__resolv_context_get=uninstrumented\nfun:__resolv_context_get_override=uninstrumented\nfun:__resolv_context_get_preinit=uninstrumented\nfun:__resolv_context_put=uninstrumented\nfun:__rpc_thread_createerr=uninstrumented\nfun:__rpc_thread_svc_fdset=uninstrumented\nfun:__rpc_thread_svc_max_pollfd=uninstrumented\nfun:__rpc_thread_svc_pollfd=uninstrumented\nfun:__rtld_version_placeholder=uninstrumented\nfun:__sbrk=uninstrumented\
nfun:__scalb_finite=uninstrumented\nfun:__scalbf_finite=uninstrumented\nfun:__scalbl_finite=uninstrumented\nfun:__sched_cpualloc=uninstrumented\nfun:__sched_cpucount=uninstrumented\nfun:__sched_cpufree=uninstrumented\nfun:__sched_get_priority_max=uninstrumented\nfun:__sched_get_priority_min=uninstrumented\nfun:__sched_getparam=uninstrumented\nfun:__sched_getscheduler=uninstrumented\nfun:__sched_setscheduler=uninstrumented\nfun:__sched_yield=uninstrumented\nfun:__secure_getenv=uninstrumented\nfun:__select=uninstrumented\nfun:__send=uninstrumented\nfun:__sendmmsg=uninstrumented\nfun:__setmntent=uninstrumented\nfun:__setpgid=uninstrumented\nfun:__sfp_handle_exceptions=uninstrumented\nfun:__shm_get_name=uninstrumented\nfun:__sigaction=uninstrumented\nfun:__sigaddset=uninstrumented\nfun:__sigdelset=uninstrumented\nfun:__sigismember=uninstrumented\nfun:__signbit=uninstrumented\nfun:__signbitf=uninstrumented\nfun:__signbitf128=uninstrumented\nfun:__signbitl=uninstrumented\nfun:__sigpause=uninstrumented\nfun:__sigsetjmp=uninstrumented\nfun:__sigsuspend=uninstrumented\nfun:__sigtimedwait=uninstrumented\nfun:__sinh_finite=uninstrumented\nfun:__sinhf128_finite=uninstrumented\nfun:__sinhf_finite=uninstrumented\nfun:__sinhl_finite=uninstrumented\nfun:__snprintf=uninstrumented\nfun:__snprintf_chk=uninstrumented\nfun:__socket=uninstrumented\nfun:__splitstack_block_signals=uninstrumented\nfun:__splitstack_block_signals_context=uninstrumented\nfun:__splitstack_find=uninstrumented\nfun:__splitstack_find_context=uninstrumented\nfun:__splitstack_getcontext=uninstrumented\nfun:__splitstack_makecontext=uninstrumented\nfun:__splitstack_releasecontext=uninstrumented\nfun:__splitstack_resetcontext=uninstrumented\nfun:__splitstack_setcontext=uninstrumented\nfun:__sprintf_chk=uninstrumented\nfun:__sqrt_finite=uninstrumented\nfun:__sqrtf128_finite=uninstrumented\nfun:__sqrtf_finite=uninstrumented\nfun:__sqrtl_finite=uninstrumented\nfun:__sse_resms64_12=uninstrumented\nfun:__sse_resms64_13=unin
strumented\nfun:__sse_resms64_14=uninstrumented\nfun:__sse_resms64_15=uninstrumented\nfun:__sse_resms64_16=uninstrumented\nfun:__sse_resms64_17=uninstrumented\nfun:__sse_resms64_18=uninstrumented\nfun:__sse_resms64f_12=uninstrumented\nfun:__sse_resms64f_13=uninstrumented\nfun:__sse_resms64f_14=uninstrumented\nfun:__sse_resms64f_15=uninstrumented\nfun:__sse_resms64f_16=uninstrumented\nfun:__sse_resms64f_17=uninstrumented\nfun:__sse_resms64fx_12=uninstrumented\nfun:__sse_resms64fx_13=uninstrumented\nfun:__sse_resms64fx_14=uninstrumented\nfun:__sse_resms64fx_15=uninstrumented\nfun:__sse_resms64fx_16=uninstrumented\nfun:__sse_resms64fx_17=uninstrumented\nfun:__sse_resms64x_12=uninstrumented\nfun:__sse_resms64x_13=uninstrumented\nfun:__sse_resms64x_14=uninstrumented\nfun:__sse_resms64x_15=uninstrumented\nfun:__sse_resms64x_16=uninstrumented\nfun:__sse_resms64x_17=uninstrumented\nfun:__sse_resms64x_18=uninstrumented\nfun:__sse_savms64_12=uninstrumented\nfun:__sse_savms64_13=uninstrumented\nfun:__sse_savms64_14=uninstrumented\nfun:__sse_savms64_15=uninstrumented\nfun:__sse_savms64_16=uninstrumented\nfun:__sse_savms64_17=uninstrumented\nfun:__sse_savms64_18=uninstrumented\nfun:__sse_savms64f_12=uninstrumented\nfun:__sse_savms64f_13=uninstrumented\nfun:__sse_savms64f_14=uninstrumented\nfun:__sse_savms64f_15=uninstrumented\nfun:__sse_savms64f_16=uninstrumented\nfun:__sse_savms64f_17=uninstrumented\nfun:__stack_chk_fail=uninstrumented\nfun:__stack_chk_fail_local=uninstrumented\nfun:__stack_split_initialize=uninstrumented\nfun:__statfs=uninstrumented\nfun:__stpcpy=uninstrumented\nfun:__stpcpy_chk=uninstrumented\nfun:__stpcpy_small=uninstrumented\nfun:__stpncpy=uninstrumented\nfun:__stpncpy_chk=uninstrumented\nfun:__strcasecmp=uninstrumented\nfun:__strcasecmp_l=uninstrumented\nfun:__strcasestr=uninstrumented\nfun:__strcat_chk=uninstrumented\nfun:__strcoll_l=uninstrumented\nfun:__strcpy_chk=uninstrumented\nfun:__strcpy_small=uninstrumented\nfun:__strcspn_c1=uninstrumented\nfun:__
strcspn_c2=uninstrumented\nfun:__strcspn_c3=uninstrumented\nfun:__strdup=uninstrumented\nfun:__strerror_r=uninstrumented\nfun:__strfmon_l=uninstrumented\nfun:__strftime_l=uninstrumented\nfun:__strncasecmp_l=uninstrumented\nfun:__strncat_chk=uninstrumented\nfun:__strncpy_chk=uninstrumented\nfun:__strndup=uninstrumented\nfun:__strpbrk_c2=uninstrumented\nfun:__strpbrk_c3=uninstrumented\nfun:__strsep_1c=uninstrumented\nfun:__strsep_2c=uninstrumented\nfun:__strsep_3c=uninstrumented\nfun:__strsep_g=uninstrumented\nfun:__strspn_c1=uninstrumented\nfun:__strspn_c2=uninstrumented\nfun:__strspn_c3=uninstrumented\nfun:__strtod_internal=uninstrumented\nfun:__strtod_l=uninstrumented\nfun:__strtod_nan=uninstrumented\nfun:__strtof128_internal=uninstrumented\nfun:__strtof128_nan=uninstrumented\nfun:__strtof_internal=uninstrumented\nfun:__strtof_l=uninstrumented\nfun:__strtof_nan=uninstrumented\nfun:__strtok_r=uninstrumented\nfun:__strtok_r_1c=uninstrumented\nfun:__strtol_internal=uninstrumented\nfun:__strtol_l=uninstrumented\nfun:__strtold_internal=uninstrumented\nfun:__strtold_l=uninstrumented\nfun:__strtold_nan=uninstrumented\nfun:__strtoll_internal=uninstrumented\nfun:__strtoll_l=uninstrumented\nfun:__strtoul_internal=uninstrumented\nfun:__strtoul_l=uninstrumented\nfun:__strtoull_internal=uninstrumented\nfun:__strtoull_l=uninstrumented\nfun:__strverscmp=uninstrumented\nfun:__strxfrm_l=uninstrumented\nfun:__subtf3=uninstrumented\nfun:__subvdi3=uninstrumented\nfun:__subvsi3=uninstrumented\nfun:__subvti3=uninstrumented\nfun:__swprintf_chk=uninstrumented\nfun:__sym_ntop=uninstrumented\nfun:__sym_ntos=uninstrumented\nfun:__sym_ston=uninstrumented\nfun:__sysconf=uninstrumented\nfun:__sysctl=uninstrumented\nfun:__syslog_chk=uninstrumented\nfun:__sysv_signal=uninstrumented\nfun:__tdelete=uninstrumented\nfun:__tfind=uninstrumented\nfun:__tls_get_addr=uninstrumented\nfun:__toascii_l=uninstrumented\nfun:__tolower_l=uninstrumented\nfun:__toupper_l=uninstrumented\nfun:__towctrans=uninstrument
ed\nfun:__towctrans_l=uninstrumented\nfun:__towlower_l=uninstrumented\nfun:__towupper_l=uninstrumented\nfun:__truncdfhf2=uninstrumented\nfun:__truncdfsf2=uninstrumented\nfun:__truncsfhf2=uninstrumented\nfun:__trunctfdf2=uninstrumented\nfun:__trunctfhf2=uninstrumented\nfun:__trunctfsf2=uninstrumented\nfun:__trunctfxf2=uninstrumented\nfun:__truncxfhf2=uninstrumented\nfun:__tsearch=uninstrumented\nfun:__ttyname_r_chk=uninstrumented\nfun:__tunable_get_val=uninstrumented\nfun:__twalk=uninstrumented\nfun:__twalk_r=uninstrumented\nfun:__ucmpti2=uninstrumented\nfun:__udiv_w_sdiv=uninstrumented\nfun:__udivmodti4=uninstrumented\nfun:__udivti3=uninstrumented\nfun:__uflow=uninstrumented\nfun:__umodti3=uninstrumented\nfun:__underflow=uninstrumented\nfun:__unordtf2=uninstrumented\nfun:__uselocale=uninstrumented\nfun:__vasprintf_chk=uninstrumented\nfun:__vdprintf_chk=uninstrumented\nfun:__vfork=uninstrumented\nfun:__vfprintf_chk=uninstrumented\nfun:__vfscanf=uninstrumented\nfun:__vfwprintf_chk=uninstrumented\nfun:__vprintf_chk=uninstrumented\nfun:__vsnprintf=uninstrumented\nfun:__vsnprintf_chk=uninstrumented\nfun:__vsprintf_chk=uninstrumented\nfun:__vsscanf=uninstrumented\nfun:__vswprintf_chk=uninstrumented\nfun:__vsyslog_chk=uninstrumented\nfun:__vwprintf_chk=uninstrumented\nfun:__wait=uninstrumented\nfun:__waitpid=uninstrumented\nfun:__wcpcpy_chk=uninstrumented\nfun:__wcpncpy_chk=uninstrumented\nfun:__wcrtomb_chk=uninstrumented\nfun:__wcscasecmp_l=uninstrumented\nfun:__wcscat_chk=uninstrumented\nfun:__wcscoll_l=uninstrumented\nfun:__wcscpy_chk=uninstrumented\nfun:__wcsftime_l=uninstrumented\nfun:__wcsncasecmp_l=uninstrumented\nfun:__wcsncat_chk=uninstrumented\nfun:__wcsncpy_chk=uninstrumented\nfun:__wcsnrtombs_chk=uninstrumented\nfun:__wcsrtombs_chk=uninstrumented\nfun:__wcstod_internal=uninstrumented\nfun:__wcstod_l=uninstrumented\nfun:__wcstof128_internal=uninstrumented\nfun:__wcstof_internal=uninstrumented\nfun:__wcstof_l=uninstrumented\nfun:__wcstol_internal=uninstrumented\n
fun:__wcstol_l=uninstrumented\nfun:__wcstold_internal=uninstrumented\nfun:__wcstold_l=uninstrumented\nfun:__wcstoll_internal=uninstrumented\nfun:__wcstoll_l=uninstrumented\nfun:__wcstombs_chk=uninstrumented\nfun:__wcstoul_internal=uninstrumented\nfun:__wcstoul_l=uninstrumented\nfun:__wcstoull_internal=uninstrumented\nfun:__wcstoull_l=uninstrumented\nfun:__wcsxfrm_l=uninstrumented\nfun:__wctomb_chk=uninstrumented\nfun:__wctrans_l=uninstrumented\nfun:__wctype_l=uninstrumented\nfun:__wmemcpy_chk=uninstrumented\nfun:__wmemmove_chk=uninstrumented\nfun:__wmempcpy_chk=uninstrumented\nfun:__wmemset_chk=uninstrumented\nfun:__woverflow=uninstrumented\nfun:__wprintf_chk=uninstrumented\nfun:__wrap_pthread_create=uninstrumented\nfun:__write=uninstrumented\nfun:__write_nocancel=uninstrumented\nfun:__wuflow=uninstrumented\nfun:__wunderflow=uninstrumented\nfun:__x86_get_cpuid_feature_leaf=uninstrumented\nfun:__xmknod=uninstrumented\nfun:__xmknodat=uninstrumented\nfun:__xpg_basename=uninstrumented\nfun:__xpg_sigpause=uninstrumented\nfun:__xpg_strerror_r=uninstrumented\nfun:__xstat=uninstrumented\nfun:__xstat64=uninstrumented\nfun:__y0_finite=uninstrumented\nfun:__y0f128_finite=uninstrumented\nfun:__y0f_finite=uninstrumented\nfun:__y0l_finite=uninstrumented\nfun:__y1_finite=uninstrumented\nfun:__y1f128_finite=uninstrumented\nfun:__y1f_finite=uninstrumented\nfun:__y1l_finite=uninstrumented\nfun:__yn_finite=uninstrumented\nfun:__ynf128_finite=uninstrumented\nfun:__ynf_finite=uninstrumented\nfun:__ynl_finite=uninstrumented\nfun:__yp_check=uninstrumented\nfun:_authenticate=uninstrumented\nfun:_dl_allocate_tls=uninstrumented\nfun:_dl_allocate_tls_init=uninstrumented\nfun:_dl_audit_preinit=uninstrumented\nfun:_dl_audit_symbind_alt=uninstrumented\nfun:_dl_catch_error=uninstrumented\nfun:_dl_catch_exception=uninstrumented\nfun:_dl_deallocate_tls=uninstrumented\nfun:_dl_debug_state=uninstrumented\nfun:_dl_exception_create=uninstrumented\nfun:_dl_exception_create_format=uninstrumented\nfun:_dl
_exception_free=uninstrumented\nfun:_dl_fatal_printf=uninstrumented\nfun:_dl_find_dso_for_object=uninstrumented\nfun:_dl_find_object=uninstrumented\nfun:_dl_get_tls_static_info=uninstrumented\nfun:_dl_mcount=uninstrumented\nfun:_dl_mcount_wrapper=uninstrumented\nfun:_dl_mcount_wrapper_check=uninstrumented\nfun:_dl_rtld_di_serinfo=uninstrumented\nfun:_dl_signal_error=uninstrumented\nfun:_dl_signal_exception=uninstrumented\nfun:_dl_x86_get_cpu_features=uninstrumented\nfun:_exit=uninstrumented\nfun:_flushlbf=uninstrumented\nfun:_gethtbyaddr=uninstrumented\nfun:_gethtbyname=uninstrumented\nfun:_gethtbyname2=uninstrumented\nfun:_gethtent=uninstrumented\nfun:_getlong=uninstrumented\nfun:_getshort=uninstrumented\nfun:_longjmp=uninstrumented\nfun:_mcleanup=uninstrumented\nfun:_mcount=uninstrumented\nfun:_nss_dns_getcanonname_r=uninstrumented\nfun:_nss_dns_gethostbyaddr2_r=uninstrumented\nfun:_nss_dns_gethostbyaddr_r=uninstrumented\nfun:_nss_dns_gethostbyname2_r=uninstrumented\nfun:_nss_dns_gethostbyname3_r=uninstrumented\nfun:_nss_dns_gethostbyname4_r=uninstrumented\nfun:_nss_dns_gethostbyname_r=uninstrumented\nfun:_nss_dns_getnetbyaddr_r=uninstrumented\nfun:_nss_dns_getnetbyname_r=uninstrumented\nfun:_nss_files_endaliasent=uninstrumented\nfun:_nss_files_endetherent=uninstrumented\nfun:_nss_files_endgrent=uninstrumented\nfun:_nss_files_endhostent=uninstrumented\nfun:_nss_files_endnetent=uninstrumented\nfun:_nss_files_endnetgrent=uninstrumented\nfun:_nss_files_endprotoent=uninstrumented\nfun:_nss_files_endpwent=uninstrumented\nfun:_nss_files_endrpcent=uninstrumented\nfun:_nss_files_endservent=uninstrumented\nfun:_nss_files_endsgent=uninstrumented\nfun:_nss_files_endspent=uninstrumented\nfun:_nss_files_getaliasbyname_r=uninstrumented\nfun:_nss_files_getaliasent_r=uninstrumented\nfun:_nss_files_getetherent_r=uninstrumented\nfun:_nss_files_getgrent_r=uninstrumented\nfun:_nss_files_getgrgid_r=uninstrumented\nfun:_nss_files_getgrnam_r=uninstrumented\nfun:_nss_files_gethostbyaddr_
r=uninstrumented\nfun:_nss_files_gethostbyname2_r=uninstrumented\nfun:_nss_files_gethostbyname3_r=uninstrumented\nfun:_nss_files_gethostbyname4_r=uninstrumented\nfun:_nss_files_gethostbyname_r=uninstrumented\nfun:_nss_files_gethostent_r=uninstrumented\nfun:_nss_files_gethostton_r=uninstrumented\nfun:_nss_files_getnetbyaddr_r=uninstrumented\nfun:_nss_files_getnetbyname_r=uninstrumented\nfun:_nss_files_getnetent_r=uninstrumented\nfun:_nss_files_getnetgrent_r=uninstrumented\nfun:_nss_files_getntohost_r=uninstrumented\nfun:_nss_files_getprotobyname_r=uninstrumented\nfun:_nss_files_getprotobynumber_r=uninstrumented\nfun:_nss_files_getprotoent_r=uninstrumented\nfun:_nss_files_getpwent_r=uninstrumented\nfun:_nss_files_getpwnam_r=uninstrumented\nfun:_nss_files_getpwuid_r=uninstrumented\nfun:_nss_files_getrpcbyname_r=uninstrumented\nfun:_nss_files_getrpcbynumber_r=uninstrumented\nfun:_nss_files_getrpcent_r=uninstrumented\nfun:_nss_files_getservbyname_r=uninstrumented\nfun:_nss_files_getservbyport_r=uninstrumented\nfun:_nss_files_getservent_r=uninstrumented\nfun:_nss_files_getsgent_r=uninstrumented\nfun:_nss_files_getsgnam_r=uninstrumented\nfun:_nss_files_getspent_r=uninstrumented\nfun:_nss_files_getspnam_r=uninstrumented\nfun:_nss_files_init=uninstrumented\nfun:_nss_files_initgroups_dyn=uninstrumented\nfun:_nss_files_parse_etherent=uninstrumented\nfun:_nss_files_parse_grent=uninstrumented\nfun:_nss_files_parse_netent=uninstrumented\nfun:_nss_files_parse_protoent=uninstrumented\nfun:_nss_files_parse_pwent=uninstrumented\nfun:_nss_files_parse_rpcent=uninstrumented\nfun:_nss_files_parse_servent=uninstrumented\nfun:_nss_files_parse_sgent=uninstrumented\nfun:_nss_files_parse_spent=uninstrumented\nfun:_nss_files_setaliasent=uninstrumented\nfun:_nss_files_setetherent=uninstrumented\nfun:_nss_files_setgrent=uninstrumented\nfun:_nss_files_sethostent=uninstrumented\nfun:_nss_files_setnetent=uninstrumented\nfun:_nss_files_setnetgrent=uninstrumented\nfun:_nss_files_setprotoent=uninstrum
ented\nfun:_nss_files_setpwent=uninstrumented\nfun:_nss_files_setrpcent=uninstrumented\nfun:_nss_files_setservent=uninstrumented\nfun:_nss_files_setsgent=uninstrumented\nfun:_nss_files_setspent=uninstrumented\nfun:_nss_netgroup_parseline=uninstrumented\nfun:_obstack_allocated_p=uninstrumented\nfun:_obstack_begin=uninstrumented\nfun:_obstack_begin_1=uninstrumented\nfun:_obstack_free=uninstrumented\nfun:_obstack_memory_used=uninstrumented\nfun:_obstack_newchunk=uninstrumented\nfun:_pthread_cleanup_pop=uninstrumented\nfun:_pthread_cleanup_pop_restore=uninstrumented\nfun:_pthread_cleanup_push=uninstrumented\nfun:_pthread_cleanup_push_defer=uninstrumented\nfun:_rpc_dtablesize=uninstrumented\nfun:_seterr_reply=uninstrumented\nfun:_sethtent=uninstrumented\nfun:_setjmp=uninstrumented\nfun:_tolower=uninstrumented\nfun:_toupper=uninstrumented\nfun:_xdr_ib_request=uninstrumented\nfun:_xdr_nis_result=uninstrumented\nfun:a64l=uninstrumented\nfun:abort=uninstrumented\nfun:abs=uninstrumented\nfun:accept=uninstrumented\nfun:accept4=uninstrumented\nfun:access=uninstrumented\nfun:acct=uninstrumented\nfun:acos=uninstrumented\nfun:acosf=uninstrumented\nfun:acosf128=uninstrumented\nfun:acosf32=uninstrumented\nfun:acosf32x=uninstrumented\nfun:acosf64=uninstrumented\nfun:acosf64x=uninstrumented\nfun:acosh=uninstrumented\nfun:acoshf=uninstrumented\nfun:acoshf128=uninstrumented\nfun:acoshf32=uninstrumented\nfun:acoshf32x=uninstrumented\nfun:acoshf64=uninstrumented\nfun:acoshf64x=uninstrumented\nfun:acoshl=uninstrumented\nfun:acosl=uninstrumented\nfun:addmntent=uninstrumented\nfun:addseverity=uninstrumented\nfun:adjtime=uninstrumented\nfun:adjtimex=uninstrumented\nfun:advance=uninstrumented\nfun:aio_cancel=uninstrumented\nfun:aio_cancel64=uninstrumented\nfun:aio_error=uninstrumented\nfun:aio_error64=uninstrumented\nfun:aio_fsync=uninstrumented\nfun:aio_fsync64=uninstrumented\nfun:aio_init=uninstrumented\nfun:aio_read=uninstrumented\nfun:aio_read64=uninstrumented\nfun:aio_return=uninstrumente
d\nfun:aio_return64=uninstrumented\nfun:aio_suspend=uninstrumented\nfun:aio_suspend64=uninstrumented\nfun:aio_write=uninstrumented\nfun:aio_write64=uninstrumented\nfun:alarm=uninstrumented\nfun:aligned_alloc=uninstrumented\nfun:alphasort=uninstrumented\nfun:alphasort64=uninstrumented\nfun:arch_prctl=uninstrumented\nfun:argp_error=uninstrumented\nfun:argp_failure=uninstrumented\nfun:argp_help=uninstrumented\nfun:argp_parse=uninstrumented\nfun:argp_state_help=uninstrumented\nfun:argp_usage=uninstrumented\nfun:argz_add=uninstrumented\nfun:argz_add_sep=uninstrumented\nfun:argz_append=uninstrumented\nfun:argz_count=uninstrumented\nfun:argz_create=uninstrumented\nfun:argz_create_sep=uninstrumented\nfun:argz_delete=uninstrumented\nfun:argz_extract=uninstrumented\nfun:argz_insert=uninstrumented\nfun:argz_next=uninstrumented\nfun:argz_replace=uninstrumented\nfun:argz_stringify=uninstrumented\nfun:asctime=uninstrumented\nfun:asctime_r=uninstrumented\nfun:asin=uninstrumented\nfun:asinf=uninstrumented\nfun:asinf128=uninstrumented\nfun:asinf32=uninstrumented\nfun:asinf32x=uninstrumented\nfun:asinf64=uninstrumented\nfun:asinf64x=uninstrumented\nfun:asinh=uninstrumented\nfun:asinhf=uninstrumented\nfun:asinhf128=uninstrumented\nfun:asinhf32=uninstrumented\nfun:asinhf32x=uninstrumented\nfun:asinhf64=uninstrumented\nfun:asinhf64x=uninstrumented\nfun:asinhl=uninstrumented\nfun:asinl=uninstrumented\nfun:asprintf=uninstrumented\nfun:at_quick_exit=uninstrumented\nfun:atan=uninstrumented\nfun:atan2=uninstrumented\nfun:atan2f=uninstrumented\nfun:atan2f128=uninstrumented\nfun:atan2f32=uninstrumented\nfun:atan2f32x=uninstrumented\nfun:atan2f64=uninstrumented\nfun:atan2f64x=uninstrumented\nfun:atan2l=uninstrumented\nfun:atanf=uninstrumented\nfun:atanf128=uninstrumented\nfun:atanf32=uninstrumented\nfun:atanf32x=uninstrumented\nfun:atanf64=uninstrumented\nfun:atanf64x=uninstrumented\nfun:atanh=uninstrumented\nfun:atanhf=uninstrumented\nfun:atanhf128=uninstrumented\nfun:atanhf32=uninstrumented\n
fun:atanhf32x=uninstrumented\nfun:atanhf64=uninstrumented\nfun:atanhf64x=uninstrumented\nfun:atanhl=uninstrumented\nfun:atanl=uninstrumented\nfun:atexit=uninstrumented\nfun:atof=uninstrumented\nfun:atoi=uninstrumented\nfun:atol=uninstrumented\nfun:atoll=uninstrumented\nfun:authdes_create=uninstrumented\nfun:authdes_getucred=uninstrumented\nfun:authdes_pk_create=uninstrumented\nfun:authnone_create=uninstrumented\nfun:authunix_create=uninstrumented\nfun:authunix_create_default=uninstrumented\nfun:backtrace=uninstrumented\nfun:backtrace_symbols=uninstrumented\nfun:backtrace_symbols_fd=uninstrumented\nfun:basename=uninstrumented\nfun:bcmp=uninstrumented\nfun:bcopy=uninstrumented\nfun:bdflush=uninstrumented\nfun:bind=uninstrumented\nfun:bind_textdomain_codeset=uninstrumented\nfun:bindresvport=uninstrumented\nfun:bindtextdomain=uninstrumented\nfun:brk=uninstrumented\nfun:bsd_signal=uninstrumented\nfun:bsearch=uninstrumented\nfun:btowc=uninstrumented\nfun:bzero=uninstrumented\nfun:c16rtomb=uninstrumented\nfun:c32rtomb=uninstrumented\nfun:cabs=uninstrumented\nfun:cabsf=uninstrumented\nfun:cabsf128=uninstrumented\nfun:cabsf32=uninstrumented\nfun:cabsf32x=uninstrumented\nfun:cabsf64=uninstrumented\nfun:cabsf64x=uninstrumented\nfun:cabsl=uninstrumented\nfun:cacos=uninstrumented\nfun:cacosf=uninstrumented\nfun:cacosf128=uninstrumented\nfun:cacosf32=uninstrumented\nfun:cacosf32x=uninstrumented\nfun:cacosf64=uninstrumented\nfun:cacosf64x=uninstrumented\nfun:cacosh=uninstrumented\nfun:cacoshf=uninstrumented\nfun:cacoshf128=uninstrumented\nfun:cacoshf32=uninstrumented\nfun:cacoshf32x=uninstrumented\nfun:cacoshf64=uninstrumented\nfun:cacoshf64x=uninstrumented\nfun:cacoshl=uninstrumented\nfun:cacosl=uninstrumented\nfun:call_once=uninstrumented\nfun:calloc=uninstrumented\nfun:callrpc=uninstrumented\nfun:canonicalize=uninstrumented\nfun:canonicalize_file_name=uninstrumented\nfun:canonicalizef=uninstrumented\nfun:canonicalizef128=uninstrumented\nfun:canonicalizef32=uninstrumented\nfun:c
anonicalizef32x=uninstrumented\nfun:canonicalizef64=uninstrumented\nfun:canonicalizef64x=uninstrumented\nfun:canonicalizel=uninstrumented\nfun:capget=uninstrumented\nfun:capset=uninstrumented\nfun:carg=uninstrumented\nfun:cargf=uninstrumented\nfun:cargf128=uninstrumented\nfun:cargf32=uninstrumented\nfun:cargf32x=uninstrumented\nfun:cargf64=uninstrumented\nfun:cargf64x=uninstrumented\nfun:cargl=uninstrumented\nfun:casin=uninstrumented\nfun:casinf=uninstrumented\nfun:casinf128=uninstrumented\nfun:casinf32=uninstrumented\nfun:casinf32x=uninstrumented\nfun:casinf64=uninstrumented\nfun:casinf64x=uninstrumented\nfun:casinh=uninstrumented\nfun:casinhf=uninstrumented\nfun:casinhf128=uninstrumented\nfun:casinhf32=uninstrumented\nfun:casinhf32x=uninstrumented\nfun:casinhf64=uninstrumented\nfun:casinhf64x=uninstrumented\nfun:casinhl=uninstrumented\nfun:casinl=uninstrumented\nfun:catan=uninstrumented\nfun:catanf=uninstrumented\nfun:catanf128=uninstrumented\nfun:catanf32=uninstrumented\nfun:catanf32x=uninstrumented\nfun:catanf64=uninstrumented\nfun:catanf64x=uninstrumented\nfun:catanh=uninstrumented\nfun:catanhf=uninstrumented\nfun:catanhf128=uninstrumented\nfun:catanhf32=uninstrumented\nfun:catanhf32x=uninstrumented\nfun:catanhf64=uninstrumented\nfun:catanhf64x=uninstrumented\nfun:catanhl=uninstrumented\nfun:catanl=uninstrumented\nfun:catclose=uninstrumented\nfun:catgets=uninstrumented\nfun:catopen=uninstrumented\nfun:cbc_crypt=uninstrumented\nfun:cbrt=uninstrumented\nfun:cbrtf=uninstrumented\nfun:cbrtf128=uninstrumented\nfun:cbrtf32=uninstrumented\nfun:cbrtf32x=uninstrumented\nfun:cbrtf64=uninstrumented\nfun:cbrtf64x=uninstrumented\nfun:cbrtl=uninstrumented\nfun:ccos=uninstrumented\nfun:ccosf=uninstrumented\nfun:ccosf128=uninstrumented\nfun:ccosf32=uninstrumented\nfun:ccosf32x=uninstrumented\nfun:ccosf64=uninstrumented\nfun:ccosf64x=uninstrumented\nfun:ccosh=uninstrumented\nfun:ccoshf=uninstrumented\nfun:ccoshf128=uninstrumented\nfun:ccoshf32=uninstrumented\nfun:ccoshf32x=unin
strumented\nfun:ccoshf64=uninstrumented\nfun:ccoshf64x=uninstrumented\nfun:ccoshl=uninstrumented\nfun:ccosl=uninstrumented\nfun:ceil=uninstrumented\nfun:ceilf=uninstrumented\nfun:ceilf128=uninstrumented\nfun:ceilf32=uninstrumented\nfun:ceilf32x=uninstrumented\nfun:ceilf64=uninstrumented\nfun:ceilf64x=uninstrumented\nfun:ceill=uninstrumented\nfun:cexp=uninstrumented\nfun:cexpf=uninstrumented\nfun:cexpf128=uninstrumented\nfun:cexpf32=uninstrumented\nfun:cexpf32x=uninstrumented\nfun:cexpf64=uninstrumented\nfun:cexpf64x=uninstrumented\nfun:cexpl=uninstrumented\nfun:cfgetispeed=uninstrumented\nfun:cfgetospeed=uninstrumented\nfun:cfmakeraw=uninstrumented\nfun:cfree=uninstrumented\nfun:cfsetispeed=uninstrumented\nfun:cfsetospeed=uninstrumented\nfun:cfsetspeed=uninstrumented\nfun:chdir=uninstrumented\nfun:chflags=uninstrumented\nfun:chmod=uninstrumented\nfun:chown=uninstrumented\nfun:chroot=uninstrumented\nfun:cimag=uninstrumented\nfun:cimagf=uninstrumented\nfun:cimagf128=uninstrumented\nfun:cimagf32=uninstrumented\nfun:cimagf32x=uninstrumented\nfun:cimagf64=uninstrumented\nfun:cimagf64x=uninstrumented\nfun:cimagl=uninstrumented\nfun:clearenv=uninstrumented\nfun:clearerr=uninstrumented\nfun:clearerr_unlocked=uninstrumented\nfun:clnt_broadcast=uninstrumented\nfun:clnt_create=uninstrumented\nfun:clnt_pcreateerror=uninstrumented\nfun:clnt_perrno=uninstrumented\nfun:clnt_perror=uninstrumented\nfun:clnt_spcreateerror=uninstrumented\nfun:clnt_sperrno=uninstrumented\nfun:clnt_sperror=uninstrumented\nfun:clntraw_create=uninstrumented\nfun:clnttcp_create=uninstrumented\nfun:clntudp_bufcreate=uninstrumented\nfun:clntudp_create=uninstrumented\nfun:clntunix_create=uninstrumented\nfun:clock=uninstrumented\nfun:clock_adjtime=uninstrumented\nfun:clock_getcpuclockid=uninstrumented\nfun:clock_getres=uninstrumented\nfun:clock_gettime=uninstrumented\nfun:clock_nanosleep=uninstrumented\nfun:clock_settime=uninstrumented\nfun:clog=uninstrumented\nfun:clog10=uninstrumented\nfun:clog10f=uninstrume
nted\nfun:clog10f128=uninstrumented\nfun:clog10f32=uninstrumented\nfun:clog10f32x=uninstrumented\nfun:clog10f64=uninstrumented\nfun:clog10f64x=uninstrumented\nfun:clog10l=uninstrumented\nfun:clogf=uninstrumented\nfun:clogf128=uninstrumented\nfun:clogf32=uninstrumented\nfun:clogf32x=uninstrumented\nfun:clogf64=uninstrumented\nfun:clogf64x=uninstrumented\nfun:clogl=uninstrumented\nfun:clone=uninstrumented\nfun:close=uninstrumented\nfun:close_range=uninstrumented\nfun:closedir=uninstrumented\nfun:closefrom=uninstrumented\nfun:closelog=uninstrumented\nfun:cnd_broadcast=uninstrumented\nfun:cnd_destroy=uninstrumented\nfun:cnd_init=uninstrumented\nfun:cnd_signal=uninstrumented\nfun:cnd_timedwait=uninstrumented\nfun:cnd_wait=uninstrumented\nfun:confstr=uninstrumented\nfun:conj=uninstrumented\nfun:conjf=uninstrumented\nfun:conjf128=uninstrumented\nfun:conjf32=uninstrumented\nfun:conjf32x=uninstrumented\nfun:conjf64=uninstrumented\nfun:conjf64x=uninstrumented\nfun:conjl=uninstrumented\nfun:connect=uninstrumented\nfun:copy_file_range=uninstrumented\nfun:copysign=uninstrumented\nfun:copysignf=uninstrumented\nfun:copysignf128=uninstrumented\nfun:copysignf32=uninstrumented\nfun:copysignf32x=uninstrumented\nfun:copysignf64=uninstrumented\nfun:copysignf64x=uninstrumented\nfun:copysignl=uninstrumented\nfun:cos=uninstrumented\nfun:cosf=uninstrumented\nfun:cosf128=uninstrumented\nfun:cosf32=uninstrumented\nfun:cosf32x=uninstrumented\nfun:cosf64=uninstrumented\nfun:cosf64x=uninstrumented\nfun:cosh=uninstrumented\nfun:coshf=uninstrumented\nfun:coshf128=uninstrumented\nfun:coshf32=uninstrumented\nfun:coshf32x=uninstrumented\nfun:coshf64=uninstrumented\nfun:coshf64x=uninstrumented\nfun:coshl=uninstrumented\nfun:cosl=uninstrumented\nfun:cpow=uninstrumented\nfun:cpowf=uninstrumented\nfun:cpowf128=uninstrumented\nfun:cpowf32=uninstrumented\nfun:cpowf32x=uninstrumented\nfun:cpowf64=uninstrumented\nfun:cpowf64x=uninstrumented\nfun:cpowl=uninstrumented\nfun:cproj=uninstrumented\nfun:cprojf=unin
strumented\nfun:cprojf128=uninstrumented\nfun:cprojf32=uninstrumented\nfun:cprojf32x=uninstrumented\nfun:cprojf64=uninstrumented\nfun:cprojf64x=uninstrumented\nfun:cprojl=uninstrumented\nfun:creal=uninstrumented\nfun:crealf=uninstrumented\nfun:crealf128=uninstrumented\nfun:crealf32=uninstrumented\nfun:crealf32x=uninstrumented\nfun:crealf64=uninstrumented\nfun:crealf64x=uninstrumented\nfun:creall=uninstrumented\nfun:creat=uninstrumented\nfun:creat64=uninstrumented\nfun:create_module=uninstrumented\nfun:crypt=uninstrumented\nfun:crypt_checksalt=uninstrumented\nfun:crypt_gensalt=uninstrumented\nfun:crypt_gensalt_r=uninstrumented\nfun:crypt_gensalt_ra=uninstrumented\nfun:crypt_gensalt_rn=uninstrumented\nfun:crypt_preferred_method=uninstrumented\nfun:crypt_r=uninstrumented\nfun:crypt_ra=uninstrumented\nfun:crypt_rn=uninstrumented\nfun:csin=uninstrumented\nfun:csinf=uninstrumented\nfun:csinf128=uninstrumented\nfun:csinf32=uninstrumented\nfun:csinf32x=uninstrumented\nfun:csinf64=uninstrumented\nfun:csinf64x=uninstrumented\nfun:csinh=uninstrumented\nfun:csinhf=uninstrumented\nfun:csinhf128=uninstrumented\nfun:csinhf32=uninstrumented\nfun:csinhf32x=uninstrumented\nfun:csinhf64=uninstrumented\nfun:csinhf64x=uninstrumented\nfun:csinhl=uninstrumented\nfun:csinl=uninstrumented\nfun:csqrt=uninstrumented\nfun:csqrtf=uninstrumented\nfun:csqrtf128=uninstrumented\nfun:csqrtf32=uninstrumented\nfun:csqrtf32x=uninstrumented\nfun:csqrtf64=uninstrumented\nfun:csqrtf64x=uninstrumented\nfun:csqrtl=uninstrumented\nfun:ctan=uninstrumented\nfun:ctanf=uninstrumented\nfun:ctanf128=uninstrumented\nfun:ctanf32=uninstrumented\nfun:ctanf32x=uninstrumented\nfun:ctanf64=uninstrumented\nfun:ctanf64x=uninstrumented\nfun:ctanh=uninstrumented\nfun:ctanhf=uninstrumented\nfun:ctanhf128=uninstrumented\nfun:ctanhf32=uninstrumented\nfun:ctanhf32x=uninstrumented\nfun:ctanhf64=uninstrumented\nfun:ctanhf64x=uninstrumented\nfun:ctanhl=uninstrumented\nfun:ctanl=uninstrumented\nfun:ctermid=uninstrumented\nfun:ctime=
uninstrumented\nfun:ctime_r=uninstrumented\nfun:cuserid=uninstrumented\nfun:daddl=uninstrumented\nfun:daemon=uninstrumented\nfun:dcgettext=uninstrumented\nfun:dcngettext=uninstrumented\nfun:ddivl=uninstrumented\nfun:delete_module=uninstrumented\nfun:des_setparity=uninstrumented\nfun:dfmal=uninstrumented\nfun:dgettext=uninstrumented\nfun:difftime=uninstrumented\nfun:dirfd=uninstrumented\nfun:dirname=uninstrumented\nfun:div=uninstrumented\nfun:dl_iterate_phdr=uninstrumented\nfun:dladdr=uninstrumented\nfun:dladdr1=uninstrumented\nfun:dlclose=uninstrumented\nfun:dlerror=uninstrumented\nfun:dlinfo=uninstrumented\nfun:dlmopen=uninstrumented\nfun:dlopen=uninstrumented\nfun:dlsym=uninstrumented\nfun:dlvsym=uninstrumented\nfun:dmull=uninstrumented\nfun:dn_comp=uninstrumented\nfun:dn_expand=uninstrumented\nfun:dn_skipname=uninstrumented\nfun:dngettext=uninstrumented\nfun:dprintf=uninstrumented\nfun:drand48=uninstrumented\nfun:drand48_r=uninstrumented\nfun:drem=uninstrumented\nfun:dremf=uninstrumented\nfun:dreml=uninstrumented\nfun:dsqrtl=uninstrumented\nfun:dsubl=uninstrumented\nfun:dup=uninstrumented\nfun:dup2=uninstrumented\nfun:dup3=uninstrumented\nfun:duplocale=uninstrumented\nfun:dysize=uninstrumented\nfun:eaccess=uninstrumented\nfun:ecb_crypt=uninstrumented\nfun:ecvt=uninstrumented\nfun:ecvt_r=uninstrumented\nfun:encrypt=uninstrumented\nfun:encrypt_r=uninstrumented\nfun:endaliasent=uninstrumented\nfun:endfsent=uninstrumented\nfun:endgrent=uninstrumented\nfun:endhostent=uninstrumented\nfun:endmntent=uninstrumented\nfun:endnetent=uninstrumented\nfun:endnetgrent=uninstrumented\nfun:endprotoent=uninstrumented\nfun:endpwent=uninstrumented\nfun:endrpcent=uninstrumented\nfun:endservent=uninstrumented\nfun:endsgent=uninstrumented\nfun:endspent=uninstrumented\nfun:endttyent=uninstrumented\nfun:endusershell=uninstrumented\nfun:endutent=uninstrumented\nfun:endutxent=uninstrumented\nfun:envz_add=uninstrumented\nfun:envz_entry=uninstrumented\nfun:envz_get=uninstrumented\nfun:envz_me
rge=uninstrumented\nfun:envz_remove=uninstrumented\nfun:envz_strip=uninstrumented\nfun:epoll_create=uninstrumented\nfun:epoll_create1=uninstrumented\nfun:epoll_ctl=uninstrumented\nfun:epoll_pwait=uninstrumented\nfun:epoll_pwait2=uninstrumented\nfun:epoll_wait=uninstrumented\nfun:erand48=uninstrumented\nfun:erand48_r=uninstrumented\nfun:erf=uninstrumented\nfun:erfc=uninstrumented\nfun:erfcf=uninstrumented\nfun:erfcf128=uninstrumented\nfun:erfcf32=uninstrumented\nfun:erfcf32x=uninstrumented\nfun:erfcf64=uninstrumented\nfun:erfcf64x=uninstrumented\nfun:erfcl=uninstrumented\nfun:erff=uninstrumented\nfun:erff128=uninstrumented\nfun:erff32=uninstrumented\nfun:erff32x=uninstrumented\nfun:erff64=uninstrumented\nfun:erff64x=uninstrumented\nfun:erfl=uninstrumented\nfun:err=uninstrumented\nfun:error=uninstrumented\nfun:error_at_line=uninstrumented\nfun:errx=uninstrumented\nfun:ether_aton=uninstrumented\nfun:ether_aton_r=uninstrumented\nfun:ether_hostton=uninstrumented\nfun:ether_line=uninstrumented\nfun:ether_ntoa=uninstrumented\nfun:ether_ntoa_r=uninstrumented\nfun:ether_ntohost=uninstrumented\nfun:euidaccess=uninstrumented\nfun:eventfd=uninstrumented\nfun:eventfd_read=uninstrumented\nfun:eventfd_write=uninstrumented\nfun:execl=uninstrumented\nfun:execle=uninstrumented\nfun:execlp=uninstrumented\nfun:execv=uninstrumented\nfun:execve=uninstrumented\nfun:execveat=uninstrumented\nfun:execvp=uninstrumented\nfun:execvpe=uninstrumented\nfun:exit=uninstrumented\nfun:exp=uninstrumented\nfun:exp10=uninstrumented\nfun:exp10f=uninstrumented\nfun:exp10f128=uninstrumented\nfun:exp10f32=uninstrumented\nfun:exp10f32x=uninstrumented\nfun:exp10f64=uninstrumented\nfun:exp10f64x=uninstrumented\nfun:exp10l=uninstrumented\nfun:exp2=uninstrumented\nfun:exp2f=uninstrumented\nfun:exp2f128=uninstrumented\nfun:exp2f32=uninstrumented\nfun:exp2f32x=uninstrumented\nfun:exp2f64=uninstrumented\nfun:exp2f64x=uninstrumented\nfun:exp2l=uninstrumented\nfun:expf=uninstrumented\nfun:expf128=uninstrumented\nfun:e
xpf32=uninstrumented\nfun:expf32x=uninstrumented\nfun:expf64=uninstrumented\nfun:expf64x=uninstrumented\nfun:expl=uninstrumented\nfun:explicit_bzero=uninstrumented\nfun:expm1=uninstrumented\nfun:expm1f=uninstrumented\nfun:expm1f128=uninstrumented\nfun:expm1f32=uninstrumented\nfun:expm1f32x=uninstrumented\nfun:expm1f64=uninstrumented\nfun:expm1f64x=uninstrumented\nfun:expm1l=uninstrumented\nfun:f32addf128=uninstrumented\nfun:f32addf32x=uninstrumented\nfun:f32addf64=uninstrumented\nfun:f32addf64x=uninstrumented\nfun:f32divf128=uninstrumented\nfun:f32divf32x=uninstrumented\nfun:f32divf64=uninstrumented\nfun:f32divf64x=uninstrumented\nfun:f32fmaf128=uninstrumented\nfun:f32fmaf32x=uninstrumented\nfun:f32fmaf64=uninstrumented\nfun:f32fmaf64x=uninstrumented\nfun:f32mulf128=uninstrumented\nfun:f32mulf32x=uninstrumented\nfun:f32mulf64=uninstrumented\nfun:f32mulf64x=uninstrumented\nfun:f32sqrtf128=uninstrumented\nfun:f32sqrtf32x=uninstrumented\nfun:f32sqrtf64=uninstrumented\nfun:f32sqrtf64x=uninstrumented\nfun:f32subf128=uninstrumented\nfun:f32subf32x=uninstrumented\nfun:f32subf64=uninstrumented\nfun:f32subf64x=uninstrumented\nfun:f32xaddf128=uninstrumented\nfun:f32xaddf64=uninstrumented\nfun:f32xaddf64x=uninstrumented\nfun:f32xdivf128=uninstrumented\nfun:f32xdivf64=uninstrumented\nfun:f32xdivf64x=uninstrumented\nfun:f32xfmaf128=uninstrumented\nfun:f32xfmaf64=uninstrumented\nfun:f32xfmaf64x=uninstrumented\nfun:f32xmulf128=uninstrumented\nfun:f32xmulf64=uninstrumented\nfun:f32xmulf64x=uninstrumented\nfun:f32xsqrtf128=uninstrumented\nfun:f32xsqrtf64=uninstrumented\nfun:f32xsqrtf64x=uninstrumented\nfun:f32xsubf128=uninstrumented\nfun:f32xsubf64=uninstrumented\nfun:f32xsubf64x=uninstrumented\nfun:f64addf128=uninstrumented\nfun:f64addf64x=uninstrumented\nfun:f64divf128=uninstrumented\nfun:f64divf64x=uninstrumented\nfun:f64fmaf128=uninstrumented\nfun:f64fmaf64x=uninstrumented\nfun:f64mulf128=uninstrumented\nfun:f64mulf64x=uninstrumented\nfun:f64sqrtf128=uninstrumented\nfun:f64sqrtf
64x=uninstrumented\nfun:f64subf128=uninstrumented\nfun:f64subf64x=uninstrumented\nfun:f64xaddf128=uninstrumented\nfun:f64xdivf128=uninstrumented\nfun:f64xfmaf128=uninstrumented\nfun:f64xmulf128=uninstrumented\nfun:f64xsqrtf128=uninstrumented\nfun:f64xsubf128=uninstrumented\nfun:fabs=uninstrumented\nfun:fabsf=uninstrumented\nfun:fabsf128=uninstrumented\nfun:fabsf32=uninstrumented\nfun:fabsf32x=uninstrumented\nfun:fabsf64=uninstrumented\nfun:fabsf64x=uninstrumented\nfun:fabsl=uninstrumented\nfun:faccessat=uninstrumented\nfun:fadd=uninstrumented\nfun:faddl=uninstrumented\nfun:fallocate=uninstrumented\nfun:fallocate64=uninstrumented\nfun:fanotify_init=uninstrumented\nfun:fanotify_mark=uninstrumented\nfun:fattach=uninstrumented\nfun:fchdir=uninstrumented\nfun:fchflags=uninstrumented\nfun:fchmod=uninstrumented\nfun:fchmodat=uninstrumented\nfun:fchown=uninstrumented\nfun:fchownat=uninstrumented\nfun:fclose=uninstrumented\nfun:fcloseall=uninstrumented\nfun:fcntl=uninstrumented\nfun:fcntl64=uninstrumented\nfun:fcrypt=uninstrumented\nfun:fcvt=uninstrumented\nfun:fcvt_r=uninstrumented\nfun:fdatasync=uninstrumented\nfun:fdetach=uninstrumented\nfun:fdim=uninstrumented\nfun:fdimf=uninstrumented\nfun:fdimf128=uninstrumented\nfun:fdimf32=uninstrumented\nfun:fdimf32x=uninstrumented\nfun:fdimf64=uninstrumented\nfun:fdimf64x=uninstrumented\nfun:fdiml=uninstrumented\nfun:fdiv=uninstrumented\nfun:fdivl=uninstrumented\nfun:fdopen=uninstrumented\nfun:fdopendir=uninstrumented\nfun:feclearexcept=uninstrumented\nfun:fedisableexcept=uninstrumented\nfun:feenableexcept=uninstrumented\nfun:fegetenv=uninstrumented\nfun:fegetexcept=uninstrumented\nfun:fegetexceptflag=uninstrumented\nfun:fegetmode=uninstrumented\nfun:fegetround=uninstrumented\nfun:feholdexcept=uninstrumented\nfun:feof=uninstrumented\nfun:feof_unlocked=uninstrumented\nfun:feraiseexcept=uninstrumented\nfun:ferror=uninstrumented\nfun:ferror_unlocked=uninstrumented\nfun:fesetenv=uninstrumented\nfun:fesetexcept=uninstrumented\nfun:feset
exceptflag=uninstrumented\nfun:fesetmode=uninstrumented\nfun:fesetround=uninstrumented\nfun:fetestexcept=uninstrumented\nfun:fetestexceptflag=uninstrumented\nfun:feupdateenv=uninstrumented\nfun:fexecve=uninstrumented\nfun:fflush=uninstrumented\nfun:fflush_unlocked=uninstrumented\nfun:ffma=uninstrumented\nfun:ffmal=uninstrumented\nfun:ffs=uninstrumented\nfun:ffsl=uninstrumented\nfun:ffsll=uninstrumented\nfun:fgetc=uninstrumented\nfun:fgetc_unlocked=uninstrumented\nfun:fgetgrent=uninstrumented\nfun:fgetgrent_r=uninstrumented\nfun:fgetpos=uninstrumented\nfun:fgetpos64=uninstrumented\nfun:fgetpwent=uninstrumented\nfun:fgetpwent_r=uninstrumented\nfun:fgets=uninstrumented\nfun:fgets_unlocked=uninstrumented\nfun:fgetsgent=uninstrumented\nfun:fgetsgent_r=uninstrumented\nfun:fgetspent=uninstrumented\nfun:fgetspent_r=uninstrumented\nfun:fgetwc=uninstrumented\nfun:fgetwc_unlocked=uninstrumented\nfun:fgetws=uninstrumented\nfun:fgetws_unlocked=uninstrumented\nfun:fgetxattr=uninstrumented\nfun:fileno=uninstrumented\nfun:fileno_unlocked=uninstrumented\nfun:finite=uninstrumented\nfun:finitef=uninstrumented\nfun:finitel=uninstrumented\nfun:flistxattr=uninstrumented\nfun:flock=uninstrumented\nfun:flockfile=uninstrumented\nfun:floor=uninstrumented\nfun:floorf=uninstrumented\nfun:floorf128=uninstrumented\nfun:floorf32=uninstrumented\nfun:floorf32x=uninstrumented\nfun:floorf64=uninstrumented\nfun:floorf64x=uninstrumented\nfun:floorl=uninstrumented\nfun:fma=uninstrumented\nfun:fmaf=uninstrumented\nfun:fmaf128=uninstrumented\nfun:fmaf32=uninstrumented\nfun:fmaf32x=uninstrumented\nfun:fmaf64=uninstrumented\nfun:fmaf64x=uninstrumented\nfun:fmal=uninstrumented\nfun:fmax=uninstrumented\nfun:fmaxf=uninstrumented\nfun:fmaxf128=uninstrumented\nfun:fmaxf32=uninstrumented\nfun:fmaxf32x=uninstrumented\nfun:fmaxf64=uninstrumented\nfun:fmaxf64x=uninstrumented\nfun:fmaximum=uninstrumented\nfun:fmaximum_mag=uninstrumented\nfun:fmaximum_mag_num=uninstrumented\nfun:fmaximum_mag_numf=uninstrumented\nfun:f
maximum_mag_numf128=uninstrumented\nfun:fmaximum_mag_numf32=uninstrumented\nfun:fmaximum_mag_numf32x=uninstrumented\nfun:fmaximum_mag_numf64=uninstrumented\nfun:fmaximum_mag_numf64x=uninstrumented\nfun:fmaximum_mag_numl=uninstrumented\nfun:fmaximum_magf=uninstrumented\nfun:fmaximum_magf128=uninstrumented\nfun:fmaximum_magf32=uninstrumented\nfun:fmaximum_magf32x=uninstrumented\nfun:fmaximum_magf64=uninstrumented\nfun:fmaximum_magf64x=uninstrumented\nfun:fmaximum_magl=uninstrumented\nfun:fmaximum_num=uninstrumented\nfun:fmaximum_numf=uninstrumented\nfun:fmaximum_numf128=uninstrumented\nfun:fmaximum_numf32=uninstrumented\nfun:fmaximum_numf32x=uninstrumented\nfun:fmaximum_numf64=uninstrumented\nfun:fmaximum_numf64x=uninstrumented\nfun:fmaximum_numl=uninstrumented\nfun:fmaximumf=uninstrumented\nfun:fmaximumf128=uninstrumented\nfun:fmaximumf32=uninstrumented\nfun:fmaximumf32x=uninstrumented\nfun:fmaximumf64=uninstrumented\nfun:fmaximumf64x=uninstrumented\nfun:fmaximuml=uninstrumented\nfun:fmaxl=uninstrumented\nfun:fmaxmag=uninstrumented\nfun:fmaxmagf=uninstrumented\nfun:fmaxmagf128=uninstrumented\nfun:fmaxmagf32=uninstrumented\nfun:fmaxmagf32x=uninstrumented\nfun:fmaxmagf64=uninstrumented\nfun:fmaxmagf64x=uninstrumented\nfun:fmaxmagl=uninstrumented\nfun:fmemopen=uninstrumented\nfun:fmin=uninstrumented\nfun:fminf=uninstrumented\nfun:fminf128=uninstrumented\nfun:fminf32=uninstrumented\nfun:fminf32x=uninstrumented\nfun:fminf64=uninstrumented\nfun:fminf64x=uninstrumented\nfun:fminimum=uninstrumented\nfun:fminimum_mag=uninstrumented\nfun:fminimum_mag_num=uninstrumented\nfun:fminimum_mag_numf=uninstrumented\nfun:fminimum_mag_numf128=uninstrumented\nfun:fminimum_mag_numf32=uninstrumented\nfun:fminimum_mag_numf32x=uninstrumented\nfun:fminimum_mag_numf64=uninstrumented\nfun:fminimum_mag_numf64x=uninstrumented\nfun:fminimum_mag_numl=uninstrumented\nfun:fminimum_magf=uninstrumented\nfun:fminimum_magf128=uninstrumented\nfun:fminimum_magf32=uninstrumented\nfun:fminimum_magf32x=uninstr
umented\nfun:fminimum_magf64=uninstrumented\nfun:fminimum_magf64x=uninstrumented\nfun:fminimum_magl=uninstrumented\nfun:fminimum_num=uninstrumented\nfun:fminimum_numf=uninstrumented\nfun:fminimum_numf128=uninstrumented\nfun:fminimum_numf32=uninstrumented\nfun:fminimum_numf32x=uninstrumented\nfun:fminimum_numf64=uninstrumented\nfun:fminimum_numf64x=uninstrumented\nfun:fminimum_numl=uninstrumented\nfun:fminimumf=uninstrumented\nfun:fminimumf128=uninstrumented\nfun:fminimumf32=uninstrumented\nfun:fminimumf32x=uninstrumented\nfun:fminimumf64=uninstrumented\nfun:fminimumf64x=uninstrumented\nfun:fminimuml=uninstrumented\nfun:fminl=uninstrumented\nfun:fminmag=uninstrumented\nfun:fminmagf=uninstrumented\nfun:fminmagf128=uninstrumented\nfun:fminmagf32=uninstrumented\nfun:fminmagf32x=uninstrumented\nfun:fminmagf64=uninstrumented\nfun:fminmagf64x=uninstrumented\nfun:fminmagl=uninstrumented\nfun:fmod=uninstrumented\nfun:fmodf=uninstrumented\nfun:fmodf128=uninstrumented\nfun:fmodf32=uninstrumented\nfun:fmodf32x=uninstrumented\nfun:fmodf64=uninstrumented\nfun:fmodf64x=uninstrumented\nfun:fmodl=uninstrumented\nfun:fmtmsg=uninstrumented\nfun:fmul=uninstrumented\nfun:fmull=uninstrumented\nfun:fnmatch=uninstrumented\nfun:fopen=uninstrumented\nfun:fopen64=uninstrumented\nfun:fopencookie=uninstrumented\nfun:fork=uninstrumented\nfun:forkpty=uninstrumented\nfun:fpathconf=uninstrumented\nfun:fprintf=uninstrumented\nfun:fputc=uninstrumented\nfun:fputc_unlocked=uninstrumented\nfun:fputs=uninstrumented\nfun:fputs_unlocked=uninstrumented\nfun:fputwc=uninstrumented\nfun:fputwc_unlocked=uninstrumented\nfun:fputws=uninstrumented\nfun:fputws_unlocked=uninstrumented\nfun:fread=uninstrumented\nfun:fread_unlocked=uninstrumented\nfun:free=uninstrumented\nfun:freeaddrinfo=uninstrumented\nfun:freeifaddrs=uninstrumented\nfun:freelocale=uninstrumented\nfun:fremovexattr=uninstrumented\nfun:freopen=uninstrumented\nfun:freopen64=uninstrumented\nfun:frexp=uninstrumented\nfun:frexpf=uninstrumented\nfun:frexpf
128=uninstrumented\nfun:frexpf32=uninstrumented\nfun:frexpf32x=uninstrumented\nfun:frexpf64=uninstrumented\nfun:frexpf64x=uninstrumented\nfun:frexpl=uninstrumented\nfun:fromfp=uninstrumented\nfun:fromfpf=uninstrumented\nfun:fromfpf128=uninstrumented\nfun:fromfpf32=uninstrumented\nfun:fromfpf32x=uninstrumented\nfun:fromfpf64=uninstrumented\nfun:fromfpf64x=uninstrumented\nfun:fromfpl=uninstrumented\nfun:fromfpx=uninstrumented\nfun:fromfpxf=uninstrumented\nfun:fromfpxf128=uninstrumented\nfun:fromfpxf32=uninstrumented\nfun:fromfpxf32x=uninstrumented\nfun:fromfpxf64=uninstrumented\nfun:fromfpxf64x=uninstrumented\nfun:fromfpxl=uninstrumented\nfun:fscanf=uninstrumented\nfun:fseek=uninstrumented\nfun:fseeko=uninstrumented\nfun:fseeko64=uninstrumented\nfun:fsetpos=uninstrumented\nfun:fsetpos64=uninstrumented\nfun:fsetxattr=uninstrumented\nfun:fsqrt=uninstrumented\nfun:fsqrtl=uninstrumented\nfun:fstat=uninstrumented\nfun:fstat64=uninstrumented\nfun:fstatat=uninstrumented\nfun:fstatat64=uninstrumented\nfun:fstatfs=uninstrumented\nfun:fstatfs64=uninstrumented\nfun:fstatvfs=uninstrumented\nfun:fstatvfs64=uninstrumented\nfun:fsub=uninstrumented\nfun:fsubl=uninstrumented\nfun:fsync=uninstrumented\nfun:ftell=uninstrumented\nfun:ftello=uninstrumented\nfun:ftello64=uninstrumented\nfun:ftime=uninstrumented\nfun:ftok=uninstrumented\nfun:ftruncate=uninstrumented\nfun:ftruncate64=uninstrumented\nfun:ftrylockfile=uninstrumented\nfun:fts64_children=uninstrumented\nfun:fts64_close=uninstrumented\nfun:fts64_open=uninstrumented\nfun:fts64_read=uninstrumented\nfun:fts64_set=uninstrumented\nfun:fts_children=uninstrumented\nfun:fts_close=uninstrumented\nfun:fts_open=uninstrumented\nfun:fts_read=uninstrumented\nfun:fts_set=uninstrumented\nfun:ftw=uninstrumented\nfun:ftw64=uninstrumented\nfun:funlockfile=uninstrumented\nfun:futimens=uninstrumented\nfun:futimes=uninstrumented\nfun:futimesat=uninstrumented\nfun:fwide=uninstrumented\nfun:fwprintf=uninstrumented\nfun:fwrite=uninstrumented\nfun:fwrite_
unlocked=uninstrumented\nfun:fwscanf=uninstrumented\nfun:gai_cancel=uninstrumented\nfun:gai_error=uninstrumented\nfun:gai_strerror=uninstrumented\nfun:gai_suspend=uninstrumented\nfun:gamma=uninstrumented\nfun:gammaf=uninstrumented\nfun:gammal=uninstrumented\nfun:gcvt=uninstrumented\nfun:get_avphys_pages=uninstrumented\nfun:get_current_dir_name=uninstrumented\nfun:get_kernel_syms=uninstrumented\nfun:get_myaddress=uninstrumented\nfun:get_nprocs=uninstrumented\nfun:get_nprocs_conf=uninstrumented\nfun:get_phys_pages=uninstrumented\nfun:getaddrinfo=uninstrumented\nfun:getaddrinfo_a=uninstrumented\nfun:getaliasbyname=uninstrumented\nfun:getaliasbyname_r=uninstrumented\nfun:getaliasent=uninstrumented\nfun:getaliasent_r=uninstrumented\nfun:getauxval=uninstrumented\nfun:getc=uninstrumented\nfun:getc_unlocked=uninstrumented\nfun:getchar=uninstrumented\nfun:getchar_unlocked=uninstrumented\nfun:getcontext=uninstrumented\nfun:getcpu=uninstrumented\nfun:getcwd=uninstrumented\nfun:getdate=uninstrumented\nfun:getdate_r=uninstrumented\nfun:getdelim=uninstrumented\nfun:getdents64=uninstrumented\nfun:getdirentries=uninstrumented\nfun:getdirentries64=uninstrumented\nfun:getdomainname=uninstrumented\nfun:getdtablesize=uninstrumented\nfun:getegid=uninstrumented\nfun:getentropy=uninstrumented\nfun:getenv=uninstrumented\nfun:geteuid=uninstrumented\nfun:getfsent=uninstrumented\nfun:getfsfile=uninstrumented\nfun:getfsspec=uninstrumented\nfun:getgid=uninstrumented\nfun:getgrent=uninstrumented\nfun:getgrent_r=uninstrumented\nfun:getgrgid=uninstrumented\nfun:getgrgid_r=uninstrumented\nfun:getgrnam=uninstrumented\nfun:getgrnam_r=uninstrumented\nfun:getgrouplist=uninstrumented\nfun:getgroups=uninstrumented\nfun:gethostbyaddr=uninstrumented\nfun:gethostbyaddr_r=uninstrumented\nfun:gethostbyname=uninstrumented\nfun:gethostbyname2=uninstrumented\nfun:gethostbyname2_r=uninstrumented\nfun:gethostbyname_r=uninstrumented\nfun:gethostent=uninstrumented\nfun:gethostent_r=uninstrumented\nfun:gethostid=unin
strumented\nfun:gethostname=uninstrumented\nfun:getifaddrs=uninstrumented\nfun:getipv4sourcefilter=uninstrumented\nfun:getitimer=uninstrumented\nfun:getline=uninstrumented\nfun:getloadavg=uninstrumented\nfun:getlogin=uninstrumented\nfun:getlogin_r=uninstrumented\nfun:getmntent=uninstrumented\nfun:getmntent_r=uninstrumented\nfun:getmsg=uninstrumented\nfun:getnameinfo=uninstrumented\nfun:getnetbyaddr=uninstrumented\nfun:getnetbyaddr_r=uninstrumented\nfun:getnetbyname=uninstrumented\nfun:getnetbyname_r=uninstrumented\nfun:getnetent=uninstrumented\nfun:getnetent_r=uninstrumented\nfun:getnetgrent=uninstrumented\nfun:getnetgrent_r=uninstrumented\nfun:getnetname=uninstrumented\nfun:getopt=uninstrumented\nfun:getopt_long=uninstrumented\nfun:getopt_long_only=uninstrumented\nfun:getpagesize=uninstrumented\nfun:getpass=uninstrumented\nfun:getpayload=uninstrumented\nfun:getpayloadf=uninstrumented\nfun:getpayloadf128=uninstrumented\nfun:getpayloadf32=uninstrumented\nfun:getpayloadf32x=uninstrumented\nfun:getpayloadf64=uninstrumented\nfun:getpayloadf64x=uninstrumented\nfun:getpayloadl=uninstrumented\nfun:getpeername=uninstrumented\nfun:getpgid=uninstrumented\nfun:getpgrp=uninstrumented\nfun:getpid=uninstrumented\nfun:getpmsg=uninstrumented\nfun:getppid=uninstrumented\nfun:getpriority=uninstrumented\nfun:getprotobyname=uninstrumented\nfun:getprotobyname_r=uninstrumented\nfun:getprotobynumber=uninstrumented\nfun:getprotobynumber_r=uninstrumented\nfun:getprotoent=uninstrumented\nfun:getprotoent_r=uninstrumented\nfun:getpt=uninstrumented\nfun:getpublickey=uninstrumented\nfun:getpw=uninstrumented\nfun:getpwent=uninstrumented\nfun:getpwent_r=uninstrumented\nfun:getpwnam=uninstrumented\nfun:getpwnam_r=uninstrumented\nfun:getpwuid=uninstrumented\nfun:getpwuid_r=uninstrumented\nfun:getrandom=uninstrumented\nfun:getresgid=uninstrumented\nfun:getresuid=uninstrumented\nfun:getrlimit=uninstrumented\nfun:getrlimit64=uninstrumented\nfun:getrpcbyname=uninstrumented\nfun:getrpcbyname_r=uninstrume
nted\nfun:getrpcbynumber=uninstrumented\nfun:getrpcbynumber_r=uninstrumented\nfun:getrpcent=uninstrumented\nfun:getrpcent_r=uninstrumented\nfun:getrpcport=uninstrumented\nfun:getrusage=uninstrumented\nfun:gets=uninstrumented\nfun:getsecretkey=uninstrumented\nfun:getservbyname=uninstrumented\nfun:getservbyname_r=uninstrumented\nfun:getservbyport=uninstrumented\nfun:getservbyport_r=uninstrumented\nfun:getservent=uninstrumented\nfun:getservent_r=uninstrumented\nfun:getsgent=uninstrumented\nfun:getsgent_r=uninstrumented\nfun:getsgnam=uninstrumented\nfun:getsgnam_r=uninstrumented\nfun:getsid=uninstrumented\nfun:getsockname=uninstrumented\nfun:getsockopt=uninstrumented\nfun:getsourcefilter=uninstrumented\nfun:getspent=uninstrumented\nfun:getspent_r=uninstrumented\nfun:getspnam=uninstrumented\nfun:getspnam_r=uninstrumented\nfun:getsubopt=uninstrumented\nfun:gettext=uninstrumented\nfun:gettid=uninstrumented\nfun:gettimeofday=uninstrumented\nfun:getttyent=uninstrumented\nfun:getttynam=uninstrumented\nfun:getuid=uninstrumented\nfun:getusershell=uninstrumented\nfun:getutent=uninstrumented\nfun:getutent_r=uninstrumented\nfun:getutid=uninstrumented\nfun:getutid_r=uninstrumented\nfun:getutline=uninstrumented\nfun:getutline_r=uninstrumented\nfun:getutmp=uninstrumented\nfun:getutmpx=uninstrumented\nfun:getutxent=uninstrumented\nfun:getutxid=uninstrumented\nfun:getutxline=uninstrumented\nfun:getw=uninstrumented\nfun:getwc=uninstrumented\nfun:getwc_unlocked=uninstrumented\nfun:getwchar=uninstrumented\nfun:getwchar_unlocked=uninstrumented\nfun:getwd=uninstrumented\nfun:getxattr=uninstrumented\nfun:glob=uninstrumented\nfun:glob64=uninstrumented\nfun:glob_pattern_p=uninstrumented\nfun:globfree=uninstrumented\nfun:globfree64=uninstrumented\nfun:gmtime=uninstrumented\nfun:gmtime_r=uninstrumented\nfun:gnu_dev_major=uninstrumented\nfun:gnu_dev_makedev=uninstrumented\nfun:gnu_dev_minor=uninstrumented\nfun:gnu_get_libc_release=uninstrumented\nfun:gnu_get_libc_version=uninstrumented\nfun:grant
pt=uninstrumented\nfun:group_member=uninstrumented\nfun:gsignal=uninstrumented\nfun:gtty=uninstrumented\nfun:hasmntopt=uninstrumented\nfun:hcreate=uninstrumented\nfun:hcreate_r=uninstrumented\nfun:hdestroy=uninstrumented\nfun:hdestroy_r=uninstrumented\nfun:herror=uninstrumented\nfun:host2netname=uninstrumented\nfun:hsearch=uninstrumented\nfun:hsearch_r=uninstrumented\nfun:hstrerror=uninstrumented\nfun:htonl=uninstrumented\nfun:htons=uninstrumented\nfun:hypot=uninstrumented\nfun:hypotf=uninstrumented\nfun:hypotf128=uninstrumented\nfun:hypotf32=uninstrumented\nfun:hypotf32x=uninstrumented\nfun:hypotf64=uninstrumented\nfun:hypotf64x=uninstrumented\nfun:hypotl=uninstrumented\nfun:iconv=uninstrumented\nfun:iconv_close=uninstrumented\nfun:iconv_open=uninstrumented\nfun:if_freenameindex=uninstrumented\nfun:if_indextoname=uninstrumented\nfun:if_nameindex=uninstrumented\nfun:if_nametoindex=uninstrumented\nfun:ilogb=uninstrumented\nfun:ilogbf=uninstrumented\nfun:ilogbf128=uninstrumented\nfun:ilogbf32=uninstrumented\nfun:ilogbf32x=uninstrumented\nfun:ilogbf64=uninstrumented\nfun:ilogbf64x=uninstrumented\nfun:ilogbl=uninstrumented\nfun:imaxabs=uninstrumented\nfun:imaxdiv=uninstrumented\nfun:index=uninstrumented\nfun:inet6_opt_append=uninstrumented\nfun:inet6_opt_find=uninstrumented\nfun:inet6_opt_finish=uninstrumented\nfun:inet6_opt_get_val=uninstrumented\nfun:inet6_opt_init=uninstrumented\nfun:inet6_opt_next=uninstrumented\nfun:inet6_opt_set_val=uninstrumented\nfun:inet6_option_alloc=uninstrumented\nfun:inet6_option_append=uninstrumented\nfun:inet6_option_find=uninstrumented\nfun:inet6_option_init=uninstrumented\nfun:inet6_option_next=uninstrumented\nfun:inet6_option_space=uninstrumented\nfun:inet6_rth_add=uninstrumented\nfun:inet6_rth_getaddr=uninstrumented\nfun:inet6_rth_init=uninstrumented\nfun:inet6_rth_reverse=uninstrumented\nfun:inet6_rth_segments=uninstrumented\nfun:inet6_rth_space=uninstrumented\nfun:inet_addr=uninstrumented\nfun:inet_aton=uninstrumented\nfun:inet_lnao
f=uninstrumented\nfun:inet_makeaddr=uninstrumented\nfun:inet_net_ntop=uninstrumented\nfun:inet_net_pton=uninstrumented\nfun:inet_neta=uninstrumented\nfun:inet_netof=uninstrumented\nfun:inet_network=uninstrumented\nfun:inet_nsap_addr=uninstrumented\nfun:inet_nsap_ntoa=uninstrumented\nfun:inet_ntoa=uninstrumented\nfun:inet_ntop=uninstrumented\nfun:inet_pton=uninstrumented\nfun:init_module=uninstrumented\nfun:initgroups=uninstrumented\nfun:initstate=uninstrumented\nfun:initstate_r=uninstrumented\nfun:innetgr=uninstrumented\nfun:inotify_add_watch=uninstrumented\nfun:inotify_init=uninstrumented\nfun:inotify_init1=uninstrumented\nfun:inotify_rm_watch=uninstrumented\nfun:insque=uninstrumented\nfun:ioctl=uninstrumented\nfun:ioperm=uninstrumented\nfun:iopl=uninstrumented\nfun:iruserok=uninstrumented\nfun:iruserok_af=uninstrumented\nfun:isalnum=uninstrumented\nfun:isalnum_l=uninstrumented\nfun:isalpha=uninstrumented\nfun:isalpha_l=uninstrumented\nfun:isascii=uninstrumented\nfun:isastream=uninstrumented\nfun:isatty=uninstrumented\nfun:isblank=uninstrumented\nfun:isblank_l=uninstrumented\nfun:iscntrl=uninstrumented\nfun:iscntrl_l=uninstrumented\nfun:isctype=uninstrumented\nfun:isdigit=uninstrumented\nfun:isdigit_l=uninstrumented\nfun:isfdtype=uninstrumented\nfun:isgraph=uninstrumented\nfun:isgraph_l=uninstrumented\nfun:isinf=uninstrumented\nfun:isinfd128=uninstrumented\nfun:isinfd32=uninstrumented\nfun:isinfd64=uninstrumented\nfun:isinff=uninstrumented\nfun:isinfl=uninstrumented\nfun:islower=uninstrumented\nfun:islower_l=uninstrumented\nfun:isnan=uninstrumented\nfun:isnanf=uninstrumented\nfun:isnanl=uninstrumented\nfun:isprint=uninstrumented\nfun:isprint_l=uninstrumented\nfun:ispunct=uninstrumented\nfun:ispunct_l=uninstrumented\nfun:isspace=uninstrumented\nfun:isspace_l=uninstrumented\nfun:isupper=uninstrumented\nfun:isupper_l=uninstrumented\nfun:iswalnum=uninstrumented\nfun:iswalnum_l=uninstrumented\nfun:iswalpha=uninstrumented\nfun:iswalpha_l=uninstrumented\nfun:iswblank=unin
strumented\nfun:iswblank_l=uninstrumented\nfun:iswcntrl=uninstrumented\nfun:iswcntrl_l=uninstrumented\nfun:iswctype=uninstrumented\nfun:iswctype_l=uninstrumented\nfun:iswdigit=uninstrumented\nfun:iswdigit_l=uninstrumented\nfun:iswgraph=uninstrumented\nfun:iswgraph_l=uninstrumented\nfun:iswlower=uninstrumented\nfun:iswlower_l=uninstrumented\nfun:iswprint=uninstrumented\nfun:iswprint_l=uninstrumented\nfun:iswpunct=uninstrumented\nfun:iswpunct_l=uninstrumented\nfun:iswspace=uninstrumented\nfun:iswspace_l=uninstrumented\nfun:iswupper=uninstrumented\nfun:iswupper_l=uninstrumented\nfun:iswxdigit=uninstrumented\nfun:iswxdigit_l=uninstrumented\nfun:isxdigit=uninstrumented\nfun:isxdigit_l=uninstrumented\nfun:j0=uninstrumented\nfun:j0f=uninstrumented\nfun:j0f128=uninstrumented\nfun:j0f32=uninstrumented\nfun:j0f32x=uninstrumented\nfun:j0f64=uninstrumented\nfun:j0f64x=uninstrumented\nfun:j0l=uninstrumented\nfun:j1=uninstrumented\nfun:j1f=uninstrumented\nfun:j1f128=uninstrumented\nfun:j1f32=uninstrumented\nfun:j1f32x=uninstrumented\nfun:j1f64=uninstrumented\nfun:j1f64x=uninstrumented\nfun:j1l=uninstrumented\nfun:jn=uninstrumented\nfun:jnf=uninstrumented\nfun:jnf128=uninstrumented\nfun:jnf32=uninstrumented\nfun:jnf32x=uninstrumented\nfun:jnf64=uninstrumented\nfun:jnf64x=uninstrumented\nfun:jnl=uninstrumented\nfun:jrand48=uninstrumented\nfun:jrand48_r=uninstrumented\nfun:key_decryptsession=uninstrumented\nfun:key_decryptsession_pk=uninstrumented\nfun:key_encryptsession=uninstrumented\nfun:key_encryptsession_pk=uninstrumented\nfun:key_gendes=uninstrumented\nfun:key_get_conv=uninstrumented\nfun:key_secretkey_is_set=uninstrumented\nfun:key_setnet=uninstrumented\nfun:key_setsecret=uninstrumented\nfun:kill=uninstrumented\nfun:killpg=uninstrumented\nfun:klogctl=uninstrumented\nfun:l64a=uninstrumented\nfun:labs=uninstrumented\nfun:lchmod=uninstrumented\nfun:lchown=uninstrumented\nfun:lckpwdf=uninstrumented\nfun:lcong48=uninstrumented\nfun:lcong48_r=uninstrumented\nfun:ldexp=uninstrumente
d\nfun:ldexpf=uninstrumented\nfun:ldexpf128=uninstrumented\nfun:ldexpf32=uninstrumented\nfun:ldexpf32x=uninstrumented\nfun:ldexpf64=uninstrumented\nfun:ldexpf64x=uninstrumented\nfun:ldexpl=uninstrumented\nfun:ldiv=uninstrumented\nfun:lfind=uninstrumented\nfun:lgamma=uninstrumented\nfun:lgamma_r=uninstrumented\nfun:lgammaf=uninstrumented\nfun:lgammaf128=uninstrumented\nfun:lgammaf128_r=uninstrumented\nfun:lgammaf32=uninstrumented\nfun:lgammaf32_r=uninstrumented\nfun:lgammaf32x=uninstrumented\nfun:lgammaf32x_r=uninstrumented\nfun:lgammaf64=uninstrumented\nfun:lgammaf64_r=uninstrumented\nfun:lgammaf64x=uninstrumented\nfun:lgammaf64x_r=uninstrumented\nfun:lgammaf_r=uninstrumented\nfun:lgammal=uninstrumented\nfun:lgammal_r=uninstrumented\nfun:lgetxattr=uninstrumented\nfun:link=uninstrumented\nfun:linkat=uninstrumented\nfun:lio_listio=uninstrumented\nfun:lio_listio64=uninstrumented\nfun:listen=uninstrumented\nfun:listxattr=uninstrumented\nfun:llabs=uninstrumented\nfun:lldiv=uninstrumented\nfun:llistxattr=uninstrumented\nfun:llogb=uninstrumented\nfun:llogbf=uninstrumented\nfun:llogbf128=uninstrumented\nfun:llogbf32=uninstrumented\nfun:llogbf32x=uninstrumented\nfun:llogbf64=uninstrumented\nfun:llogbf64x=uninstrumented\nfun:llogbl=uninstrumented\nfun:llrint=uninstrumented\nfun:llrintf=uninstrumented\nfun:llrintf128=uninstrumented\nfun:llrintf32=uninstrumented\nfun:llrintf32x=uninstrumented\nfun:llrintf64=uninstrumented\nfun:llrintf64x=uninstrumented\nfun:llrintl=uninstrumented\nfun:llround=uninstrumented\nfun:llroundf=uninstrumented\nfun:llroundf128=uninstrumented\nfun:llroundf32=uninstrumented\nfun:llroundf32x=uninstrumented\nfun:llroundf64=uninstrumented\nfun:llroundf64x=uninstrumented\nfun:llroundl=uninstrumented\nfun:llseek=uninstrumented\nfun:localeconv=uninstrumented\nfun:localtime=uninstrumented\nfun:localtime_r=uninstrumented\nfun:lockf=uninstrumented\nfun:lockf64=uninstrumented\nfun:log=uninstrumented\nfun:log10=uninstrumented\nfun:log10f=uninstrumented\nfun:log10f1
28=uninstrumented\nfun:log10f32=uninstrumented\nfun:log10f32x=uninstrumented\nfun:log10f64=uninstrumented\nfun:log10f64x=uninstrumented\nfun:log10l=uninstrumented\nfun:log1p=uninstrumented\nfun:log1pf=uninstrumented\nfun:log1pf128=uninstrumented\nfun:log1pf32=uninstrumented\nfun:log1pf32x=uninstrumented\nfun:log1pf64=uninstrumented\nfun:log1pf64x=uninstrumented\nfun:log1pl=uninstrumented\nfun:log2=uninstrumented\nfun:log2f=uninstrumented\nfun:log2f128=uninstrumented\nfun:log2f32=uninstrumented\nfun:log2f32x=uninstrumented\nfun:log2f64=uninstrumented\nfun:log2f64x=uninstrumented\nfun:log2l=uninstrumented\nfun:logb=uninstrumented\nfun:logbf=uninstrumented\nfun:logbf128=uninstrumented\nfun:logbf32=uninstrumented\nfun:logbf32x=uninstrumented\nfun:logbf64=uninstrumented\nfun:logbf64x=uninstrumented\nfun:logbl=uninstrumented\nfun:logf=uninstrumented\nfun:logf128=uninstrumented\nfun:logf32=uninstrumented\nfun:logf32x=uninstrumented\nfun:logf64=uninstrumented\nfun:logf64x=uninstrumented\nfun:login=uninstrumented\nfun:login_tty=uninstrumented\nfun:logl=uninstrumented\nfun:logout=uninstrumented\nfun:logwtmp=uninstrumented\nfun:longjmp=uninstrumented\nfun:lrand48=uninstrumented\nfun:lrand48_r=uninstrumented\nfun:lremovexattr=uninstrumented\nfun:lrint=uninstrumented\nfun:lrintf=uninstrumented\nfun:lrintf128=uninstrumented\nfun:lrintf32=uninstrumented\nfun:lrintf32x=uninstrumented\nfun:lrintf64=uninstrumented\nfun:lrintf64x=uninstrumented\nfun:lrintl=uninstrumented\nfun:lround=uninstrumented\nfun:lroundf=uninstrumented\nfun:lroundf128=uninstrumented\nfun:lroundf32=uninstrumented\nfun:lroundf32x=uninstrumented\nfun:lroundf64=uninstrumented\nfun:lroundf64x=uninstrumented\nfun:lroundl=uninstrumented\nfun:lsearch=uninstrumented\nfun:lseek=uninstrumented\nfun:lseek64=uninstrumented\nfun:lsetxattr=uninstrumented\nfun:lstat=uninstrumented\nfun:lstat64=uninstrumented\nfun:lutimes=uninstrumented\nfun:madvise=uninstrumented\nfun:makecontext=uninstrumented\nfun:mallinfo=uninstrumented\nfun
:mallinfo2=uninstrumented\nfun:malloc=uninstrumented\nfun:malloc_info=uninstrumented\nfun:malloc_stats=uninstrumented\nfun:malloc_trim=uninstrumented\nfun:malloc_usable_size=uninstrumented\nfun:mallopt=uninstrumented\nfun:matherr=uninstrumented\nfun:mblen=uninstrumented\nfun:mbrlen=uninstrumented\nfun:mbrtoc16=uninstrumented\nfun:mbrtoc32=uninstrumented\nfun:mbrtowc=uninstrumented\nfun:mbsinit=uninstrumented\nfun:mbsnrtowcs=uninstrumented\nfun:mbsrtowcs=uninstrumented\nfun:mbstowcs=uninstrumented\nfun:mbtowc=uninstrumented\nfun:mcheck=uninstrumented\nfun:mcheck_check_all=uninstrumented\nfun:mcheck_pedantic=uninstrumented\nfun:mcount=uninstrumented\nfun:memalign=uninstrumented\nfun:memccpy=uninstrumented\nfun:memchr=uninstrumented\nfun:memcmp=uninstrumented\nfun:memcpy=uninstrumented\nfun:memfd_create=uninstrumented\nfun:memfrob=uninstrumented\nfun:memmem=uninstrumented\nfun:memmove=uninstrumented\nfun:mempcpy=uninstrumented\nfun:memrchr=uninstrumented\nfun:memset=uninstrumented\nfun:mincore=uninstrumented\nfun:mkdir=uninstrumented\nfun:mkdirat=uninstrumented\nfun:mkdtemp=uninstrumented\nfun:mkfifo=uninstrumented\nfun:mkfifoat=uninstrumented\nfun:mknod=uninstrumented\nfun:mknodat=uninstrumented\nfun:mkostemp=uninstrumented\nfun:mkostemp64=uninstrumented\nfun:mkostemps=uninstrumented\nfun:mkostemps64=uninstrumented\nfun:mkstemp=uninstrumented\nfun:mkstemp64=uninstrumented\nfun:mkstemps=uninstrumented\nfun:mkstemps64=uninstrumented\nfun:mktemp=uninstrumented\nfun:mktime=uninstrumented\nfun:mlock=uninstrumented\nfun:mlock2=uninstrumented\nfun:mlockall=uninstrumented\nfun:mmap=uninstrumented\nfun:mmap64=uninstrumented\nfun:modf=uninstrumented\nfun:modff=uninstrumented\nfun:modff128=uninstrumented\nfun:modff32=uninstrumented\nfun:modff32x=uninstrumented\nfun:modff64=uninstrumented\nfun:modff64x=uninstrumented\nfun:modfl=uninstrumented\nfun:modify_ldt=uninstrumented\nfun:moncontrol=uninstrumented\nfun:monstartup=uninstrumented\nfun:mount=uninstrumented\nfun:mprobe=uninstru
mented\nfun:mprotect=uninstrumented\nfun:mq_close=uninstrumented\nfun:mq_getattr=uninstrumented\nfun:mq_notify=uninstrumented\nfun:mq_open=uninstrumented\nfun:mq_receive=uninstrumented\nfun:mq_send=uninstrumented\nfun:mq_setattr=uninstrumented\nfun:mq_timedreceive=uninstrumented\nfun:mq_timedsend=uninstrumented\nfun:mq_unlink=uninstrumented\nfun:mrand48=uninstrumented\nfun:mrand48_r=uninstrumented\nfun:mremap=uninstrumented\nfun:msgctl=uninstrumented\nfun:msgget=uninstrumented\nfun:msgrcv=uninstrumented\nfun:msgsnd=uninstrumented\nfun:msync=uninstrumented\nfun:mtrace=uninstrumented\nfun:mtx_destroy=uninstrumented\nfun:mtx_init=uninstrumented\nfun:mtx_lock=uninstrumented\nfun:mtx_timedlock=uninstrumented\nfun:mtx_trylock=uninstrumented\nfun:mtx_unlock=uninstrumented\nfun:munlock=uninstrumented\nfun:munlockall=uninstrumented\nfun:munmap=uninstrumented\nfun:muntrace=uninstrumented\nfun:name_to_handle_at=uninstrumented\nfun:nan=uninstrumented\nfun:nanf=uninstrumented\nfun:nanf128=uninstrumented\nfun:nanf32=uninstrumented\nfun:nanf32x=uninstrumented\nfun:nanf64=uninstrumented\nfun:nanf64x=uninstrumented\nfun:nanl=uninstrumented\nfun:nanosleep=uninstrumented\nfun:nearbyint=uninstrumented\nfun:nearbyintf=uninstrumented\nfun:nearbyintf128=uninstrumented\nfun:nearbyintf32=uninstrumented\nfun:nearbyintf32x=uninstrumented\nfun:nearbyintf64=uninstrumented\nfun:nearbyintf64x=uninstrumented\nfun:nearbyintl=uninstrumented\nfun:netname2host=uninstrumented\nfun:netname2user=uninstrumented\nfun:newlocale=uninstrumented\nfun:nextafter=uninstrumented\nfun:nextafterf=uninstrumented\nfun:nextafterf128=uninstrumented\nfun:nextafterf32=uninstrumented\nfun:nextafterf32x=uninstrumented\nfun:nextafterf64=uninstrumented\nfun:nextafterf64x=uninstrumented\nfun:nextafterl=uninstrumented\nfun:nextdown=uninstrumented\nfun:nextdownf=uninstrumented\nfun:nextdownf128=uninstrumented\nfun:nextdownf32=uninstrumented\nfun:nextdownf32x=uninstrumented\nfun:nextdownf64=uninstrumented\nfun:nextdownf64x=uninst
rumented\nfun:nextdownl=uninstrumented\nfun:nexttoward=uninstrumented\nfun:nexttowardf=uninstrumented\nfun:nexttowardl=uninstrumented\nfun:nextup=uninstrumented\nfun:nextupf=uninstrumented\nfun:nextupf128=uninstrumented\nfun:nextupf32=uninstrumented\nfun:nextupf32x=uninstrumented\nfun:nextupf64=uninstrumented\nfun:nextupf64x=uninstrumented\nfun:nextupl=uninstrumented\nfun:nfsservctl=uninstrumented\nfun:nftw=uninstrumented\nfun:nftw64=uninstrumented\nfun:ngettext=uninstrumented\nfun:nice=uninstrumented\nfun:nis_add=uninstrumented\nfun:nis_add_entry=uninstrumented\nfun:nis_addmember=uninstrumented\nfun:nis_checkpoint=uninstrumented\nfun:nis_clone_directory=uninstrumented\nfun:nis_clone_object=uninstrumented\nfun:nis_clone_result=uninstrumented\nfun:nis_creategroup=uninstrumented\nfun:nis_destroy_object=uninstrumented\nfun:nis_destroygroup=uninstrumented\nfun:nis_dir_cmp=uninstrumented\nfun:nis_domain_of=uninstrumented\nfun:nis_domain_of_r=uninstrumented\nfun:nis_first_entry=uninstrumented\nfun:nis_free_directory=uninstrumented\nfun:nis_free_object=uninstrumented\nfun:nis_free_request=uninstrumented\nfun:nis_freenames=uninstrumented\nfun:nis_freeresult=uninstrumented\nfun:nis_freeservlist=uninstrumented\nfun:nis_freetags=uninstrumented\nfun:nis_getnames=uninstrumented\nfun:nis_getservlist=uninstrumented\nfun:nis_ismember=uninstrumented\nfun:nis_leaf_of=uninstrumented\nfun:nis_leaf_of_r=uninstrumented\nfun:nis_lerror=uninstrumented\nfun:nis_list=uninstrumented\nfun:nis_local_directory=uninstrumented\nfun:nis_local_group=uninstrumented\nfun:nis_local_host=uninstrumented\nfun:nis_local_principal=uninstrumented\nfun:nis_lookup=uninstrumented\nfun:nis_mkdir=uninstrumented\nfun:nis_modify=uninstrumented\nfun:nis_modify_entry=uninstrumented\nfun:nis_name_of=uninstrumented\nfun:nis_name_of_r=uninstrumented\nfun:nis_next_entry=uninstrumented\nfun:nis_perror=uninstrumented\nfun:nis_ping=uninstrumented\nfun:nis_print_directory=uninstrumented\nfun:nis_print_entry=uninstrumented\nf
un:nis_print_group=uninstrumented\nfun:nis_print_group_entry=uninstrumented\nfun:nis_print_link=uninstrumented\nfun:nis_print_object=uninstrumented\nfun:nis_print_result=uninstrumented\nfun:nis_print_rights=uninstrumented\nfun:nis_print_table=uninstrumented\nfun:nis_read_obj=uninstrumented\nfun:nis_remove=uninstrumented\nfun:nis_remove_entry=uninstrumented\nfun:nis_removemember=uninstrumented\nfun:nis_rmdir=uninstrumented\nfun:nis_servstate=uninstrumented\nfun:nis_sperrno=uninstrumented\nfun:nis_sperror=uninstrumented\nfun:nis_sperror_r=uninstrumented\nfun:nis_stats=uninstrumented\nfun:nis_verifygroup=uninstrumented\nfun:nis_write_obj=uninstrumented\nfun:nl_langinfo=uninstrumented\nfun:nl_langinfo_l=uninstrumented\nfun:nrand48=uninstrumented\nfun:nrand48_r=uninstrumented\nfun:ns_datetosecs=uninstrumented\nfun:ns_format_ttl=uninstrumented\nfun:ns_get16=uninstrumented\nfun:ns_get32=uninstrumented\nfun:ns_initparse=uninstrumented\nfun:ns_makecanon=uninstrumented\nfun:ns_msg_getflag=uninstrumented\nfun:ns_name_compress=uninstrumented\nfun:ns_name_ntol=uninstrumented\nfun:ns_name_ntop=uninstrumented\nfun:ns_name_pack=uninstrumented\nfun:ns_name_pton=uninstrumented\nfun:ns_name_rollback=uninstrumented\nfun:ns_name_skip=uninstrumented\nfun:ns_name_uncompress=uninstrumented\nfun:ns_name_unpack=uninstrumented\nfun:ns_parse_ttl=uninstrumented\nfun:ns_parserr=uninstrumented\nfun:ns_put16=uninstrumented\nfun:ns_put32=uninstrumented\nfun:ns_samedomain=uninstrumented\nfun:ns_samename=uninstrumented\nfun:ns_skiprr=uninstrumented\nfun:ns_sprintrr=uninstrumented\nfun:ns_sprintrrf=uninstrumented\nfun:ns_subdomain=uninstrumented\nfun:ntohl=uninstrumented\nfun:ntohs=uninstrumented\nfun:ntp_adjtime=uninstrumented\nfun:ntp_gettime=uninstrumented\nfun:ntp_gettimex=uninstrumented\nfun:obstack_free=uninstrumented\nfun:obstack_printf=uninstrumented\nfun:obstack_vprintf=uninstrumented\nfun:on_exit=uninstrumented\nfun:open=uninstrumented\nfun:open64=uninstrumented\nfun:open_by_handle_at=uninst
rumented\nfun:open_memstream=uninstrumented\nfun:open_wmemstream=uninstrumented\nfun:openat=uninstrumented\nfun:openat64=uninstrumented\nfun:opendir=uninstrumented\nfun:openlog=uninstrumented\nfun:openpty=uninstrumented\nfun:parse_printf_format=uninstrumented\nfun:passwd2des=uninstrumented\nfun:pathconf=uninstrumented\nfun:pause=uninstrumented\nfun:pclose=uninstrumented\nfun:perror=uninstrumented\nfun:personality=uninstrumented\nfun:pipe=uninstrumented\nfun:pipe2=uninstrumented\nfun:pivot_root=uninstrumented\nfun:pkey_alloc=uninstrumented\nfun:pkey_free=uninstrumented\nfun:pkey_get=uninstrumented\nfun:pkey_mprotect=uninstrumented\nfun:pkey_set=uninstrumented\nfun:pmap_getmaps=uninstrumented\nfun:pmap_getport=uninstrumented\nfun:pmap_rmtcall=uninstrumented\nfun:pmap_set=uninstrumented\nfun:pmap_unset=uninstrumented\nfun:poll=uninstrumented\nfun:popen=uninstrumented\nfun:posix_fadvise=uninstrumented\nfun:posix_fadvise64=uninstrumented\nfun:posix_fallocate=uninstrumented\nfun:posix_fallocate64=uninstrumented\nfun:posix_madvise=uninstrumented\nfun:posix_memalign=uninstrumented\nfun:posix_openpt=uninstrumented\nfun:posix_spawn=uninstrumented\nfun:posix_spawn_file_actions_addchdir_np=uninstrumented\nfun:posix_spawn_file_actions_addclose=uninstrumented\nfun:posix_spawn_file_actions_addclosefrom_np=uninstrumented\nfun:posix_spawn_file_actions_adddup2=uninstrumented\nfun:posix_spawn_file_actions_addfchdir_np=uninstrumented\nfun:posix_spawn_file_actions_addopen=uninstrumented\nfun:posix_spawn_file_actions_addtcsetpgrp_np=uninstrumented\nfun:posix_spawn_file_actions_destroy=uninstrumented\nfun:posix_spawn_file_actions_init=uninstrumented\nfun:posix_spawnattr_destroy=uninstrumented\nfun:posix_spawnattr_getflags=uninstrumented\nfun:posix_spawnattr_getpgroup=uninstrumented\nfun:posix_spawnattr_getschedparam=uninstrumented\nfun:posix_spawnattr_getschedpolicy=uninstrumented\nfun:posix_spawnattr_getsigdefault=uninstrumented\nfun:posix_spawnattr_getsigmask=uninstrumented\nfun:posix_s
pawnattr_init=uninstrumented\nfun:posix_spawnattr_setflags=uninstrumented\nfun:posix_spawnattr_setpgroup=uninstrumented\nfun:posix_spawnattr_setschedparam=uninstrumented\nfun:posix_spawnattr_setschedpolicy=uninstrumented\nfun:posix_spawnattr_setsigdefault=uninstrumented\nfun:posix_spawnattr_setsigmask=uninstrumented\nfun:posix_spawnp=uninstrumented\nfun:pow=uninstrumented\nfun:pow10=uninstrumented\nfun:pow10f=uninstrumented\nfun:pow10l=uninstrumented\nfun:powf=uninstrumented\nfun:powf128=uninstrumented\nfun:powf32=uninstrumented\nfun:powf32x=uninstrumented\nfun:powf64=uninstrumented\nfun:powf64x=uninstrumented\nfun:powl=uninstrumented\nfun:ppoll=uninstrumented\nfun:prctl=uninstrumented\nfun:pread=uninstrumented\nfun:pread64=uninstrumented\nfun:preadv=uninstrumented\nfun:preadv2=uninstrumented\nfun:preadv64=uninstrumented\nfun:preadv64v2=uninstrumented\nfun:printf=uninstrumented\nfun:printf_size=uninstrumented\nfun:printf_size_info=uninstrumented\nfun:prlimit=uninstrumented\nfun:prlimit64=uninstrumented\nfun:process_vm_readv=uninstrumented\nfun:process_vm_writev=uninstrumented\nfun:profil=uninstrumented\nfun:pselect=uninstrumented\nfun:psiginfo=uninstrumented\nfun:psignal=uninstrumented\nfun:pthread_atfork=uninstrumented\nfun:pthread_attr_destroy=uninstrumented\nfun:pthread_attr_getaffinity_np=uninstrumented\nfun:pthread_attr_getdetachstate=uninstrumented\nfun:pthread_attr_getguardsize=uninstrumented\nfun:pthread_attr_getinheritsched=uninstrumented\nfun:pthread_attr_getschedparam=uninstrumented\nfun:pthread_attr_getschedpolicy=uninstrumented\nfun:pthread_attr_getscope=uninstrumented\nfun:pthread_attr_getsigmask_np=uninstrumented\nfun:pthread_attr_getstack=uninstrumented\nfun:pthread_attr_getstackaddr=uninstrumented\nfun:pthread_attr_getstacksize=uninstrumented\nfun:pthread_attr_init=uninstrumented\nfun:pthread_attr_setaffinity_np=uninstrumented\nfun:pthread_attr_setdetachstate=uninstrumented\nfun:pthread_attr_setguardsize=uninstrumented\nfun:pthread_attr_setinheritsc
hed=uninstrumented\nfun:pthread_attr_setschedparam=uninstrumented\nfun:pthread_attr_setschedpolicy=uninstrumented\nfun:pthread_attr_setscope=uninstrumented\nfun:pthread_attr_setsigmask_np=uninstrumented\nfun:pthread_attr_setstack=uninstrumented\nfun:pthread_attr_setstackaddr=uninstrumented\nfun:pthread_attr_setstacksize=uninstrumented\nfun:pthread_barrier_destroy=uninstrumented\nfun:pthread_barrier_init=uninstrumented\nfun:pthread_barrier_wait=uninstrumented\nfun:pthread_barrierattr_destroy=uninstrumented\nfun:pthread_barrierattr_getpshared=uninstrumented\nfun:pthread_barrierattr_init=uninstrumented\nfun:pthread_barrierattr_setpshared=uninstrumented\nfun:pthread_cancel=uninstrumented\nfun:pthread_clockjoin_np=uninstrumented\nfun:pthread_cond_broadcast=uninstrumented\nfun:pthread_cond_clockwait=uninstrumented\nfun:pthread_cond_destroy=uninstrumented\nfun:pthread_cond_init=uninstrumented\nfun:pthread_cond_signal=uninstrumented\nfun:pthread_cond_timedwait=uninstrumented\nfun:pthread_cond_wait=uninstrumented\nfun:pthread_condattr_destroy=uninstrumented\nfun:pthread_condattr_getclock=uninstrumented\nfun:pthread_condattr_getpshared=uninstrumented\nfun:pthread_condattr_init=uninstrumented\nfun:pthread_condattr_setclock=uninstrumented\nfun:pthread_condattr_setpshared=uninstrumented\nfun:pthread_create=uninstrumented\nfun:pthread_detach=uninstrumented\nfun:pthread_equal=uninstrumented\nfun:pthread_exit=uninstrumented\nfun:pthread_getaffinity_np=uninstrumented\nfun:pthread_getattr_default_np=uninstrumented\nfun:pthread_getattr_np=uninstrumented\nfun:pthread_getconcurrency=uninstrumented\nfun:pthread_getcpuclockid=uninstrumented\nfun:pthread_getname_np=uninstrumented\nfun:pthread_getschedparam=uninstrumented\nfun:pthread_getspecific=uninstrumented\nfun:pthread_join=uninstrumented\nfun:pthread_key_create=uninstrumented\nfun:pthread_key_delete=uninstrumented\nfun:pthread_kill=uninstrumented\nfun:pthread_kill_other_threads_np=uninstrumented\nfun:pthread_mutex_clocklock=uninstrume
nted\nfun:pthread_mutex_consistent=uninstrumented\nfun:pthread_mutex_consistent_np=uninstrumented\nfun:pthread_mutex_destroy=uninstrumented\nfun:pthread_mutex_getprioceiling=uninstrumented\nfun:pthread_mutex_init=uninstrumented\nfun:pthread_mutex_lock=uninstrumented\nfun:pthread_mutex_setprioceiling=uninstrumented\nfun:pthread_mutex_timedlock=uninstrumented\nfun:pthread_mutex_trylock=uninstrumented\nfun:pthread_mutex_unlock=uninstrumented\nfun:pthread_mutexattr_destroy=uninstrumented\nfun:pthread_mutexattr_getkind_np=uninstrumented\nfun:pthread_mutexattr_getprioceiling=uninstrumented\nfun:pthread_mutexattr_getprotocol=uninstrumented\nfun:pthread_mutexattr_getpshared=uninstrumented\nfun:pthread_mutexattr_getrobust=uninstrumented\nfun:pthread_mutexattr_getrobust_np=uninstrumented\nfun:pthread_mutexattr_gettype=uninstrumented\nfun:pthread_mutexattr_init=uninstrumented\nfun:pthread_mutexattr_setkind_np=uninstrumented\nfun:pthread_mutexattr_setprioceiling=uninstrumented\nfun:pthread_mutexattr_setprotocol=uninstrumented\nfun:pthread_mutexattr_setpshared=uninstrumented\nfun:pthread_mutexattr_setrobust=uninstrumented\nfun:pthread_mutexattr_setrobust_np=uninstrumented\nfun:pthread_mutexattr_settype=uninstrumented\nfun:pthread_once=uninstrumented\nfun:pthread_rwlock_clockrdlock=uninstrumented\nfun:pthread_rwlock_clockwrlock=uninstrumented\nfun:pthread_rwlock_destroy=uninstrumented\nfun:pthread_rwlock_init=uninstrumented\nfun:pthread_rwlock_rdlock=uninstrumented\nfun:pthread_rwlock_timedrdlock=uninstrumented\nfun:pthread_rwlock_timedwrlock=uninstrumented\nfun:pthread_rwlock_tryrdlock=uninstrumented\nfun:pthread_rwlock_trywrlock=uninstrumented\nfun:pthread_rwlock_unlock=uninstrumented\nfun:pthread_rwlock_wrlock=uninstrumented\nfun:pthread_rwlockattr_destroy=uninstrumented\nfun:pthread_rwlockattr_getkind_np=uninstrumented\nfun:pthread_rwlockattr_getpshared=uninstrumented\nfun:pthread_rwlockattr_init=uninstrumented\nfun:pthread_rwlockattr_setkind_np=uninstrumented\nfun:pthread_rw
lockattr_setpshared=uninstrumented\nfun:pthread_self=uninstrumented\nfun:pthread_setaffinity_np=uninstrumented\nfun:pthread_setattr_default_np=uninstrumented\nfun:pthread_setcancelstate=uninstrumented\nfun:pthread_setcanceltype=uninstrumented\nfun:pthread_setconcurrency=uninstrumented\nfun:pthread_setname_np=uninstrumented\nfun:pthread_setschedparam=uninstrumented\nfun:pthread_setschedprio=uninstrumented\nfun:pthread_setspecific=uninstrumented\nfun:pthread_sigmask=uninstrumented\nfun:pthread_sigqueue=uninstrumented\nfun:pthread_spin_destroy=uninstrumented\nfun:pthread_spin_init=uninstrumented\nfun:pthread_spin_lock=uninstrumented\nfun:pthread_spin_trylock=uninstrumented\nfun:pthread_spin_unlock=uninstrumented\nfun:pthread_testcancel=uninstrumented\nfun:pthread_timedjoin_np=uninstrumented\nfun:pthread_tryjoin_np=uninstrumented\nfun:pthread_yield=uninstrumented\nfun:ptrace=uninstrumented\nfun:ptsname=uninstrumented\nfun:ptsname_r=uninstrumented\nfun:putc=uninstrumented\nfun:putc_unlocked=uninstrumented\nfun:putchar=uninstrumented\nfun:putchar_unlocked=uninstrumented\nfun:putenv=uninstrumented\nfun:putgrent=uninstrumented\nfun:putmsg=uninstrumented\nfun:putpmsg=uninstrumented\nfun:putpwent=uninstrumented\nfun:puts=uninstrumented\nfun:putsgent=uninstrumented\nfun:putspent=uninstrumented\nfun:pututline=uninstrumented\nfun:pututxline=uninstrumented\nfun:putw=uninstrumented\nfun:putwc=uninstrumented\nfun:putwc_unlocked=uninstrumented\nfun:putwchar=uninstrumented\nfun:putwchar_unlocked=uninstrumented\nfun:pvalloc=uninstrumented\nfun:pwrite=uninstrumented\nfun:pwrite64=uninstrumented\nfun:pwritev=uninstrumented\nfun:pwritev2=uninstrumented\nfun:pwritev64=uninstrumented\nfun:pwritev64v2=uninstrumented\nfun:qecvt=uninstrumented\nfun:qecvt_r=uninstrumented\nfun:qfcvt=uninstrumented\nfun:qfcvt_r=uninstrumented\nfun:qgcvt=uninstrumented\nfun:qsort=uninstrumented\nfun:qsort_r=uninstrumented\nfun:query_module=uninstrumented\nfun:quick_exit=uninstrumented\nfun:quotactl=uninstrumente
d\nfun:raise=uninstrumented\nfun:rand=uninstrumented\nfun:rand_r=uninstrumented\nfun:random=uninstrumented\nfun:random_r=uninstrumented\nfun:rawmemchr=uninstrumented\nfun:rcmd=uninstrumented\nfun:rcmd_af=uninstrumented\nfun:re_comp=uninstrumented\nfun:re_compile_fastmap=uninstrumented\nfun:re_compile_pattern=uninstrumented\nfun:re_exec=uninstrumented\nfun:re_match=uninstrumented\nfun:re_match_2=uninstrumented\nfun:re_search=uninstrumented\nfun:re_search_2=uninstrumented\nfun:re_set_registers=uninstrumented\nfun:re_set_syntax=uninstrumented\nfun:read=uninstrumented\nfun:readColdStartFile=uninstrumented\nfun:readahead=uninstrumented\nfun:readdir=uninstrumented\nfun:readdir64=uninstrumented\nfun:readdir64_r=uninstrumented\nfun:readdir_r=uninstrumented\nfun:readlink=uninstrumented\nfun:readlinkat=uninstrumented\nfun:readv=uninstrumented\nfun:realloc=uninstrumented\nfun:reallocarray=uninstrumented\nfun:realpath=uninstrumented\nfun:reboot=uninstrumented\nfun:recv=uninstrumented\nfun:recvfrom=uninstrumented\nfun:recvmmsg=uninstrumented\nfun:recvmsg=uninstrumented\nfun:regcomp=uninstrumented\nfun:regerror=uninstrumented\nfun:regexec=uninstrumented\nfun:regfree=uninstrumented\nfun:register_printf_function=uninstrumented\nfun:register_printf_modifier=uninstrumented\nfun:register_printf_specifier=uninstrumented\nfun:register_printf_type=uninstrumented\nfun:registerrpc=uninstrumented\nfun:remainder=uninstrumented\nfun:remainderf=uninstrumented\nfun:remainderf128=uninstrumented\nfun:remainderf32=uninstrumented\nfun:remainderf32x=uninstrumented\nfun:remainderf64=uninstrumented\nfun:remainderf64x=uninstrumented\nfun:remainderl=uninstrumented\nfun:remap_file_pages=uninstrumented\nfun:remove=uninstrumented\nfun:removexattr=uninstrumented\nfun:remque=uninstrumented\nfun:remquo=uninstrumented\nfun:remquof=uninstrumented\nfun:remquof128=uninstrumented\nfun:remquof32=uninstrumented\nfun:remquof32x=uninstrumented\nfun:remquof64=uninstrumented\nfun:remquof64x=uninstrumented\nfun:remquol=u
ninstrumented\nfun:rename=uninstrumented\nfun:renameat=uninstrumented\nfun:renameat2=uninstrumented\nfun:res_dnok=uninstrumented\nfun:res_gethostbyaddr=uninstrumented\nfun:res_gethostbyname=uninstrumented\nfun:res_gethostbyname2=uninstrumented\nfun:res_hnok=uninstrumented\nfun:res_mailok=uninstrumented\nfun:res_mkquery=uninstrumented\nfun:res_nmkquery=uninstrumented\nfun:res_nquery=uninstrumented\nfun:res_nquerydomain=uninstrumented\nfun:res_nsearch=uninstrumented\nfun:res_nsend=uninstrumented\nfun:res_ownok=uninstrumented\nfun:res_query=uninstrumented\nfun:res_querydomain=uninstrumented\nfun:res_search=uninstrumented\nfun:res_send=uninstrumented\nfun:res_send_setqhook=uninstrumented\nfun:res_send_setrhook=uninstrumented\nfun:revoke=uninstrumented\nfun:rewind=uninstrumented\nfun:rewinddir=uninstrumented\nfun:rexec=uninstrumented\nfun:rexec_af=uninstrumented\nfun:rindex=uninstrumented\nfun:rint=uninstrumented\nfun:rintf=uninstrumented\nfun:rintf128=uninstrumented\nfun:rintf32=uninstrumented\nfun:rintf32x=uninstrumented\nfun:rintf64=uninstrumented\nfun:rintf64x=uninstrumented\nfun:rintl=uninstrumented\nfun:rmdir=uninstrumented\nfun:round=uninstrumented\nfun:roundeven=uninstrumented\nfun:roundevenf=uninstrumented\nfun:roundevenf128=uninstrumented\nfun:roundevenf32=uninstrumented\nfun:roundevenf32x=uninstrumented\nfun:roundevenf64=uninstrumented\nfun:roundevenf64x=uninstrumented\nfun:roundevenl=uninstrumented\nfun:roundf=uninstrumented\nfun:roundf128=uninstrumented\nfun:roundf32=uninstrumented\nfun:roundf32x=uninstrumented\nfun:roundf64=uninstrumented\nfun:roundf64x=uninstrumented\nfun:roundl=uninstrumented\nfun:rpmatch=uninstrumented\nfun:rresvport=uninstrumented\nfun:rresvport_af=uninstrumented\nfun:rtime=uninstrumented\nfun:ruserok=uninstrumented\nfun:ruserok_af=uninstrumented\nfun:ruserpass=uninstrumented\nfun:sbrk=uninstrumented\nfun:scalb=uninstrumented\nfun:scalbf=uninstrumented\nfun:scalbl=uninstrumented\nfun:scalbln=uninstrumented\nfun:scalblnf=uninstrumented\n
fun:scalblnf128=uninstrumented\nfun:scalblnf32=uninstrumented\nfun:scalblnf32x=uninstrumented\nfun:scalblnf64=uninstrumented\nfun:scalblnf64x=uninstrumented\nfun:scalblnl=uninstrumented\nfun:scalbn=uninstrumented\nfun:scalbnf=uninstrumented\nfun:scalbnf128=uninstrumented\nfun:scalbnf32=uninstrumented\nfun:scalbnf32x=uninstrumented\nfun:scalbnf64=uninstrumented\nfun:scalbnf64x=uninstrumented\nfun:scalbnl=uninstrumented\nfun:scandir=uninstrumented\nfun:scandir64=uninstrumented\nfun:scandirat=uninstrumented\nfun:scandirat64=uninstrumented\nfun:scanf=uninstrumented\nfun:sched_get_priority_max=uninstrumented\nfun:sched_get_priority_min=uninstrumented\nfun:sched_getaffinity=uninstrumented\nfun:sched_getcpu=uninstrumented\nfun:sched_getparam=uninstrumented\nfun:sched_getscheduler=uninstrumented\nfun:sched_rr_get_interval=uninstrumented\nfun:sched_setaffinity=uninstrumented\nfun:sched_setparam=uninstrumented\nfun:sched_setscheduler=uninstrumented\nfun:sched_yield=uninstrumented\nfun:secure_getenv=uninstrumented\nfun:seed48=uninstrumented\nfun:seed48_r=uninstrumented\nfun:seekdir=uninstrumented\nfun:select=uninstrumented\nfun:sem_clockwait=uninstrumented\nfun:sem_close=uninstrumented\nfun:sem_destroy=uninstrumented\nfun:sem_getvalue=uninstrumented\nfun:sem_init=uninstrumented\nfun:sem_open=uninstrumented\nfun:sem_post=uninstrumented\nfun:sem_timedwait=uninstrumented\nfun:sem_trywait=uninstrumented\nfun:sem_unlink=uninstrumented\nfun:sem_wait=uninstrumented\nfun:semctl=uninstrumented\nfun:semget=uninstrumented\nfun:semop=uninstrumented\nfun:semtimedop=uninstrumented\nfun:send=uninstrumented\nfun:sendfile=uninstrumented\nfun:sendfile64=uninstrumented\nfun:sendmmsg=uninstrumented\nfun:sendmsg=uninstrumented\nfun:sendto=uninstrumented\nfun:setaliasent=uninstrumented\nfun:setbuf=uninstrumented\nfun:setbuffer=uninstrumented\nfun:setcontext=uninstrumented\nfun:setdomainname=uninstrumented\nfun:setegid=uninstrumented\nfun:setenv=uninstrumented\nfun:seteuid=uninstrumented\nfun:setfse
nt=uninstrumented\nfun:setfsgid=uninstrumented\nfun:setfsuid=uninstrumented\nfun:setgid=uninstrumented\nfun:setgrent=uninstrumented\nfun:setgroups=uninstrumented\nfun:sethostent=uninstrumented\nfun:sethostid=uninstrumented\nfun:sethostname=uninstrumented\nfun:setipv4sourcefilter=uninstrumented\nfun:setitimer=uninstrumented\nfun:setjmp=uninstrumented\nfun:setkey=uninstrumented\nfun:setkey_r=uninstrumented\nfun:setlinebuf=uninstrumented\nfun:setlocale=uninstrumented\nfun:setlogin=uninstrumented\nfun:setlogmask=uninstrumented\nfun:setmntent=uninstrumented\nfun:setnetent=uninstrumented\nfun:setnetgrent=uninstrumented\nfun:setns=uninstrumented\nfun:setpayload=uninstrumented\nfun:setpayloadf=uninstrumented\nfun:setpayloadf128=uninstrumented\nfun:setpayloadf32=uninstrumented\nfun:setpayloadf32x=uninstrumented\nfun:setpayloadf64=uninstrumented\nfun:setpayloadf64x=uninstrumented\nfun:setpayloadl=uninstrumented\nfun:setpayloadsig=uninstrumented\nfun:setpayloadsigf=uninstrumented\nfun:setpayloadsigf128=uninstrumented\nfun:setpayloadsigf32=uninstrumented\nfun:setpayloadsigf32x=uninstrumented\nfun:setpayloadsigf64=uninstrumented\nfun:setpayloadsigf64x=uninstrumented\nfun:setpayloadsigl=uninstrumented\nfun:setpgid=uninstrumented\nfun:setpgrp=uninstrumented\nfun:setpriority=uninstrumented\nfun:setprotoent=uninstrumented\nfun:setpwent=uninstrumented\nfun:setregid=uninstrumented\nfun:setresgid=uninstrumented\nfun:setresuid=uninstrumented\nfun:setreuid=uninstrumented\nfun:setrlimit=uninstrumented\nfun:setrlimit64=uninstrumented\nfun:setrpcent=uninstrumented\nfun:setservent=uninstrumented\nfun:setsgent=uninstrumented\nfun:setsid=uninstrumented\nfun:setsockopt=uninstrumented\nfun:setsourcefilter=uninstrumented\nfun:setspent=uninstrumented\nfun:setstate=uninstrumented\nfun:setstate_r=uninstrumented\nfun:settimeofday=uninstrumented\nfun:setttyent=uninstrumented\nfun:setuid=uninstrumented\nfun:setusershell=uninstrumented\nfun:setutent=uninstrumented\nfun:setutxent=uninstrumented\nfun:setv
buf=uninstrumented\nfun:setxattr=uninstrumented\nfun:sgetsgent=uninstrumented\nfun:sgetsgent_r=uninstrumented\nfun:sgetspent=uninstrumented\nfun:sgetspent_r=uninstrumented\nfun:shm_open=uninstrumented\nfun:shm_unlink=uninstrumented\nfun:shmat=uninstrumented\nfun:shmctl=uninstrumented\nfun:shmdt=uninstrumented\nfun:shmget=uninstrumented\nfun:shutdown=uninstrumented\nfun:sigabbrev_np=uninstrumented\nfun:sigaction=uninstrumented\nfun:sigaddset=uninstrumented\nfun:sigaltstack=uninstrumented\nfun:sigandset=uninstrumented\nfun:sigblock=uninstrumented\nfun:sigdelset=uninstrumented\nfun:sigdescr_np=uninstrumented\nfun:sigemptyset=uninstrumented\nfun:sigfillset=uninstrumented\nfun:siggetmask=uninstrumented\nfun:sighold=uninstrumented\nfun:sigignore=uninstrumented\nfun:siginterrupt=uninstrumented\nfun:sigisemptyset=uninstrumented\nfun:sigismember=uninstrumented\nfun:siglongjmp=uninstrumented\nfun:signal=uninstrumented\nfun:signalfd=uninstrumented\nfun:significand=uninstrumented\nfun:significandf=uninstrumented\nfun:significandl=uninstrumented\nfun:sigorset=uninstrumented\nfun:sigpause=uninstrumented\nfun:sigpending=uninstrumented\nfun:sigprocmask=uninstrumented\nfun:sigqueue=uninstrumented\nfun:sigrelse=uninstrumented\nfun:sigreturn=uninstrumented\nfun:sigset=uninstrumented\nfun:sigsetmask=uninstrumented\nfun:sigstack=uninstrumented\nfun:sigsuspend=uninstrumented\nfun:sigtimedwait=uninstrumented\nfun:sigvec=uninstrumented\nfun:sigwait=uninstrumented\nfun:sigwaitinfo=uninstrumented\nfun:sin=uninstrumented\nfun:sincos=uninstrumented\nfun:sincosf=uninstrumented\nfun:sincosf128=uninstrumented\nfun:sincosf32=uninstrumented\nfun:sincosf32x=uninstrumented\nfun:sincosf64=uninstrumented\nfun:sincosf64x=uninstrumented\nfun:sincosl=uninstrumented\nfun:sinf=uninstrumented\nfun:sinf128=uninstrumented\nfun:sinf32=uninstrumented\nfun:sinf32x=uninstrumented\nfun:sinf64=uninstrumented\nfun:sinf64x=uninstrumented\nfun:sinh=uninstrumented\nfun:sinhf=uninstrumented\nfun:sinhf128=uninstrumented\n
fun:sinhf32=uninstrumented\nfun:sinhf32x=uninstrumented\nfun:sinhf64=uninstrumented\nfun:sinhf64x=uninstrumented\nfun:sinhl=uninstrumented\nfun:sinl=uninstrumented\nfun:sleep=uninstrumented\nfun:snprintf=uninstrumented\nfun:sockatmark=uninstrumented\nfun:socket=uninstrumented\nfun:socketpair=uninstrumented\nfun:splice=uninstrumented\nfun:sprintf=uninstrumented\nfun:sprofil=uninstrumented\nfun:sqrt=uninstrumented\nfun:sqrtf=uninstrumented\nfun:sqrtf128=uninstrumented\nfun:sqrtf32=uninstrumented\nfun:sqrtf32x=uninstrumented\nfun:sqrtf64=uninstrumented\nfun:sqrtf64x=uninstrumented\nfun:sqrtl=uninstrumented\nfun:srand=uninstrumented\nfun:srand48=uninstrumented\nfun:srand48_r=uninstrumented\nfun:srandom=uninstrumented\nfun:srandom_r=uninstrumented\nfun:sscanf=uninstrumented\nfun:ssignal=uninstrumented\nfun:sstk=uninstrumented\nfun:stat=uninstrumented\nfun:stat64=uninstrumented\nfun:statfs=uninstrumented\nfun:statfs64=uninstrumented\nfun:statvfs=uninstrumented\nfun:statvfs64=uninstrumented\nfun:statx=uninstrumented\nfun:step=uninstrumented\nfun:stime=uninstrumented\nfun:stpcpy=uninstrumented\nfun:stpncpy=uninstrumented\nfun:strcasecmp=uninstrumented\nfun:strcasecmp_l=uninstrumented\nfun:strcasestr=uninstrumented\nfun:strcat=uninstrumented\nfun:strchr=uninstrumented\nfun:strchrnul=uninstrumented\nfun:strcmp=uninstrumented\nfun:strcoll=uninstrumented\nfun:strcoll_l=uninstrumented\nfun:strcpy=uninstrumented\nfun:strcspn=uninstrumented\nfun:strdup=uninstrumented\nfun:strerror=uninstrumented\nfun:strerror_l=uninstrumented\nfun:strerror_r=uninstrumented\nfun:strerrordesc_np=uninstrumented\nfun:strerrorname_np=uninstrumented\nfun:strfmon=uninstrumented\nfun:strfmon_l=uninstrumented\nfun:strfromd=uninstrumented\nfun:strfromf=uninstrumented\nfun:strfromf128=uninstrumented\nfun:strfromf32=uninstrumented\nfun:strfromf32x=uninstrumented\nfun:strfromf64=uninstrumented\nfun:strfromf64x=uninstrumented\nfun:strfroml=uninstrumented\nfun:strfry=uninstrumented\nfun:strftime=uninstrumented\n
fun:strftime_l=uninstrumented\nfun:strlen=uninstrumented\nfun:strncasecmp=uninstrumented\nfun:strncasecmp_l=uninstrumented\nfun:strncat=uninstrumented\nfun:strncmp=uninstrumented\nfun:strncpy=uninstrumented\nfun:strndup=uninstrumented\nfun:strnlen=uninstrumented\nfun:strpbrk=uninstrumented\nfun:strptime=uninstrumented\nfun:strptime_l=uninstrumented\nfun:strrchr=uninstrumented\nfun:strsep=uninstrumented\nfun:strsignal=uninstrumented\nfun:strspn=uninstrumented\nfun:strstr=uninstrumented\nfun:strtod=uninstrumented\nfun:strtod_l=uninstrumented\nfun:strtof=uninstrumented\nfun:strtof128=uninstrumented\nfun:strtof128_l=uninstrumented\nfun:strtof32=uninstrumented\nfun:strtof32_l=uninstrumented\nfun:strtof32x=uninstrumented\nfun:strtof32x_l=uninstrumented\nfun:strtof64=uninstrumented\nfun:strtof64_l=uninstrumented\nfun:strtof64x=uninstrumented\nfun:strtof64x_l=uninstrumented\nfun:strtof_l=uninstrumented\nfun:strtoimax=uninstrumented\nfun:strtok=uninstrumented\nfun:strtok_r=uninstrumented\nfun:strtol=uninstrumented\nfun:strtol_l=uninstrumented\nfun:strtold=uninstrumented\nfun:strtold_l=uninstrumented\nfun:strtoll=uninstrumented\nfun:strtoll_l=uninstrumented\nfun:strtoq=uninstrumented\nfun:strtoul=uninstrumented\nfun:strtoul_l=uninstrumented\nfun:strtoull=uninstrumented\nfun:strtoull_l=uninstrumented\nfun:strtoumax=uninstrumented\nfun:strtouq=uninstrumented\nfun:strverscmp=uninstrumented\nfun:strxfrm=uninstrumented\nfun:strxfrm_l=uninstrumented\nfun:stty=uninstrumented\nfun:svc_exit=uninstrumented\nfun:svc_getreq=uninstrumented\nfun:svc_getreq_common=uninstrumented\nfun:svc_getreq_poll=uninstrumented\nfun:svc_getreqset=uninstrumented\nfun:svc_register=uninstrumented\nfun:svc_run=uninstrumented\nfun:svc_sendreply=uninstrumented\nfun:svc_unregister=uninstrumented\nfun:svcerr_auth=uninstrumented\nfun:svcerr_decode=uninstrumented\nfun:svcerr_noproc=uninstrumented\nfun:svcerr_noprog=uninstrumented\nfun:svcerr_progvers=uninstrumented\nfun:svcerr_systemerr=uninstrumented\nfun:svcerr_
weakauth=uninstrumented\nfun:svcfd_create=uninstrumented\nfun:svcraw_create=uninstrumented\nfun:svctcp_create=uninstrumented\nfun:svcudp_bufcreate=uninstrumented\nfun:svcudp_create=uninstrumented\nfun:svcudp_enablecache=uninstrumented\nfun:svcunix_create=uninstrumented\nfun:svcunixfd_create=uninstrumented\nfun:swab=uninstrumented\nfun:swapcontext=uninstrumented\nfun:swapoff=uninstrumented\nfun:swapon=uninstrumented\nfun:swprintf=uninstrumented\nfun:swscanf=uninstrumented\nfun:symlink=uninstrumented\nfun:symlinkat=uninstrumented\nfun:sync=uninstrumented\nfun:sync_file_range=uninstrumented\nfun:syncfs=uninstrumented\nfun:syscall=uninstrumented\nfun:sysconf=uninstrumented\nfun:sysctl=uninstrumented\nfun:sysinfo=uninstrumented\nfun:syslog=uninstrumented\nfun:system=uninstrumented\nfun:sysv_signal=uninstrumented\nfun:tan=uninstrumented\nfun:tanf=uninstrumented\nfun:tanf128=uninstrumented\nfun:tanf32=uninstrumented\nfun:tanf32x=uninstrumented\nfun:tanf64=uninstrumented\nfun:tanf64x=uninstrumented\nfun:tanh=uninstrumented\nfun:tanhf=uninstrumented\nfun:tanhf128=uninstrumented\nfun:tanhf32=uninstrumented\nfun:tanhf32x=uninstrumented\nfun:tanhf64=uninstrumented\nfun:tanhf64x=uninstrumented\nfun:tanhl=uninstrumented\nfun:tanl=uninstrumented\nfun:tcdrain=uninstrumented\nfun:tcflow=uninstrumented\nfun:tcflush=uninstrumented\nfun:tcgetattr=uninstrumented\nfun:tcgetpgrp=uninstrumented\nfun:tcgetsid=uninstrumented\nfun:tcsendbreak=uninstrumented\nfun:tcsetattr=uninstrumented\nfun:tcsetpgrp=uninstrumented\nfun:td_init=uninstrumented\nfun:td_log=uninstrumented\nfun:td_symbol_list=uninstrumented\nfun:td_ta_clear_event=uninstrumented\nfun:td_ta_delete=uninstrumented\nfun:td_ta_enable_stats=uninstrumented\nfun:td_ta_event_addr=uninstrumented\nfun:td_ta_event_getmsg=uninstrumented\nfun:td_ta_get_nthreads=uninstrumented\nfun:td_ta_get_ph=uninstrumented\nfun:td_ta_get_stats=uninstrumented\nfun:td_ta_map_id2thr=uninstrumented\nfun:td_ta_map_lwp2thr=uninstrumented\nfun:td_ta_new=uninstrumen
ted\nfun:td_ta_reset_stats=uninstrumented\nfun:td_ta_set_event=uninstrumented\nfun:td_ta_setconcurrency=uninstrumented\nfun:td_ta_thr_iter=uninstrumented\nfun:td_ta_tsd_iter=uninstrumented\nfun:td_thr_clear_event=uninstrumented\nfun:td_thr_dbresume=uninstrumented\nfun:td_thr_dbsuspend=uninstrumented\nfun:td_thr_event_enable=uninstrumented\nfun:td_thr_event_getmsg=uninstrumented\nfun:td_thr_get_info=uninstrumented\nfun:td_thr_getfpregs=uninstrumented\nfun:td_thr_getgregs=uninstrumented\nfun:td_thr_getxregs=uninstrumented\nfun:td_thr_getxregsize=uninstrumented\nfun:td_thr_set_event=uninstrumented\nfun:td_thr_setfpregs=uninstrumented\nfun:td_thr_setgregs=uninstrumented\nfun:td_thr_setprio=uninstrumented\nfun:td_thr_setsigpending=uninstrumented\nfun:td_thr_setxregs=uninstrumented\nfun:td_thr_sigsetmask=uninstrumented\nfun:td_thr_tls_get_addr=uninstrumented\nfun:td_thr_tlsbase=uninstrumented\nfun:td_thr_tsd=uninstrumented\nfun:td_thr_validate=uninstrumented\nfun:tdelete=uninstrumented\nfun:tdestroy=uninstrumented\nfun:tee=uninstrumented\nfun:telldir=uninstrumented\nfun:tempnam=uninstrumented\nfun:textdomain=uninstrumented\nfun:tfind=uninstrumented\nfun:tgamma=uninstrumented\nfun:tgammaf=uninstrumented\nfun:tgammaf128=uninstrumented\nfun:tgammaf32=uninstrumented\nfun:tgammaf32x=uninstrumented\nfun:tgammaf64=uninstrumented\nfun:tgammaf64x=uninstrumented\nfun:tgammal=uninstrumented\nfun:tgkill=uninstrumented\nfun:thrd_create=uninstrumented\nfun:thrd_current=uninstrumented\nfun:thrd_detach=uninstrumented\nfun:thrd_equal=uninstrumented\nfun:thrd_exit=uninstrumented\nfun:thrd_join=uninstrumented\nfun:thrd_sleep=uninstrumented\nfun:thrd_yield=uninstrumented\nfun:time=uninstrumented\nfun:timegm=uninstrumented\nfun:timelocal=uninstrumented\nfun:timer_create=uninstrumented\nfun:timer_delete=uninstrumented\nfun:timer_getoverrun=uninstrumented\nfun:timer_gettime=uninstrumented\nfun:timer_settime=uninstrumented\nfun:timerfd_create=uninstrumented\nfun:timerfd_gettime=uninstrumented\nf
un:timerfd_settime=uninstrumented\nfun:times=uninstrumented\nfun:timespec_get=uninstrumented\nfun:timespec_getres=uninstrumented\nfun:tmpfile=uninstrumented\nfun:tmpfile64=uninstrumented\nfun:tmpnam=uninstrumented\nfun:tmpnam_r=uninstrumented\nfun:toascii=uninstrumented\nfun:tolower=uninstrumented\nfun:tolower_l=uninstrumented\nfun:totalorder=uninstrumented\nfun:totalorderf=uninstrumented\nfun:totalorderf128=uninstrumented\nfun:totalorderf32=uninstrumented\nfun:totalorderf32x=uninstrumented\nfun:totalorderf64=uninstrumented\nfun:totalorderf64x=uninstrumented\nfun:totalorderl=uninstrumented\nfun:totalordermag=uninstrumented\nfun:totalordermagf=uninstrumented\nfun:totalordermagf128=uninstrumented\nfun:totalordermagf32=uninstrumented\nfun:totalordermagf32x=uninstrumented\nfun:totalordermagf64=uninstrumented\nfun:totalordermagf64x=uninstrumented\nfun:totalordermagl=uninstrumented\nfun:toupper=uninstrumented\nfun:toupper_l=uninstrumented\nfun:towctrans=uninstrumented\nfun:towctrans_l=uninstrumented\nfun:towlower=uninstrumented\nfun:towlower_l=uninstrumented\nfun:towupper=uninstrumented\nfun:towupper_l=uninstrumented\nfun:tr_break=uninstrumented\nfun:trunc=uninstrumented\nfun:truncate=uninstrumented\nfun:truncate64=uninstrumented\nfun:truncf=uninstrumented\nfun:truncf128=uninstrumented\nfun:truncf32=uninstrumented\nfun:truncf32x=uninstrumented\nfun:truncf64=uninstrumented\nfun:truncf64x=uninstrumented\nfun:truncl=uninstrumented\nfun:tsearch=uninstrumented\nfun:tss_create=uninstrumented\nfun:tss_delete=uninstrumented\nfun:tss_get=uninstrumented\nfun:tss_set=uninstrumented\nfun:ttyname=uninstrumented\nfun:ttyname_r=uninstrumented\nfun:ttyslot=uninstrumented\nfun:twalk=uninstrumented\nfun:twalk_r=uninstrumented\nfun:tzset=uninstrumented\nfun:ualarm=uninstrumented\nfun:ufromfp=uninstrumented\nfun:ufromfpf=uninstrumented\nfun:ufromfpf128=uninstrumented\nfun:ufromfpf32=uninstrumented\nfun:ufromfpf32x=uninstrumented\nfun:ufromfpf64=uninstrumented\nfun:ufromfpf64x=uninstrumented\
nfun:ufromfpl=uninstrumented\nfun:ufromfpx=uninstrumented\nfun:ufromfpxf=uninstrumented\nfun:ufromfpxf128=uninstrumented\nfun:ufromfpxf32=uninstrumented\nfun:ufromfpxf32x=uninstrumented\nfun:ufromfpxf64=uninstrumented\nfun:ufromfpxf64x=uninstrumented\nfun:ufromfpxl=uninstrumented\nfun:ulckpwdf=uninstrumented\nfun:ulimit=uninstrumented\nfun:umask=uninstrumented\nfun:umount=uninstrumented\nfun:umount2=uninstrumented\nfun:uname=uninstrumented\nfun:ungetc=uninstrumented\nfun:ungetwc=uninstrumented\nfun:unlink=uninstrumented\nfun:unlinkat=uninstrumented\nfun:unlockpt=uninstrumented\nfun:unsetenv=uninstrumented\nfun:unshare=uninstrumented\nfun:updwtmp=uninstrumented\nfun:updwtmpx=uninstrumented\nfun:uselib=uninstrumented\nfun:uselocale=uninstrumented\nfun:user2netname=uninstrumented\nfun:usleep=uninstrumented\nfun:ustat=uninstrumented\nfun:utime=uninstrumented\nfun:utimensat=uninstrumented\nfun:utimes=uninstrumented\nfun:utmpname=uninstrumented\nfun:utmpxname=uninstrumented\nfun:valloc=uninstrumented\nfun:vasprintf=uninstrumented\nfun:vdprintf=uninstrumented\nfun:verr=uninstrumented\nfun:verrx=uninstrumented\nfun:versionsort=uninstrumented\nfun:versionsort64=uninstrumented\nfun:vfork=uninstrumented\nfun:vfprintf=uninstrumented\nfun:vfscanf=uninstrumented\nfun:vfwprintf=uninstrumented\nfun:vfwscanf=uninstrumented\nfun:vhangup=uninstrumented\nfun:vlimit=uninstrumented\nfun:vmsplice=uninstrumented\nfun:vprintf=uninstrumented\nfun:vscanf=uninstrumented\nfun:vsnprintf=uninstrumented\nfun:vsprintf=uninstrumented\nfun:vsscanf=uninstrumented\nfun:vswprintf=uninstrumented\nfun:vswscanf=uninstrumented\nfun:vsyslog=uninstrumented\nfun:vtimes=uninstrumented\nfun:vwarn=uninstrumented\nfun:vwarnx=uninstrumented\nfun:vwprintf=uninstrumented\nfun:vwscanf=uninstrumented\nfun:wait=uninstrumented\nfun:wait3=uninstrumented\nfun:wait4=uninstrumented\nfun:waitid=uninstrumented\nfun:waitpid=uninstrumented\nfun:warn=uninstrumented\nfun:warnx=uninstrumented\nfun:wcpcpy=uninstrumented\nfun:wcpncpy
=uninstrumented\nfun:wcrtomb=uninstrumented\nfun:wcscasecmp=uninstrumented\nfun:wcscasecmp_l=uninstrumented\nfun:wcscat=uninstrumented\nfun:wcschr=uninstrumented\nfun:wcschrnul=uninstrumented\nfun:wcscmp=uninstrumented\nfun:wcscoll=uninstrumented\nfun:wcscoll_l=uninstrumented\nfun:wcscpy=uninstrumented\nfun:wcscspn=uninstrumented\nfun:wcsdup=uninstrumented\nfun:wcsftime=uninstrumented\nfun:wcsftime_l=uninstrumented\nfun:wcslen=uninstrumented\nfun:wcsncasecmp=uninstrumented\nfun:wcsncasecmp_l=uninstrumented\nfun:wcsncat=uninstrumented\nfun:wcsncmp=uninstrumented\nfun:wcsncpy=uninstrumented\nfun:wcsnlen=uninstrumented\nfun:wcsnrtombs=uninstrumented\nfun:wcspbrk=uninstrumented\nfun:wcsrchr=uninstrumented\nfun:wcsrtombs=uninstrumented\nfun:wcsspn=uninstrumented\nfun:wcsstr=uninstrumented\nfun:wcstod=uninstrumented\nfun:wcstod_l=uninstrumented\nfun:wcstof=uninstrumented\nfun:wcstof128=uninstrumented\nfun:wcstof128_l=uninstrumented\nfun:wcstof32=uninstrumented\nfun:wcstof32_l=uninstrumented\nfun:wcstof32x=uninstrumented\nfun:wcstof32x_l=uninstrumented\nfun:wcstof64=uninstrumented\nfun:wcstof64_l=uninstrumented\nfun:wcstof64x=uninstrumented\nfun:wcstof64x_l=uninstrumented\nfun:wcstof_l=uninstrumented\nfun:wcstoimax=uninstrumented\nfun:wcstok=uninstrumented\nfun:wcstol=uninstrumented\nfun:wcstol_l=uninstrumented\nfun:wcstold=uninstrumented\nfun:wcstold_l=uninstrumented\nfun:wcstoll=uninstrumented\nfun:wcstoll_l=uninstrumented\nfun:wcstombs=uninstrumented\nfun:wcstoq=uninstrumented\nfun:wcstoul=uninstrumented\nfun:wcstoul_l=uninstrumented\nfun:wcstoull=uninstrumented\nfun:wcstoull_l=uninstrumented\nfun:wcstoumax=uninstrumented\nfun:wcstouq=uninstrumented\nfun:wcswcs=uninstrumented\nfun:wcswidth=uninstrumented\nfun:wcsxfrm=uninstrumented\nfun:wcsxfrm_l=uninstrumented\nfun:wctob=uninstrumented\nfun:wctomb=uninstrumented\nfun:wctrans=uninstrumented\nfun:wctrans_l=uninstrumented\nfun:wctype=uninstrumented\nfun:wctype_l=uninstrumented\nfun:wcwidth=uninstrumented\nfun:wmemchr=unin
strumented\nfun:wmemcmp=uninstrumented\nfun:wmemcpy=uninstrumented\nfun:wmemmove=uninstrumented\nfun:wmempcpy=uninstrumented\nfun:wmemset=uninstrumented\nfun:wordexp=uninstrumented\nfun:wordfree=uninstrumented\nfun:wprintf=uninstrumented\nfun:write=uninstrumented\nfun:writeColdStartFile=uninstrumented\nfun:writev=uninstrumented\nfun:wscanf=uninstrumented\nfun:xcrypt=uninstrumented\nfun:xcrypt_gensalt=uninstrumented\nfun:xcrypt_gensalt_r=uninstrumented\nfun:xcrypt_r=uninstrumented\nfun:xdecrypt=uninstrumented\nfun:xdr_accepted_reply=uninstrumented\nfun:xdr_array=uninstrumented\nfun:xdr_authdes_cred=uninstrumented\nfun:xdr_authdes_verf=uninstrumented\nfun:xdr_authunix_parms=uninstrumented\nfun:xdr_bool=uninstrumented\nfun:xdr_bytes=uninstrumented\nfun:xdr_callhdr=uninstrumented\nfun:xdr_callmsg=uninstrumented\nfun:xdr_cback_data=uninstrumented\nfun:xdr_char=uninstrumented\nfun:xdr_cryptkeyarg=uninstrumented\nfun:xdr_cryptkeyarg2=uninstrumented\nfun:xdr_cryptkeyres=uninstrumented\nfun:xdr_des_block=uninstrumented\nfun:xdr_domainname=uninstrumented\nfun:xdr_double=uninstrumented\nfun:xdr_enum=uninstrumented\nfun:xdr_float=uninstrumented\nfun:xdr_free=uninstrumented\nfun:xdr_getcredres=uninstrumented\nfun:xdr_hyper=uninstrumented\nfun:xdr_int=uninstrumented\nfun:xdr_int16_t=uninstrumented\nfun:xdr_int32_t=uninstrumented\nfun:xdr_int64_t=uninstrumented\nfun:xdr_int8_t=uninstrumented\nfun:xdr_key_netstarg=uninstrumented\nfun:xdr_key_netstres=uninstrumented\nfun:xdr_keybuf=uninstrumented\nfun:xdr_keydat=uninstrumented\nfun:xdr_keystatus=uninstrumented\nfun:xdr_long=uninstrumented\nfun:xdr_longlong_t=uninstrumented\nfun:xdr_mapname=uninstrumented\nfun:xdr_netnamestr=uninstrumented\nfun:xdr_netobj=uninstrumented\nfun:xdr_obj_p=uninstrumented\nfun:xdr_opaque=uninstrumented\nfun:xdr_opaque_auth=uninstrumented\nfun:xdr_peername=uninstrumented\nfun:xdr_pmap=uninstrumented\nfun:xdr_pmaplist=uninstrumented\nfun:xdr_pointer=uninstrumented\nfun:xdr_quad_t=uninstrumented\nfun:xdr_refe
rence=uninstrumented\nfun:xdr_rejected_reply=uninstrumented\nfun:xdr_replymsg=uninstrumented\nfun:xdr_rmtcall_args=uninstrumented\nfun:xdr_rmtcallres=uninstrumented\nfun:xdr_short=uninstrumented\nfun:xdr_sizeof=uninstrumented\nfun:xdr_string=uninstrumented\nfun:xdr_u_char=uninstrumented\nfun:xdr_u_hyper=uninstrumented\nfun:xdr_u_int=uninstrumented\nfun:xdr_u_long=uninstrumented\nfun:xdr_u_longlong_t=uninstrumented\nfun:xdr_u_quad_t=uninstrumented\nfun:xdr_u_short=uninstrumented\nfun:xdr_uint16_t=uninstrumented\nfun:xdr_uint32_t=uninstrumented\nfun:xdr_uint64_t=uninstrumented\nfun:xdr_uint8_t=uninstrumented\nfun:xdr_union=uninstrumented\nfun:xdr_unixcred=uninstrumented\nfun:xdr_valdat=uninstrumented\nfun:xdr_vector=uninstrumented\nfun:xdr_void=uninstrumented\nfun:xdr_wrapstring=uninstrumented\nfun:xdr_yp_buf=uninstrumented\nfun:xdr_ypall=uninstrumented\nfun:xdr_ypbind_binding=uninstrumented\nfun:xdr_ypbind_resp=uninstrumented\nfun:xdr_ypbind_resptype=uninstrumented\nfun:xdr_ypbind_setdom=uninstrumented\nfun:xdr_ypdelete_args=uninstrumented\nfun:xdr_ypmap_parms=uninstrumented\nfun:xdr_ypmaplist=uninstrumented\nfun:xdr_yppush_status=uninstrumented\nfun:xdr_yppushresp_xfr=uninstrumented\nfun:xdr_ypreq_key=uninstrumented\nfun:xdr_ypreq_nokey=uninstrumented\nfun:xdr_ypreq_xfr=uninstrumented\nfun:xdr_ypresp_all=uninstrumented\nfun:xdr_ypresp_key_val=uninstrumented\nfun:xdr_ypresp_maplist=uninstrumented\nfun:xdr_ypresp_master=uninstrumented\nfun:xdr_ypresp_order=uninstrumented\nfun:xdr_ypresp_val=uninstrumented\nfun:xdr_ypresp_xfr=uninstrumented\nfun:xdr_ypstat=uninstrumented\nfun:xdr_ypupdate_args=uninstrumented\nfun:xdr_ypxfrstat=uninstrumented\nfun:xdrmem_create=uninstrumented\nfun:xdrrec_create=uninstrumented\nfun:xdrrec_endofrecord=uninstrumented\nfun:xdrrec_eof=uninstrumented\nfun:xdrrec_skiprecord=uninstrumented\nfun:xdrstdio_create=uninstrumented\nfun:xencrypt=uninstrumented\nfun:xprt_register=uninstrumented\nfun:xprt_unregister=uninstrumented\nfun:y0=uninstrumented
\nfun:y0f=uninstrumented\nfun:y0f128=uninstrumented\nfun:y0f32=uninstrumented\nfun:y0f32x=uninstrumented\nfun:y0f64=uninstrumented\nfun:y0f64x=uninstrumented\nfun:y0l=uninstrumented\nfun:y1=uninstrumented\nfun:y1f=uninstrumented\nfun:y1f128=uninstrumented\nfun:y1f32=uninstrumented\nfun:y1f32x=uninstrumented\nfun:y1f64=uninstrumented\nfun:y1f64x=uninstrumented\nfun:y1l=uninstrumented\nfun:yn=uninstrumented\nfun:ynf=uninstrumented\nfun:ynf128=uninstrumented\nfun:ynf32=uninstrumented\nfun:ynf32x=uninstrumented\nfun:ynf64=uninstrumented\nfun:ynf64x=uninstrumented\nfun:ynl=uninstrumented\nfun:yp_all=uninstrumented\nfun:yp_bind=uninstrumented\nfun:yp_first=uninstrumented\nfun:yp_get_default_domain=uninstrumented\nfun:yp_maplist=uninstrumented\nfun:yp_master=uninstrumented\nfun:yp_match=uninstrumented\nfun:yp_next=uninstrumented\nfun:yp_order=uninstrumented\nfun:yp_unbind=uninstrumented\nfun:yp_update=uninstrumented\nfun:ypbinderr_string=uninstrumented\nfun:yperr_string=uninstrumented\nfun:ypprot_err=uninstrumented\n"
  },
  {
    "path": "runtime/dfsan/libc_ubuntu2404_abilist.txt",
    "content": "fun:_Exit=uninstrumented\nfun:_Fork=uninstrumented\nfun:_IO_adjust_column=uninstrumented\nfun:_IO_adjust_wcolumn=uninstrumented\nfun:_IO_default_doallocate=uninstrumented\nfun:_IO_default_finish=uninstrumented\nfun:_IO_default_pbackfail=uninstrumented\nfun:_IO_default_uflow=uninstrumented\nfun:_IO_default_xsgetn=uninstrumented\nfun:_IO_default_xsputn=uninstrumented\nfun:_IO_do_write=uninstrumented\nfun:_IO_doallocbuf=uninstrumented\nfun:_IO_enable_locks=uninstrumented\nfun:_IO_fclose=uninstrumented\nfun:_IO_fdopen=uninstrumented\nfun:_IO_feof=uninstrumented\nfun:_IO_ferror=uninstrumented\nfun:_IO_fflush=uninstrumented\nfun:_IO_fgetpos=uninstrumented\nfun:_IO_fgetpos64=uninstrumented\nfun:_IO_fgets=uninstrumented\nfun:_IO_file_attach=uninstrumented\nfun:_IO_file_close=uninstrumented\nfun:_IO_file_close_it=uninstrumented\nfun:_IO_file_doallocate=uninstrumented\nfun:_IO_file_finish=uninstrumented\nfun:_IO_file_fopen=uninstrumented\nfun:_IO_file_init=uninstrumented\nfun:_IO_file_open=uninstrumented\nfun:_IO_file_overflow=uninstrumented\nfun:_IO_file_read=uninstrumented\nfun:_IO_file_seek=uninstrumented\nfun:_IO_file_seekoff=uninstrumented\nfun:_IO_file_setbuf=uninstrumented\nfun:_IO_file_stat=uninstrumented\nfun:_IO_file_sync=uninstrumented\nfun:_IO_file_underflow=uninstrumented\nfun:_IO_file_write=uninstrumented\nfun:_IO_file_xsputn=uninstrumented\nfun:_IO_flockfile=uninstrumented\nfun:_IO_flush_all=uninstrumented\nfun:_IO_flush_all_linebuffered=uninstrumented\nfun:_IO_fopen=uninstrumented\nfun:_IO_fprintf=uninstrumented\nfun:_IO_fputs=uninstrumented\nfun:_IO_fread=uninstrumented\nfun:_IO_free_backup_area=uninstrumented\nfun:_IO_free_wbackup_area=uninstrumented\nfun:_IO_fsetpos=uninstrumented\nfun:_IO_fsetpos64=uninstrumented\nfun:_IO_ftell=uninstrumented\nfun:_IO_ftrylockfile=uninstrumented\nfun:_IO_funlockfile=uninstrumented\nfun:_IO_fwrite=uninstrumented\nfun:_IO_getc=uninstrumented\nfun:_IO_getline=uninstrumented\nfun:_IO_getline_info=uninstrumented
\nfun:_IO_gets=uninstrumented\nfun:_IO_init=uninstrumented\nfun:_IO_init_marker=uninstrumented\nfun:_IO_init_wmarker=uninstrumented\nfun:_IO_iter_begin=uninstrumented\nfun:_IO_iter_end=uninstrumented\nfun:_IO_iter_file=uninstrumented\nfun:_IO_iter_next=uninstrumented\nfun:_IO_least_wmarker=uninstrumented\nfun:_IO_link_in=uninstrumented\nfun:_IO_list_lock=uninstrumented\nfun:_IO_list_resetlock=uninstrumented\nfun:_IO_list_unlock=uninstrumented\nfun:_IO_marker_delta=uninstrumented\nfun:_IO_marker_difference=uninstrumented\nfun:_IO_padn=uninstrumented\nfun:_IO_peekc_locked=uninstrumented\nfun:_IO_popen=uninstrumented\nfun:_IO_printf=uninstrumented\nfun:_IO_proc_close=uninstrumented\nfun:_IO_proc_open=uninstrumented\nfun:_IO_putc=uninstrumented\nfun:_IO_puts=uninstrumented\nfun:_IO_remove_marker=uninstrumented\nfun:_IO_seekmark=uninstrumented\nfun:_IO_seekoff=uninstrumented\nfun:_IO_seekpos=uninstrumented\nfun:_IO_seekwmark=uninstrumented\nfun:_IO_setb=uninstrumented\nfun:_IO_setbuffer=uninstrumented\nfun:_IO_setvbuf=uninstrumented\nfun:_IO_sgetn=uninstrumented\nfun:_IO_sprintf=uninstrumented\nfun:_IO_sputbackc=uninstrumented\nfun:_IO_sputbackwc=uninstrumented\nfun:_IO_sscanf=uninstrumented\nfun:_IO_str_init_readonly=uninstrumented\nfun:_IO_str_init_static=uninstrumented\nfun:_IO_str_overflow=uninstrumented\nfun:_IO_str_pbackfail=uninstrumented\nfun:_IO_str_seekoff=uninstrumented\nfun:_IO_str_underflow=uninstrumented\nfun:_IO_sungetc=uninstrumented\nfun:_IO_sungetwc=uninstrumented\nfun:_IO_switch_to_get_mode=uninstrumented\nfun:_IO_switch_to_main_wget_area=uninstrumented\nfun:_IO_switch_to_wbackup_area=uninstrumented\nfun:_IO_switch_to_wget_mode=uninstrumented\nfun:_IO_un_link=uninstrumented\nfun:_IO_ungetc=uninstrumented\nfun:_IO_unsave_markers=uninstrumented\nfun:_IO_unsave_wmarkers=uninstrumented\nfun:_IO_vfprintf=uninstrumented\nfun:_IO_vfscanf=uninstrumented\nfun:_IO_vsprintf=uninstrumented\nfun:_IO_wdefault_doallocate=uninstrumented\nfun:_IO_wdefault_finish=uninst
rumented\nfun:_IO_wdefault_pbackfail=uninstrumented\nfun:_IO_wdefault_uflow=uninstrumented\nfun:_IO_wdefault_xsgetn=uninstrumented\nfun:_IO_wdefault_xsputn=uninstrumented\nfun:_IO_wdo_write=uninstrumented\nfun:_IO_wdoallocbuf=uninstrumented\nfun:_IO_wfile_overflow=uninstrumented\nfun:_IO_wfile_seekoff=uninstrumented\nfun:_IO_wfile_sync=uninstrumented\nfun:_IO_wfile_underflow=uninstrumented\nfun:_IO_wfile_xsputn=uninstrumented\nfun:_IO_wmarker_delta=uninstrumented\nfun:_IO_wsetb=uninstrumented\nfun:_Unwind_Backtrace=uninstrumented\nfun:_Unwind_DeleteException=uninstrumented\nfun:_Unwind_FindEnclosingFunction=uninstrumented\nfun:_Unwind_Find_FDE=uninstrumented\nfun:_Unwind_ForcedUnwind=uninstrumented\nfun:_Unwind_GetCFA=uninstrumented\nfun:_Unwind_GetDataRelBase=uninstrumented\nfun:_Unwind_GetGR=uninstrumented\nfun:_Unwind_GetIP=uninstrumented\nfun:_Unwind_GetIPInfo=uninstrumented\nfun:_Unwind_GetLanguageSpecificData=uninstrumented\nfun:_Unwind_GetRegionStart=uninstrumented\nfun:_Unwind_GetTextRelBase=uninstrumented\nfun:_Unwind_RaiseException=uninstrumented\nfun:_Unwind_Resume=uninstrumented\nfun:_Unwind_Resume_or_Rethrow=uninstrumented\nfun:_Unwind_SetGR=uninstrumented\nfun:_Unwind_SetIP=uninstrumented\nfun:__absvdi2=uninstrumented\nfun:__absvsi2=uninstrumented\nfun:__absvti2=uninstrumented\nfun:__acos_finite=uninstrumented\nfun:__acosf128_finite=uninstrumented\nfun:__acosf_finite=uninstrumented\nfun:__acosh_finite=uninstrumented\nfun:__acoshf128_finite=uninstrumented\nfun:__acoshf_finite=uninstrumented\nfun:__acoshl_finite=uninstrumented\nfun:__acosl_finite=uninstrumented\nfun:__addtf3=uninstrumented\nfun:__addvdi3=uninstrumented\nfun:__addvsi3=uninstrumented\nfun:__addvti3=uninstrumented\nfun:__adjtimex=uninstrumented\nfun:__arch_prctl=uninstrumented\nfun:__argz_count=uninstrumented\nfun:__argz_next=uninstrumented\nfun:__argz_stringify=uninstrumented\nfun:__ashlti3=uninstrumented\nfun:__ashrti3=uninstrumented\nfun:__asin_finite=uninstrumented\nfun:__asinf128_finit
e=uninstrumented\nfun:__asinf_finite=uninstrumented\nfun:__asinl_finite=uninstrumented\nfun:__asprintf=uninstrumented\nfun:__asprintf_chk=uninstrumented\nfun:__assert=uninstrumented\nfun:__assert_fail=uninstrumented\nfun:__assert_perror_fail=uninstrumented\nfun:__atan2_finite=uninstrumented\nfun:__atan2f128_finite=uninstrumented\nfun:__atan2f_finite=uninstrumented\nfun:__atan2l_finite=uninstrumented\nfun:__atanh_finite=uninstrumented\nfun:__atanhf128_finite=uninstrumented\nfun:__atanhf_finite=uninstrumented\nfun:__atanhl_finite=uninstrumented\nfun:__avx_resms64_12=uninstrumented\nfun:__avx_resms64_13=uninstrumented\nfun:__avx_resms64_14=uninstrumented\nfun:__avx_resms64_15=uninstrumented\nfun:__avx_resms64_16=uninstrumented\nfun:__avx_resms64_17=uninstrumented\nfun:__avx_resms64_18=uninstrumented\nfun:__avx_resms64f_12=uninstrumented\nfun:__avx_resms64f_13=uninstrumented\nfun:__avx_resms64f_14=uninstrumented\nfun:__avx_resms64f_15=uninstrumented\nfun:__avx_resms64f_16=uninstrumented\nfun:__avx_resms64f_17=uninstrumented\nfun:__avx_resms64fx_12=uninstrumented\nfun:__avx_resms64fx_13=uninstrumented\nfun:__avx_resms64fx_14=uninstrumented\nfun:__avx_resms64fx_15=uninstrumented\nfun:__avx_resms64fx_16=uninstrumented\nfun:__avx_resms64fx_17=uninstrumented\nfun:__avx_resms64x_12=uninstrumented\nfun:__avx_resms64x_13=uninstrumented\nfun:__avx_resms64x_14=uninstrumented\nfun:__avx_resms64x_15=uninstrumented\nfun:__avx_resms64x_16=uninstrumented\nfun:__avx_resms64x_17=uninstrumented\nfun:__avx_resms64x_18=uninstrumented\nfun:__avx_savms64_12=uninstrumented\nfun:__avx_savms64_13=uninstrumented\nfun:__avx_savms64_14=uninstrumented\nfun:__avx_savms64_15=uninstrumented\nfun:__avx_savms64_16=uninstrumented\nfun:__avx_savms64_17=uninstrumented\nfun:__avx_savms64_18=uninstrumented\nfun:__avx_savms64f_12=uninstrumented\nfun:__avx_savms64f_13=uninstrumented\nfun:__avx_savms64f_14=uninstrumented\nfun:__avx_savms64f_15=uninstrumented\nfun:__avx_savms64f_16=uninstrumented\nfun:__avx_savm
s64f_17=uninstrumented\nfun:__b64_ntop=uninstrumented\nfun:__b64_pton=uninstrumented\nfun:__backtrace=uninstrumented\nfun:__backtrace_symbols=uninstrumented\nfun:__backtrace_symbols_fd=uninstrumented\nfun:__bid128_abs=uninstrumented\nfun:__bid128_add=uninstrumented\nfun:__bid128_class=uninstrumented\nfun:__bid128_copy=uninstrumented\nfun:__bid128_copySign=uninstrumented\nfun:__bid128_div=uninstrumented\nfun:__bid128_fma=uninstrumented\nfun:__bid128_from_int32=uninstrumented\nfun:__bid128_from_int64=uninstrumented\nfun:__bid128_from_uint32=uninstrumented\nfun:__bid128_from_uint64=uninstrumented\nfun:__bid128_isCanonical=uninstrumented\nfun:__bid128_isFinite=uninstrumented\nfun:__bid128_isInf=uninstrumented\nfun:__bid128_isNaN=uninstrumented\nfun:__bid128_isNormal=uninstrumented\nfun:__bid128_isSignaling=uninstrumented\nfun:__bid128_isSigned=uninstrumented\nfun:__bid128_isSubnormal=uninstrumented\nfun:__bid128_isZero=uninstrumented\nfun:__bid128_mul=uninstrumented\nfun:__bid128_negate=uninstrumented\nfun:__bid128_quiet_equal=uninstrumented\nfun:__bid128_quiet_greater=uninstrumented\nfun:__bid128_quiet_greater_equal=uninstrumented\nfun:__bid128_quiet_greater_unordered=uninstrumented\nfun:__bid128_quiet_less=uninstrumented\nfun:__bid128_quiet_less_equal=uninstrumented\nfun:__bid128_quiet_less_unordered=uninstrumented\nfun:__bid128_quiet_not_equal=uninstrumented\nfun:__bid128_quiet_not_greater=uninstrumented\nfun:__bid128_quiet_not_less=uninstrumented\nfun:__bid128_quiet_ordered=uninstrumented\nfun:__bid128_quiet_unordered=uninstrumented\nfun:__bid128_radix=uninstrumented\nfun:__bid128_sameQuantum=uninstrumented\nfun:__bid128_signaling_greater=uninstrumented\nfun:__bid128_signaling_greater_equal=uninstrumented\nfun:__bid128_signaling_greater_unordered=uninstrumented\nfun:__bid128_signaling_less=uninstrumented\nfun:__bid128_signaling_less_equal=uninstrumented\nfun:__bid128_signaling_less_unordered=uninstrumented\nfun:__bid128_signaling_not_greater=uninstrumented\nfun:__bi
d128_signaling_not_less=uninstrumented\nfun:__bid128_sub=uninstrumented\nfun:__bid128_to_bid32=uninstrumented\nfun:__bid128_to_bid64=uninstrumented\nfun:__bid128_to_binary128=uninstrumented\nfun:__bid128_to_binary32=uninstrumented\nfun:__bid128_to_binary64=uninstrumented\nfun:__bid128_to_binary80=uninstrumented\nfun:__bid128_to_int32_ceil=uninstrumented\nfun:__bid128_to_int32_floor=uninstrumented\nfun:__bid128_to_int32_int=uninstrumented\nfun:__bid128_to_int32_rnint=uninstrumented\nfun:__bid128_to_int32_rninta=uninstrumented\nfun:__bid128_to_int32_xceil=uninstrumented\nfun:__bid128_to_int32_xfloor=uninstrumented\nfun:__bid128_to_int32_xint=uninstrumented\nfun:__bid128_to_int32_xrnint=uninstrumented\nfun:__bid128_to_int32_xrninta=uninstrumented\nfun:__bid128_to_int64_ceil=uninstrumented\nfun:__bid128_to_int64_floor=uninstrumented\nfun:__bid128_to_int64_int=uninstrumented\nfun:__bid128_to_int64_rnint=uninstrumented\nfun:__bid128_to_int64_rninta=uninstrumented\nfun:__bid128_to_int64_xceil=uninstrumented\nfun:__bid128_to_int64_xfloor=uninstrumented\nfun:__bid128_to_int64_xint=uninstrumented\nfun:__bid128_to_int64_xrnint=uninstrumented\nfun:__bid128_to_int64_xrninta=uninstrumented\nfun:__bid128_to_uint32_ceil=uninstrumented\nfun:__bid128_to_uint32_floor=uninstrumented\nfun:__bid128_to_uint32_int=uninstrumented\nfun:__bid128_to_uint32_rnint=uninstrumented\nfun:__bid128_to_uint32_rninta=uninstrumented\nfun:__bid128_to_uint32_xceil=uninstrumented\nfun:__bid128_to_uint32_xfloor=uninstrumented\nfun:__bid128_to_uint32_xint=uninstrumented\nfun:__bid128_to_uint32_xrnint=uninstrumented\nfun:__bid128_to_uint32_xrninta=uninstrumented\nfun:__bid128_to_uint64_ceil=uninstrumented\nfun:__bid128_to_uint64_floor=uninstrumented\nfun:__bid128_to_uint64_int=uninstrumented\nfun:__bid128_to_uint64_rnint=uninstrumented\nfun:__bid128_to_uint64_rninta=uninstrumented\nfun:__bid128_to_uint64_xceil=uninstrumented\nfun:__bid128_to_uint64_xfloor=uninstrumented\nfun:__bid128_to_uint64_xint=uninstrumen
ted\nfun:__bid128_to_uint64_xrnint=uninstrumented\nfun:__bid128_to_uint64_xrninta=uninstrumented\nfun:__bid128_totalOrder=uninstrumented\nfun:__bid128_totalOrderMag=uninstrumented\nfun:__bid128dd_add=uninstrumented\nfun:__bid128dd_div=uninstrumented\nfun:__bid128dd_mul=uninstrumented\nfun:__bid128dd_sub=uninstrumented\nfun:__bid128ddd_fma=uninstrumented\nfun:__bid128ddq_fma=uninstrumented\nfun:__bid128dq_add=uninstrumented\nfun:__bid128dq_div=uninstrumented\nfun:__bid128dq_mul=uninstrumented\nfun:__bid128dq_sub=uninstrumented\nfun:__bid128dqd_fma=uninstrumented\nfun:__bid128dqq_fma=uninstrumented\nfun:__bid128qd_add=uninstrumented\nfun:__bid128qd_div=uninstrumented\nfun:__bid128qd_mul=uninstrumented\nfun:__bid128qd_sub=uninstrumented\nfun:__bid128qdd_fma=uninstrumented\nfun:__bid128qdq_fma=uninstrumented\nfun:__bid128qqd_fma=uninstrumented\nfun:__bid32_to_bid128=uninstrumented\nfun:__bid32_to_bid64=uninstrumented\nfun:__bid32_to_binary128=uninstrumented\nfun:__bid32_to_binary32=uninstrumented\nfun:__bid32_to_binary64=uninstrumented\nfun:__bid32_to_binary80=uninstrumented\nfun:__bid64_abs=uninstrumented\nfun:__bid64_add=uninstrumented\nfun:__bid64_class=uninstrumented\nfun:__bid64_copy=uninstrumented\nfun:__bid64_copySign=uninstrumented\nfun:__bid64_div=uninstrumented\nfun:__bid64_from_int32=uninstrumented\nfun:__bid64_from_int64=uninstrumented\nfun:__bid64_from_uint32=uninstrumented\nfun:__bid64_from_uint64=uninstrumented\nfun:__bid64_isCanonical=uninstrumented\nfun:__bid64_isFinite=uninstrumented\nfun:__bid64_isInf=uninstrumented\nfun:__bid64_isNaN=uninstrumented\nfun:__bid64_isNormal=uninstrumented\nfun:__bid64_isSignaling=uninstrumented\nfun:__bid64_isSigned=uninstrumented\nfun:__bid64_isSubnormal=uninstrumented\nfun:__bid64_isZero=uninstrumented\nfun:__bid64_mul=uninstrumented\nfun:__bid64_negate=uninstrumented\nfun:__bid64_quiet_equal=uninstrumented\nfun:__bid64_quiet_greater=uninstrumented\nfun:__bid64_quiet_greater_equal=uninstrumented\nfun:__bid64_quiet_grea
ter_unordered=uninstrumented\nfun:__bid64_quiet_less=uninstrumented\nfun:__bid64_quiet_less_equal=uninstrumented\nfun:__bid64_quiet_less_unordered=uninstrumented\nfun:__bid64_quiet_not_equal=uninstrumented\nfun:__bid64_quiet_not_greater=uninstrumented\nfun:__bid64_quiet_not_less=uninstrumented\nfun:__bid64_quiet_ordered=uninstrumented\nfun:__bid64_quiet_unordered=uninstrumented\nfun:__bid64_radix=uninstrumented\nfun:__bid64_sameQuantum=uninstrumented\nfun:__bid64_signaling_greater=uninstrumented\nfun:__bid64_signaling_greater_equal=uninstrumented\nfun:__bid64_signaling_greater_unordered=uninstrumented\nfun:__bid64_signaling_less=uninstrumented\nfun:__bid64_signaling_less_equal=uninstrumented\nfun:__bid64_signaling_less_unordered=uninstrumented\nfun:__bid64_signaling_not_greater=uninstrumented\nfun:__bid64_signaling_not_less=uninstrumented\nfun:__bid64_sub=uninstrumented\nfun:__bid64_to_bid128=uninstrumented\nfun:__bid64_to_bid32=uninstrumented\nfun:__bid64_to_binary128=uninstrumented\nfun:__bid64_to_binary32=uninstrumented\nfun:__bid64_to_binary64=uninstrumented\nfun:__bid64_to_binary80=uninstrumented\nfun:__bid64_to_int32_ceil=uninstrumented\nfun:__bid64_to_int32_floor=uninstrumented\nfun:__bid64_to_int32_int=uninstrumented\nfun:__bid64_to_int32_rnint=uninstrumented\nfun:__bid64_to_int32_rninta=uninstrumented\nfun:__bid64_to_int32_xceil=uninstrumented\nfun:__bid64_to_int32_xfloor=uninstrumented\nfun:__bid64_to_int32_xint=uninstrumented\nfun:__bid64_to_int32_xrnint=uninstrumented\nfun:__bid64_to_int32_xrninta=uninstrumented\nfun:__bid64_to_int64_ceil=uninstrumented\nfun:__bid64_to_int64_floor=uninstrumented\nfun:__bid64_to_int64_int=uninstrumented\nfun:__bid64_to_int64_rnint=uninstrumented\nfun:__bid64_to_int64_rninta=uninstrumented\nfun:__bid64_to_int64_xceil=uninstrumented\nfun:__bid64_to_int64_xfloor=uninstrumented\nfun:__bid64_to_int64_xint=uninstrumented\nfun:__bid64_to_int64_xrnint=uninstrumented\nfun:__bid64_to_int64_xrninta=uninstrumented\nfun:__bid64_to_uin
t32_ceil=uninstrumented\nfun:__bid64_to_uint32_floor=uninstrumented\nfun:__bid64_to_uint32_int=uninstrumented\nfun:__bid64_to_uint32_rnint=uninstrumented\nfun:__bid64_to_uint32_rninta=uninstrumented\nfun:__bid64_to_uint32_xceil=uninstrumented\nfun:__bid64_to_uint32_xfloor=uninstrumented\nfun:__bid64_to_uint32_xint=uninstrumented\nfun:__bid64_to_uint32_xrnint=uninstrumented\nfun:__bid64_to_uint32_xrninta=uninstrumented\nfun:__bid64_to_uint64_ceil=uninstrumented\nfun:__bid64_to_uint64_floor=uninstrumented\nfun:__bid64_to_uint64_int=uninstrumented\nfun:__bid64_to_uint64_rnint=uninstrumented\nfun:__bid64_to_uint64_rninta=uninstrumented\nfun:__bid64_to_uint64_xceil=uninstrumented\nfun:__bid64_to_uint64_xfloor=uninstrumented\nfun:__bid64_to_uint64_xint=uninstrumented\nfun:__bid64_to_uint64_xrnint=uninstrumented\nfun:__bid64_to_uint64_xrninta=uninstrumented\nfun:__bid64_totalOrder=uninstrumented\nfun:__bid64_totalOrderMag=uninstrumented\nfun:__bid64ddq_fma=uninstrumented\nfun:__bid64dq_add=uninstrumented\nfun:__bid64dq_div=uninstrumented\nfun:__bid64dq_mul=uninstrumented\nfun:__bid64dq_sub=uninstrumented\nfun:__bid64dqd_fma=uninstrumented\nfun:__bid64dqq_fma=uninstrumented\nfun:__bid64qd_add=uninstrumented\nfun:__bid64qd_div=uninstrumented\nfun:__bid64qd_mul=uninstrumented\nfun:__bid64qd_sub=uninstrumented\nfun:__bid64qdd_fma=uninstrumented\nfun:__bid64qdq_fma=uninstrumented\nfun:__bid64qq_add=uninstrumented\nfun:__bid64qq_div=uninstrumented\nfun:__bid64qq_mul=uninstrumented\nfun:__bid64qq_sub=uninstrumented\nfun:__bid64qqd_fma=uninstrumented\nfun:__bid64qqq_fma=uninstrumented\nfun:__bid_adddd3=uninstrumented\nfun:__bid_addsd3=uninstrumented\nfun:__bid_addtd3=uninstrumented\nfun:__bid_divdd3=uninstrumented\nfun:__bid_divsd3=uninstrumented\nfun:__bid_divtd3=uninstrumented\nfun:__bid_eqdd2=uninstrumented\nfun:__bid_eqsd2=uninstrumented\nfun:__bid_eqtd2=uninstrumented\nfun:__bid_extendddtd2=uninstrumented\nfun:__bid_extendddtf=uninstrumented\nfun:__bid_extendddxf=uninstrument
ed\nfun:__bid_extenddfdd=uninstrumented\nfun:__bid_extenddftd=uninstrumented\nfun:__bid_extendsddd2=uninstrumented\nfun:__bid_extendsddf=uninstrumented\nfun:__bid_extendsdtd2=uninstrumented\nfun:__bid_extendsdtf=uninstrumented\nfun:__bid_extendsdxf=uninstrumented\nfun:__bid_extendsfdd=uninstrumented\nfun:__bid_extendsfsd=uninstrumented\nfun:__bid_extendsftd=uninstrumented\nfun:__bid_extendtftd=uninstrumented\nfun:__bid_extendxftd=uninstrumented\nfun:__bid_fixdddi=uninstrumented\nfun:__bid_fixddsi=uninstrumented\nfun:__bid_fixsddi=uninstrumented\nfun:__bid_fixsdsi=uninstrumented\nfun:__bid_fixtddi=uninstrumented\nfun:__bid_fixtdsi=uninstrumented\nfun:__bid_fixunsdddi=uninstrumented\nfun:__bid_fixunsddsi=uninstrumented\nfun:__bid_fixunssddi=uninstrumented\nfun:__bid_fixunssdsi=uninstrumented\nfun:__bid_fixunstddi=uninstrumented\nfun:__bid_fixunstdsi=uninstrumented\nfun:__bid_floatdidd=uninstrumented\nfun:__bid_floatdisd=uninstrumented\nfun:__bid_floatditd=uninstrumented\nfun:__bid_floatsidd=uninstrumented\nfun:__bid_floatsisd=uninstrumented\nfun:__bid_floatsitd=uninstrumented\nfun:__bid_floatunsdidd=uninstrumented\nfun:__bid_floatunsdisd=uninstrumented\nfun:__bid_floatunsditd=uninstrumented\nfun:__bid_floatunssidd=uninstrumented\nfun:__bid_floatunssisd=uninstrumented\nfun:__bid_floatunssitd=uninstrumented\nfun:__bid_gedd2=uninstrumented\nfun:__bid_gesd2=uninstrumented\nfun:__bid_getd2=uninstrumented\nfun:__bid_gtdd2=uninstrumented\nfun:__bid_gtsd2=uninstrumented\nfun:__bid_gttd2=uninstrumented\nfun:__bid_ledd2=uninstrumented\nfun:__bid_lesd2=uninstrumented\nfun:__bid_letd2=uninstrumented\nfun:__bid_ltdd2=uninstrumented\nfun:__bid_ltsd2=uninstrumented\nfun:__bid_lttd2=uninstrumented\nfun:__bid_muldd3=uninstrumented\nfun:__bid_mulsd3=uninstrumented\nfun:__bid_multd3=uninstrumented\nfun:__bid_nedd2=uninstrumented\nfun:__bid_nesd2=uninstrumented\nfun:__bid_netd2=uninstrumented\nfun:__bid_round128_19_38=uninstrumented\nfun:__bid_round192_39_57=uninstrumented\nfun:__bid_rou
nd256_58_76=uninstrumented\nfun:__bid_round64_2_18=uninstrumented\nfun:__bid_subdd3=uninstrumented\nfun:__bid_subsd3=uninstrumented\nfun:__bid_subtd3=uninstrumented\nfun:__bid_truncdddf=uninstrumented\nfun:__bid_truncddsd2=uninstrumented\nfun:__bid_truncddsf=uninstrumented\nfun:__bid_truncdfsd=uninstrumented\nfun:__bid_truncsdsf=uninstrumented\nfun:__bid_trunctddd2=uninstrumented\nfun:__bid_trunctddf=uninstrumented\nfun:__bid_trunctdsd2=uninstrumented\nfun:__bid_trunctdsf=uninstrumented\nfun:__bid_trunctdtf=uninstrumented\nfun:__bid_trunctdxf=uninstrumented\nfun:__bid_trunctfdd=uninstrumented\nfun:__bid_trunctfsd=uninstrumented\nfun:__bid_truncxfdd=uninstrumented\nfun:__bid_truncxfsd=uninstrumented\nfun:__bid_unorddd2=uninstrumented\nfun:__bid_unordsd2=uninstrumented\nfun:__bid_unordtd2=uninstrumented\nfun:__binary128_to_bid128=uninstrumented\nfun:__binary128_to_bid32=uninstrumented\nfun:__binary128_to_bid64=uninstrumented\nfun:__binary32_to_bid128=uninstrumented\nfun:__binary32_to_bid32=uninstrumented\nfun:__binary32_to_bid64=uninstrumented\nfun:__binary64_to_bid128=uninstrumented\nfun:__binary64_to_bid32=uninstrumented\nfun:__binary64_to_bid64=uninstrumented\nfun:__binary80_to_bid128=uninstrumented\nfun:__binary80_to_bid32=uninstrumented\nfun:__binary80_to_bid64=uninstrumented\nfun:__bsd_getpgrp=uninstrumented\nfun:__bswapdi2=uninstrumented\nfun:__bswapsi2=uninstrumented\nfun:__bzero=uninstrumented\nfun:__call_tls_dtors=uninstrumented\nfun:__chk_fail=uninstrumented\nfun:__clear_cache=uninstrumented\nfun:__clock_gettime=uninstrumented\nfun:__clog10=uninstrumented\nfun:__clog10f=uninstrumented\nfun:__clog10l=uninstrumented\nfun:__clone=uninstrumented\nfun:__close=uninstrumented\nfun:__close_nocancel=uninstrumented\nfun:__clrsbdi2=uninstrumented\nfun:__clrsbti2=uninstrumented\nfun:__clzdi2=uninstrumented\nfun:__clzti2=uninstrumented\nfun:__cmpti2=uninstrumented\nfun:__cmsg_nxthdr=uninstrumented\nfun:__confstr_chk=uninstrumented\nfun:__connect=uninstrumented\nfun:__co
py_grp=uninstrumented\nfun:__cosh_finite=uninstrumented\nfun:__coshf128_finite=uninstrumented\nfun:__coshf_finite=uninstrumented\nfun:__coshl_finite=uninstrumented\nfun:__cpu_indicator_init=uninstrumented\nfun:__create_ib_request=uninstrumented\nfun:__ctype_b_loc=uninstrumented\nfun:__ctype_get_mb_cur_max=uninstrumented\nfun:__ctype_init=uninstrumented\nfun:__ctype_tolower_loc=uninstrumented\nfun:__ctype_toupper_loc=uninstrumented\nfun:__ctzdi2=uninstrumented\nfun:__ctzti2=uninstrumented\nfun:__cxa_at_quick_exit=uninstrumented\nfun:__cxa_atexit=uninstrumented\nfun:__cxa_finalize=uninstrumented\nfun:__cxa_thread_atexit_impl=uninstrumented\nfun:__cyg_profile_func_enter=uninstrumented\nfun:__cyg_profile_func_exit=uninstrumented\nfun:__dcgettext=uninstrumented\nfun:__default_morecore=uninstrumented\nfun:__deregister_frame=uninstrumented\nfun:__deregister_frame_info=uninstrumented\nfun:__deregister_frame_info_bases=uninstrumented\nfun:__dfp_clear_except=uninstrumented\nfun:__dfp_get_round=uninstrumented\nfun:__dfp_raise_except=uninstrumented\nfun:__dfp_set_round=uninstrumented\nfun:__dfp_test_except=uninstrumented\nfun:__dgettext=uninstrumented\nfun:__divdc3=uninstrumented\nfun:__divhc3=uninstrumented\nfun:__divmodbitint4=uninstrumented\nfun:__divmodti4=uninstrumented\nfun:__divsc3=uninstrumented\nfun:__divtc3=uninstrumented\nfun:__divtf3=uninstrumented\nfun:__divti3=uninstrumented\nfun:__divxc3=uninstrumented\nfun:__dn_comp=uninstrumented\nfun:__dn_count_labels=uninstrumented\nfun:__dn_expand=uninstrumented\nfun:__dn_skipname=uninstrumented\nfun:__do_niscall3=uninstrumented\nfun:__dprintf_chk=uninstrumented\nfun:__dup2=uninstrumented\nfun:__duplocale=uninstrumented\nfun:__emutls_get_address=uninstrumented\nfun:__emutls_register_common=uninstrumented\nfun:__enable_execute_stack=uninstrumented\nfun:__endmntent=uninstrumented\nfun:__eprintf=uninstrumented\nfun:__eqhf2=uninstrumented\nfun:__eqtf2=uninstrumented\nfun:__errno_location=uninstrumented\nfun:__exp10_finite=uninst
rumented\nfun:__exp10f128_finite=uninstrumented\nfun:__exp10f_finite=uninstrumented\nfun:__exp10l_finite=uninstrumented\nfun:__exp2_finite=uninstrumented\nfun:__exp2f128_finite=uninstrumented\nfun:__exp2f_finite=uninstrumented\nfun:__exp2l_finite=uninstrumented\nfun:__exp_finite=uninstrumented\nfun:__expf128_finite=uninstrumented\nfun:__expf_finite=uninstrumented\nfun:__expl_finite=uninstrumented\nfun:__explicit_bzero_chk=uninstrumented\nfun:__extendbfsf2=uninstrumented\nfun:__extenddftf2=uninstrumented\nfun:__extendhfdf2=uninstrumented\nfun:__extendhfsf2=uninstrumented\nfun:__extendhftf2=uninstrumented\nfun:__extendhfxf2=uninstrumented\nfun:__extendsfdf2=uninstrumented\nfun:__extendsftf2=uninstrumented\nfun:__extendxftf2=uninstrumented\nfun:__fbufsize=uninstrumented\nfun:__fcntl=uninstrumented\nfun:__fdelt_chk=uninstrumented\nfun:__fdelt_warn=uninstrumented\nfun:__fentry__=uninstrumented\nfun:__ffs=uninstrumented\nfun:__ffsdi2=uninstrumented\nfun:__ffsti2=uninstrumented\nfun:__fgets_chk=uninstrumented\nfun:__fgets_unlocked_chk=uninstrumented\nfun:__fgetws_chk=uninstrumented\nfun:__fgetws_unlocked_chk=uninstrumented\nfun:__file_change_detection_for_fp=uninstrumented\nfun:__file_change_detection_for_path=uninstrumented\nfun:__file_change_detection_for_stat=uninstrumented\nfun:__file_is_unchanged=uninstrumented\nfun:__finite=uninstrumented\nfun:__finitef=uninstrumented\nfun:__finitef128=uninstrumented\nfun:__finitel=uninstrumented\nfun:__fixdfbitint=uninstrumented\nfun:__fixdfti=uninstrumented\nfun:__fixhfti=uninstrumented\nfun:__fixsfbitint=uninstrumented\nfun:__fixsfti=uninstrumented\nfun:__fixtfbitint=uninstrumented\nfun:__fixtfdi=uninstrumented\nfun:__fixtfsi=uninstrumented\nfun:__fixtfti=uninstrumented\nfun:__fixunsdfdi=uninstrumented\nfun:__fixunsdfti=uninstrumented\nfun:__fixunshfti=uninstrumented\nfun:__fixunssfdi=uninstrumented\nfun:__fixunssfti=uninstrumented\nfun:__fixunstfdi=uninstrumented\nfun:__fixunstfsi=uninstrumented\nfun:__fixunstfti=uninstrumented\n
fun:__fixunsxfdi=uninstrumented\nfun:__fixunsxfti=uninstrumented\nfun:__fixxfbitint=uninstrumented\nfun:__fixxfti=uninstrumented\nfun:__flbf=uninstrumented\nfun:__floatbitintbf=uninstrumented\nfun:__floatbitintdf=uninstrumented\nfun:__floatbitinthf=uninstrumented\nfun:__floatbitintsf=uninstrumented\nfun:__floatbitinttf=uninstrumented\nfun:__floatbitintxf=uninstrumented\nfun:__floatditf=uninstrumented\nfun:__floatsitf=uninstrumented\nfun:__floattibf=uninstrumented\nfun:__floattidf=uninstrumented\nfun:__floattihf=uninstrumented\nfun:__floattisf=uninstrumented\nfun:__floattitf=uninstrumented\nfun:__floattixf=uninstrumented\nfun:__floatunditf=uninstrumented\nfun:__floatunsitf=uninstrumented\nfun:__floatuntibf=uninstrumented\nfun:__floatuntidf=uninstrumented\nfun:__floatuntihf=uninstrumented\nfun:__floatuntisf=uninstrumented\nfun:__floatuntitf=uninstrumented\nfun:__floatuntixf=uninstrumented\nfun:__fmod_finite=uninstrumented\nfun:__fmodf128_finite=uninstrumented\nfun:__fmodf_finite=uninstrumented\nfun:__fmodl_finite=uninstrumented\nfun:__follow_path=uninstrumented\nfun:__fork=uninstrumented\nfun:__fortify_fail=uninstrumented\nfun:__fp_nquery=uninstrumented\nfun:__fp_query=uninstrumented\nfun:__fp_resstat=uninstrumented\nfun:__fpclassify=uninstrumented\nfun:__fpclassifyf=uninstrumented\nfun:__fpclassifyf128=uninstrumented\nfun:__fpclassifyl=uninstrumented\nfun:__fpending=uninstrumented\nfun:__fprintf_chk=uninstrumented\nfun:__fpurge=uninstrumented\nfun:__fread_chk=uninstrumented\nfun:__fread_unlocked_chk=uninstrumented\nfun:__freadable=uninstrumented\nfun:__freading=uninstrumented\nfun:__free_fdresult=uninstrumented\nfun:__freelocale=uninstrumented\nfun:__fseeko64=uninstrumented\nfun:__fsetlocking=uninstrumented\nfun:__fstat64=uninstrumented\nfun:__ftello64=uninstrumented\nfun:__fwprintf_chk=uninstrumented\nfun:__fwritable=uninstrumented\nfun:__fwriting=uninstrumented\nfun:__fxstat=uninstrumented\nfun:__fxstat64=uninstrumented\nfun:__fxstatat=uninstrumented\nfun:__fxstata
t64=uninstrumented\nfun:__gamma_r_finite=uninstrumented\nfun:__gammaf128_r_finite=uninstrumented\nfun:__gammaf_r_finite=uninstrumented\nfun:__gammal_r_finite=uninstrumented\nfun:__gcc_bcmp=uninstrumented\nfun:__gcc_nested_func_ptr_created=uninstrumented\nfun:__gcc_nested_func_ptr_deleted=uninstrumented\nfun:__gcc_personality_v0=uninstrumented\nfun:__gconv_create_spec=uninstrumented\nfun:__gconv_destroy_spec=uninstrumented\nfun:__gconv_get_alias_db=uninstrumented\nfun:__gconv_get_cache=uninstrumented\nfun:__gconv_get_modules_db=uninstrumented\nfun:__gconv_open=uninstrumented\nfun:__gconv_transliterate=uninstrumented\nfun:__generic_findstack=uninstrumented\nfun:__generic_morestack=uninstrumented\nfun:__generic_morestack_set_initial_sp=uninstrumented\nfun:__generic_releasestack=uninstrumented\nfun:__getauxval=uninstrumented\nfun:__getcwd_chk=uninstrumented\nfun:__getdelim=uninstrumented\nfun:__getdomainname_chk=uninstrumented\nfun:__getf2=uninstrumented\nfun:__getgroups_chk=uninstrumented\nfun:__gethostname_chk=uninstrumented\nfun:__getlogin_r_chk=uninstrumented\nfun:__getmntent_r=uninstrumented\nfun:__getpagesize=uninstrumented\nfun:__getpgid=uninstrumented\nfun:__getpid=uninstrumented\nfun:__getrlimit=uninstrumented\nfun:__gets_chk=uninstrumented\nfun:__gettimeofday=uninstrumented\nfun:__getwd_chk=uninstrumented\nfun:__gmtime_r=uninstrumented\nfun:__gttf2=uninstrumented\nfun:__h_errno_location=uninstrumented\nfun:__hardcfr_check=uninstrumented\nfun:__hostalias=uninstrumented\nfun:__hypot_finite=uninstrumented\nfun:__hypotf128_finite=uninstrumented\nfun:__hypotf_finite=uninstrumented\nfun:__hypotl_finite=uninstrumented\nfun:__idna_from_dns_encoding=uninstrumented\nfun:__idna_to_dns_encoding=uninstrumented\nfun:__inet6_scopeid_pton=uninstrumented\nfun:__inet_aton_exact=uninstrumented\nfun:__inet_pton_length=uninstrumented\nfun:__internal_endnetgrent=uninstrumented\nfun:__internal_getnetgrent_r=uninstrumented\nfun:__internal_setnetgrent=uninstrumented\nfun:__isalnum_l=u
ninstrumented\nfun:__isalpha_l=uninstrumented\nfun:__isascii_l=uninstrumented\nfun:__isblank_l=uninstrumented\nfun:__iscanonicall=uninstrumented\nfun:__iscntrl_l=uninstrumented\nfun:__isctype=uninstrumented\nfun:__isdigit_l=uninstrumented\nfun:__iseqsig=uninstrumented\nfun:__iseqsigf=uninstrumented\nfun:__iseqsigf128=uninstrumented\nfun:__iseqsigl=uninstrumented\nfun:__isgraph_l=uninstrumented\nfun:__isinf=uninstrumented\nfun:__isinff=uninstrumented\nfun:__isinff128=uninstrumented\nfun:__isinfl=uninstrumented\nfun:__islower_l=uninstrumented\nfun:__isnan=uninstrumented\nfun:__isnanf=uninstrumented\nfun:__isnanf128=uninstrumented\nfun:__isnanl=uninstrumented\nfun:__isoc23_fscanf=uninstrumented\nfun:__isoc23_fwscanf=uninstrumented\nfun:__isoc23_scanf=uninstrumented\nfun:__isoc23_sscanf=uninstrumented\nfun:__isoc23_strtoimax=uninstrumented\nfun:__isoc23_strtol=uninstrumented\nfun:__isoc23_strtol_l=uninstrumented\nfun:__isoc23_strtoll=uninstrumented\nfun:__isoc23_strtoll_l=uninstrumented\nfun:__isoc23_strtoul=uninstrumented\nfun:__isoc23_strtoul_l=uninstrumented\nfun:__isoc23_strtoull=uninstrumented\nfun:__isoc23_strtoull_l=uninstrumented\nfun:__isoc23_strtoumax=uninstrumented\nfun:__isoc23_swscanf=uninstrumented\nfun:__isoc23_vfscanf=uninstrumented\nfun:__isoc23_vfwscanf=uninstrumented\nfun:__isoc23_vscanf=uninstrumented\nfun:__isoc23_vsscanf=uninstrumented\nfun:__isoc23_vswscanf=uninstrumented\nfun:__isoc23_vwscanf=uninstrumented\nfun:__isoc23_wcstoimax=uninstrumented\nfun:__isoc23_wcstol=uninstrumented\nfun:__isoc23_wcstol_l=uninstrumented\nfun:__isoc23_wcstoll=uninstrumented\nfun:__isoc23_wcstoll_l=uninstrumented\nfun:__isoc23_wcstoul=uninstrumented\nfun:__isoc23_wcstoul_l=uninstrumented\nfun:__isoc23_wcstoull=uninstrumented\nfun:__isoc23_wcstoull_l=uninstrumented\nfun:__isoc23_wcstoumax=uninstrumented\nfun:__isoc23_wscanf=uninstrumented\nfun:__isoc99_fscanf=uninstrumented\nfun:__isoc99_fwscanf=uninstrumented\nfun:__isoc99_scanf=uninstrumented\nfun:__isoc99_sscanf=un
instrumented\nfun:__isoc99_swscanf=uninstrumented\nfun:__isoc99_vfscanf=uninstrumented\nfun:__isoc99_vfwscanf=uninstrumented\nfun:__isoc99_vscanf=uninstrumented\nfun:__isoc99_vsscanf=uninstrumented\nfun:__isoc99_vswscanf=uninstrumented\nfun:__isoc99_vwscanf=uninstrumented\nfun:__isoc99_wscanf=uninstrumented\nfun:__isprint_l=uninstrumented\nfun:__ispunct_l=uninstrumented\nfun:__issignaling=uninstrumented\nfun:__issignalingf=uninstrumented\nfun:__issignalingf128=uninstrumented\nfun:__issignalingl=uninstrumented\nfun:__isspace_l=uninstrumented\nfun:__isupper_l=uninstrumented\nfun:__iswalnum_l=uninstrumented\nfun:__iswalpha_l=uninstrumented\nfun:__iswblank_l=uninstrumented\nfun:__iswcntrl_l=uninstrumented\nfun:__iswctype=uninstrumented\nfun:__iswctype_l=uninstrumented\nfun:__iswdigit_l=uninstrumented\nfun:__iswgraph_l=uninstrumented\nfun:__iswlower_l=uninstrumented\nfun:__iswprint_l=uninstrumented\nfun:__iswpunct_l=uninstrumented\nfun:__iswspace_l=uninstrumented\nfun:__iswupper_l=uninstrumented\nfun:__iswxdigit_l=uninstrumented\nfun:__isxdigit_l=uninstrumented\nfun:__ivaliduser=uninstrumented\nfun:__j0_finite=uninstrumented\nfun:__j0f128_finite=uninstrumented\nfun:__j0f_finite=uninstrumented\nfun:__j0l_finite=uninstrumented\nfun:__j1_finite=uninstrumented\nfun:__j1f128_finite=uninstrumented\nfun:__j1f_finite=uninstrumented\nfun:__j1l_finite=uninstrumented\nfun:__jn_finite=uninstrumented\nfun:__jnf128_finite=uninstrumented\nfun:__jnf_finite=uninstrumented\nfun:__jnl_finite=uninstrumented\nfun:__letf2=uninstrumented\nfun:__lgamma_r_finite=uninstrumented\nfun:__lgammaf128_r_finite=uninstrumented\nfun:__lgammaf_r_finite=uninstrumented\nfun:__lgammal_r_finite=uninstrumented\nfun:__libanl_version_placeholder=uninstrumented\nfun:__libc_alloc_buffer_alloc_array=uninstrumented\nfun:__libc_alloc_buffer_allocate=uninstrumented\nfun:__libc_alloc_buffer_copy_bytes=uninstrumented\nfun:__libc_alloc_buffer_copy_string=uninstrumented\nfun:__libc_alloc_buffer_create_failure=uninstrumente
d\nfun:__libc_alloca_cutoff=uninstrumented\nfun:__libc_allocate_once_slow=uninstrumented\nfun:__libc_allocate_rtsig=uninstrumented\nfun:__libc_calloc=uninstrumented\nfun:__libc_clntudp_bufcreate=uninstrumented\nfun:__libc_current_sigrtmax=uninstrumented\nfun:__libc_current_sigrtmin=uninstrumented\nfun:__libc_dn_expand=uninstrumented\nfun:__libc_dn_skipname=uninstrumented\nfun:__libc_dynarray_at_failure=uninstrumented\nfun:__libc_dynarray_emplace_enlarge=uninstrumented\nfun:__libc_dynarray_finalize=uninstrumented\nfun:__libc_dynarray_resize=uninstrumented\nfun:__libc_dynarray_resize_clear=uninstrumented\nfun:__libc_early_init=uninstrumented\nfun:__libc_fatal=uninstrumented\nfun:__libc_fcntl64=uninstrumented\nfun:__libc_fork=uninstrumented\nfun:__libc_free=uninstrumented\nfun:__libc_freeres=uninstrumented\nfun:__libc_ifunc_impl_list=uninstrumented\nfun:__libc_init_first=uninstrumented\nfun:__libc_mallinfo=uninstrumented\nfun:__libc_malloc=uninstrumented\nfun:__libc_mallopt=uninstrumented\nfun:__libc_memalign=uninstrumented\nfun:__libc_msgrcv=uninstrumented\nfun:__libc_msgsnd=uninstrumented\nfun:__libc_ns_makecanon=uninstrumented\nfun:__libc_ns_samename=uninstrumented\nfun:__libc_pread=uninstrumented\nfun:__libc_pvalloc=uninstrumented\nfun:__libc_pwrite=uninstrumented\nfun:__libc_realloc=uninstrumented\nfun:__libc_reallocarray=uninstrumented\nfun:__libc_res_dnok=uninstrumented\nfun:__libc_res_hnok=uninstrumented\nfun:__libc_res_nameinquery=uninstrumented\nfun:__libc_res_queriesmatch=uninstrumented\nfun:__libc_rpc_getport=uninstrumented\nfun:__libc_sa_len=uninstrumented\nfun:__libc_scratch_buffer_grow=uninstrumented\nfun:__libc_scratch_buffer_grow_preserve=uninstrumented\nfun:__libc_scratch_buffer_set_array_size=uninstrumented\nfun:__libc_secure_getenv=uninstrumented\nfun:__libc_sigaction=uninstrumented\nfun:__libc_start_main=uninstrumented\nfun:__libc_system=uninstrumented\nfun:__libc_unwind_link_get=uninstrumented\nfun:__libc_valloc=uninstrumented\nfun:__libdl_version
_placeholder=uninstrumented\nfun:__libpthread_version_placeholder=uninstrumented\nfun:__librt_version_placeholder=uninstrumented\nfun:__libutil_version_placeholder=uninstrumented\nfun:__lll_lock_wait_private=uninstrumented\nfun:__lll_lock_wake_private=uninstrumented\nfun:__loc_aton=uninstrumented\nfun:__loc_ntoa=uninstrumented\nfun:__log10_finite=uninstrumented\nfun:__log10f128_finite=uninstrumented\nfun:__log10f_finite=uninstrumented\nfun:__log10l_finite=uninstrumented\nfun:__log2_finite=uninstrumented\nfun:__log2f128_finite=uninstrumented\nfun:__log2f_finite=uninstrumented\nfun:__log2l_finite=uninstrumented\nfun:__log_finite=uninstrumented\nfun:__logf128_finite=uninstrumented\nfun:__logf_finite=uninstrumented\nfun:__logl_finite=uninstrumented\nfun:__longjmp_chk=uninstrumented\nfun:__lseek=uninstrumented\nfun:__lshrti3=uninstrumented\nfun:__lttf2=uninstrumented\nfun:__lxstat=uninstrumented\nfun:__lxstat64=uninstrumented\nfun:__madvise=uninstrumented\nfun:__mbrlen=uninstrumented\nfun:__mbrtowc=uninstrumented\nfun:__mbsnrtowcs_chk=uninstrumented\nfun:__mbsrtowcs_chk=uninstrumented\nfun:__mbstowcs_chk=uninstrumented\nfun:__memcmpeq=uninstrumented\nfun:__memcpy_chk=uninstrumented\nfun:__memmove_chk=uninstrumented\nfun:__mempcpy=uninstrumented\nfun:__mempcpy_chk=uninstrumented\nfun:__mempcpy_small=uninstrumented\nfun:__memset_chk=uninstrumented\nfun:__merge_grp=uninstrumented\nfun:__mktemp=uninstrumented\nfun:__mmap=uninstrumented\nfun:__modti3=uninstrumented\nfun:__monstartup=uninstrumented\nfun:__morestack=uninstrumented\nfun:__morestack_allocate_stack_space=uninstrumented\nfun:__morestack_block_signals=uninstrumented\nfun:__morestack_fail=uninstrumented\nfun:__morestack_get_guard=uninstrumented\nfun:__morestack_large_model=uninstrumented\nfun:__morestack_load_mmap=uninstrumented\nfun:__morestack_make_guard=uninstrumented\nfun:__morestack_non_split=uninstrumented\nfun:__morestack_release_segments=uninstrumented\nfun:__morestack_set_guard=uninstrumented\nfun:__morestac
k_unblock_signals=uninstrumented\nfun:__mprotect=uninstrumented\nfun:__mq_open_2=uninstrumented\nfun:__mulbitint3=uninstrumented\nfun:__muldc3=uninstrumented\nfun:__mulhc3=uninstrumented\nfun:__mulsc3=uninstrumented\nfun:__multc3=uninstrumented\nfun:__multf3=uninstrumented\nfun:__multi3=uninstrumented\nfun:__mulvdi3=uninstrumented\nfun:__mulvsi3=uninstrumented\nfun:__mulvti3=uninstrumented\nfun:__mulxc3=uninstrumented\nfun:__munmap=uninstrumented\nfun:__nanosleep=uninstrumented\nfun:__negtf2=uninstrumented\nfun:__negti2=uninstrumented\nfun:__negvdi2=uninstrumented\nfun:__negvsi2=uninstrumented\nfun:__negvti2=uninstrumented\nfun:__nehf2=uninstrumented\nfun:__netf2=uninstrumented\nfun:__netlink_assert_response=uninstrumented\nfun:__newlocale=uninstrumented\nfun:__nis_default_access=uninstrumented\nfun:__nis_default_group=uninstrumented\nfun:__nis_default_owner=uninstrumented\nfun:__nis_default_ttl=uninstrumented\nfun:__nis_finddirectory=uninstrumented\nfun:__nis_hash=uninstrumented\nfun:__nisbind_connect=uninstrumented\nfun:__nisbind_create=uninstrumented\nfun:__nisbind_destroy=uninstrumented\nfun:__nisbind_next=uninstrumented\nfun:__nl_langinfo_l=uninstrumented\nfun:__nptl_change_stack_perm=uninstrumented\nfun:__nptl_create_event=uninstrumented\nfun:__nptl_death_event=uninstrumented\nfun:__ns_get16=uninstrumented\nfun:__ns_get32=uninstrumented\nfun:__ns_name_compress=uninstrumented\nfun:__ns_name_ntop=uninstrumented\nfun:__ns_name_pack=uninstrumented\nfun:__ns_name_pton=uninstrumented\nfun:__ns_name_skip=uninstrumented\nfun:__ns_name_uncompress=uninstrumented\nfun:__ns_name_unpack=uninstrumented\nfun:__nss_configure_lookup=uninstrumented\nfun:__nss_database_get=uninstrumented\nfun:__nss_database_lookup=uninstrumented\nfun:__nss_disable_nscd=uninstrumented\nfun:__nss_files_data_endent=uninstrumented\nfun:__nss_files_data_open=uninstrumented\nfun:__nss_files_data_put=uninstrumented\nfun:__nss_files_data_setent=uninstrumented\nfun:__nss_files_fopen=uninstrumented\nfun:_
_nss_group_lookup=uninstrumented\nfun:__nss_group_lookup2=uninstrumented\nfun:__nss_hash=uninstrumented\nfun:__nss_hostname_digits_dots=uninstrumented\nfun:__nss_hosts_lookup=uninstrumented\nfun:__nss_hosts_lookup2=uninstrumented\nfun:__nss_lookup=uninstrumented\nfun:__nss_lookup_function=uninstrumented\nfun:__nss_next=uninstrumented\nfun:__nss_next2=uninstrumented\nfun:__nss_parse_line_result=uninstrumented\nfun:__nss_passwd_lookup=uninstrumented\nfun:__nss_passwd_lookup2=uninstrumented\nfun:__nss_readline=uninstrumented\nfun:__nss_services_lookup2=uninstrumented\nfun:__obstack_printf_chk=uninstrumented\nfun:__obstack_vprintf_chk=uninstrumented\nfun:__open=uninstrumented\nfun:__open64=uninstrumented\nfun:__open64_2=uninstrumented\nfun:__open64_nocancel=uninstrumented\nfun:__open_2=uninstrumented\nfun:__open_catalog=uninstrumented\nfun:__open_nocancel=uninstrumented\nfun:__openat64_2=uninstrumented\nfun:__openat_2=uninstrumented\nfun:__overflow=uninstrumented\nfun:__p_cdname=uninstrumented\nfun:__p_cdnname=uninstrumented\nfun:__p_class=uninstrumented\nfun:__p_fqname=uninstrumented\nfun:__p_fqnname=uninstrumented\nfun:__p_option=uninstrumented\nfun:__p_query=uninstrumented\nfun:__p_rcode=uninstrumented\nfun:__p_secstodate=uninstrumented\nfun:__p_time=uninstrumented\nfun:__p_type=uninstrumented\nfun:__paritydi2=uninstrumented\nfun:__parityti2=uninstrumented\nfun:__pipe=uninstrumented\nfun:__poll=uninstrumented\nfun:__poll_chk=uninstrumented\nfun:__popcountdi2=uninstrumented\nfun:__popcountti2=uninstrumented\nfun:__posix_getopt=uninstrumented\nfun:__pow_finite=uninstrumented\nfun:__powf128_finite=uninstrumented\nfun:__powf_finite=uninstrumented\nfun:__powidf2=uninstrumented\nfun:__powisf2=uninstrumented\nfun:__powitf2=uninstrumented\nfun:__powixf2=uninstrumented\nfun:__powl_finite=uninstrumented\nfun:__ppoll_chk=uninstrumented\nfun:__pread64=uninstrumented\nfun:__pread64_chk=uninstrumented\nfun:__pread64_nocancel=uninstrumented\nfun:__pread_chk=uninstrumented\nfun:__pr
epare_niscall=uninstrumented\nfun:__printf_chk=uninstrumented\nfun:__printf_fp=uninstrumented\nfun:__profile_frequency=uninstrumented\nfun:__pthread_atfork=uninstrumented\nfun:__pthread_cleanup_routine=uninstrumented\nfun:__pthread_get_minstack=uninstrumented\nfun:__pthread_getspecific=uninstrumented\nfun:__pthread_key_create=uninstrumented\nfun:__pthread_mutex_destroy=uninstrumented\nfun:__pthread_mutex_init=uninstrumented\nfun:__pthread_mutex_lock=uninstrumented\nfun:__pthread_mutex_trylock=uninstrumented\nfun:__pthread_mutex_unlock=uninstrumented\nfun:__pthread_mutexattr_destroy=uninstrumented\nfun:__pthread_mutexattr_init=uninstrumented\nfun:__pthread_mutexattr_settype=uninstrumented\nfun:__pthread_once=uninstrumented\nfun:__pthread_register_cancel=uninstrumented\nfun:__pthread_register_cancel_defer=uninstrumented\nfun:__pthread_rwlock_destroy=uninstrumented\nfun:__pthread_rwlock_init=uninstrumented\nfun:__pthread_rwlock_rdlock=uninstrumented\nfun:__pthread_rwlock_tryrdlock=uninstrumented\nfun:__pthread_rwlock_trywrlock=uninstrumented\nfun:__pthread_rwlock_unlock=uninstrumented\nfun:__pthread_rwlock_wrlock=uninstrumented\nfun:__pthread_setspecific=uninstrumented\nfun:__pthread_unregister_cancel=uninstrumented\nfun:__pthread_unregister_cancel_restore=uninstrumented\nfun:__pthread_unwind_next=uninstrumented\nfun:__ptsname_r_chk=uninstrumented\nfun:__putlong=uninstrumented\nfun:__putshort=uninstrumented\nfun:__pwrite64=uninstrumented\nfun:__rawmemchr=uninstrumented\nfun:__read=uninstrumented\nfun:__read_chk=uninstrumented\nfun:__read_nocancel=uninstrumented\nfun:__readlink_chk=uninstrumented\nfun:__readlinkat_chk=uninstrumented\nfun:__realpath_chk=uninstrumented\nfun:__recv=uninstrumented\nfun:__recv_chk=uninstrumented\nfun:__recvfrom_chk=uninstrumented\nfun:__register_atfork=uninstrumented\nfun:__register_frame=uninstrumented\nfun:__register_frame_info=uninstrumented\nfun:__register_frame_info_bases=uninstrumented\nfun:__register_frame_info_table=uninstrumented\nf
un:__register_frame_info_table_bases=uninstrumented\nfun:__register_frame_table=uninstrumented\nfun:__remainder_finite=uninstrumented\nfun:__remainderf128_finite=uninstrumented\nfun:__remainderf_finite=uninstrumented\nfun:__remainderl_finite=uninstrumented\nfun:__res_close=uninstrumented\nfun:__res_context_hostalias=uninstrumented\nfun:__res_context_mkquery=uninstrumented\nfun:__res_context_query=uninstrumented\nfun:__res_context_search=uninstrumented\nfun:__res_context_send=uninstrumented\nfun:__res_dnok=uninstrumented\nfun:__res_get_nsaddr=uninstrumented\nfun:__res_hnok=uninstrumented\nfun:__res_hostalias=uninstrumented\nfun:__res_iclose=uninstrumented\nfun:__res_init=uninstrumented\nfun:__res_isourserver=uninstrumented\nfun:__res_mailok=uninstrumented\nfun:__res_mkquery=uninstrumented\nfun:__res_nameinquery=uninstrumented\nfun:__res_nclose=uninstrumented\nfun:__res_ninit=uninstrumented\nfun:__res_nmkquery=uninstrumented\nfun:__res_nopt=uninstrumented\nfun:__res_nquery=uninstrumented\nfun:__res_nquerydomain=uninstrumented\nfun:__res_nsearch=uninstrumented\nfun:__res_nsend=uninstrumented\nfun:__res_ownok=uninstrumented\nfun:__res_queriesmatch=uninstrumented\nfun:__res_query=uninstrumented\nfun:__res_querydomain=uninstrumented\nfun:__res_randomid=uninstrumented\nfun:__res_search=uninstrumented\nfun:__res_send=uninstrumented\nfun:__res_state=uninstrumented\nfun:__resolv_context_get=uninstrumented\nfun:__resolv_context_get_override=uninstrumented\nfun:__resolv_context_get_preinit=uninstrumented\nfun:__resolv_context_put=uninstrumented\nfun:__rpc_thread_createerr=uninstrumented\nfun:__rpc_thread_svc_fdset=uninstrumented\nfun:__rpc_thread_svc_max_pollfd=uninstrumented\nfun:__rpc_thread_svc_pollfd=uninstrumented\nfun:__rtld_version_placeholder=uninstrumented\nfun:__sbrk=uninstrumented\nfun:__scalb_finite=uninstrumented\nfun:__scalbf_finite=uninstrumented\nfun:__scalbl_finite=uninstrumented\nfun:__sched_cpualloc=uninstrumented\nfun:__sched_cpucount=uninstrumented\nfun:__s
ched_cpufree=uninstrumented\nfun:__sched_get_priority_max=uninstrumented\nfun:__sched_get_priority_min=uninstrumented\nfun:__sched_getparam=uninstrumented\nfun:__sched_getscheduler=uninstrumented\nfun:__sched_setscheduler=uninstrumented\nfun:__sched_yield=uninstrumented\nfun:__secure_getenv=uninstrumented\nfun:__select=uninstrumented\nfun:__send=uninstrumented\nfun:__sendmmsg=uninstrumented\nfun:__setmntent=uninstrumented\nfun:__setpgid=uninstrumented\nfun:__sfp_handle_exceptions=uninstrumented\nfun:__shm_get_name=uninstrumented\nfun:__sigaction=uninstrumented\nfun:__sigaddset=uninstrumented\nfun:__sigdelset=uninstrumented\nfun:__sigismember=uninstrumented\nfun:__signbit=uninstrumented\nfun:__signbitf=uninstrumented\nfun:__signbitf128=uninstrumented\nfun:__signbitl=uninstrumented\nfun:__sigpause=uninstrumented\nfun:__sigsetjmp=uninstrumented\nfun:__sigsuspend=uninstrumented\nfun:__sigtimedwait=uninstrumented\nfun:__sinh_finite=uninstrumented\nfun:__sinhf128_finite=uninstrumented\nfun:__sinhf_finite=uninstrumented\nfun:__sinhl_finite=uninstrumented\nfun:__snprintf_chk=uninstrumented\nfun:__socket=uninstrumented\nfun:__splitstack_block_signals=uninstrumented\nfun:__splitstack_block_signals_context=uninstrumented\nfun:__splitstack_find=uninstrumented\nfun:__splitstack_find_context=uninstrumented\nfun:__splitstack_getcontext=uninstrumented\nfun:__splitstack_makecontext=uninstrumented\nfun:__splitstack_releasecontext=uninstrumented\nfun:__splitstack_resetcontext=uninstrumented\nfun:__splitstack_setcontext=uninstrumented\nfun:__sprintf_chk=uninstrumented\nfun:__sqrt_finite=uninstrumented\nfun:__sqrtf128_finite=uninstrumented\nfun:__sqrtf_finite=uninstrumented\nfun:__sqrtl_finite=uninstrumented\nfun:__sse_resms64_12=uninstrumented\nfun:__sse_resms64_13=uninstrumented\nfun:__sse_resms64_14=uninstrumented\nfun:__sse_resms64_15=uninstrumented\nfun:__sse_resms64_16=uninstrumented\nfun:__sse_resms64_17=uninstrumented\nfun:__sse_resms64_18=uninstrumented\nfun:__sse_resms64f_12=u
ninstrumented\nfun:__sse_resms64f_13=uninstrumented\nfun:__sse_resms64f_14=uninstrumented\nfun:__sse_resms64f_15=uninstrumented\nfun:__sse_resms64f_16=uninstrumented\nfun:__sse_resms64f_17=uninstrumented\nfun:__sse_resms64fx_12=uninstrumented\nfun:__sse_resms64fx_13=uninstrumented\nfun:__sse_resms64fx_14=uninstrumented\nfun:__sse_resms64fx_15=uninstrumented\nfun:__sse_resms64fx_16=uninstrumented\nfun:__sse_resms64fx_17=uninstrumented\nfun:__sse_resms64x_12=uninstrumented\nfun:__sse_resms64x_13=uninstrumented\nfun:__sse_resms64x_14=uninstrumented\nfun:__sse_resms64x_15=uninstrumented\nfun:__sse_resms64x_16=uninstrumented\nfun:__sse_resms64x_17=uninstrumented\nfun:__sse_resms64x_18=uninstrumented\nfun:__sse_savms64_12=uninstrumented\nfun:__sse_savms64_13=uninstrumented\nfun:__sse_savms64_14=uninstrumented\nfun:__sse_savms64_15=uninstrumented\nfun:__sse_savms64_16=uninstrumented\nfun:__sse_savms64_17=uninstrumented\nfun:__sse_savms64_18=uninstrumented\nfun:__sse_savms64f_12=uninstrumented\nfun:__sse_savms64f_13=uninstrumented\nfun:__sse_savms64f_14=uninstrumented\nfun:__sse_savms64f_15=uninstrumented\nfun:__sse_savms64f_16=uninstrumented\nfun:__sse_savms64f_17=uninstrumented\nfun:__stack_chk_fail=uninstrumented\nfun:__stack_chk_fail_local=uninstrumented\nfun:__stack_split_initialize=uninstrumented\nfun:__statfs=uninstrumented\nfun:__stpcpy=uninstrumented\nfun:__stpcpy_chk=uninstrumented\nfun:__stpcpy_small=uninstrumented\nfun:__stpncpy=uninstrumented\nfun:__stpncpy_chk=uninstrumented\nfun:__strcasecmp=uninstrumented\nfun:__strcasecmp_l=uninstrumented\nfun:__strcasestr=uninstrumented\nfun:__strcat_chk=uninstrumented\nfun:__strcoll_l=uninstrumented\nfun:__strcpy_chk=uninstrumented\nfun:__strcpy_small=uninstrumented\nfun:__strcspn_c1=uninstrumented\nfun:__strcspn_c2=uninstrumented\nfun:__strcspn_c3=uninstrumented\nfun:__strdup=uninstrumented\nfun:__strerror_r=uninstrumented\nfun:__strfmon_l=uninstrumented\nfun:__strftime_l=uninstrumented\nfun:__strlcat_chk=uninstrumented\
nfun:__strlcpy_chk=uninstrumented\nfun:__strncasecmp_l=uninstrumented\nfun:__strncat_chk=uninstrumented\nfun:__strncpy_chk=uninstrumented\nfun:__strndup=uninstrumented\nfun:__strpbrk_c2=uninstrumented\nfun:__strpbrk_c3=uninstrumented\nfun:__strsep_1c=uninstrumented\nfun:__strsep_2c=uninstrumented\nfun:__strsep_3c=uninstrumented\nfun:__strsep_g=uninstrumented\nfun:__strspn_c1=uninstrumented\nfun:__strspn_c2=uninstrumented\nfun:__strspn_c3=uninstrumented\nfun:__strtod_internal=uninstrumented\nfun:__strtod_l=uninstrumented\nfun:__strtod_nan=uninstrumented\nfun:__strtof128_internal=uninstrumented\nfun:__strtof128_nan=uninstrumented\nfun:__strtof_internal=uninstrumented\nfun:__strtof_l=uninstrumented\nfun:__strtof_nan=uninstrumented\nfun:__strtok_r=uninstrumented\nfun:__strtok_r_1c=uninstrumented\nfun:__strtol_internal=uninstrumented\nfun:__strtol_l=uninstrumented\nfun:__strtold_internal=uninstrumented\nfun:__strtold_l=uninstrumented\nfun:__strtold_nan=uninstrumented\nfun:__strtoll_internal=uninstrumented\nfun:__strtoll_l=uninstrumented\nfun:__strtoul_internal=uninstrumented\nfun:__strtoul_l=uninstrumented\nfun:__strtoull_internal=uninstrumented\nfun:__strtoull_l=uninstrumented\nfun:__strub_enter=uninstrumented\nfun:__strub_leave=uninstrumented\nfun:__strub_update=uninstrumented\nfun:__strverscmp=uninstrumented\nfun:__strxfrm_l=uninstrumented\nfun:__subtf3=uninstrumented\nfun:__subvdi3=uninstrumented\nfun:__subvsi3=uninstrumented\nfun:__subvti3=uninstrumented\nfun:__swprintf_chk=uninstrumented\nfun:__sym_ntop=uninstrumented\nfun:__sym_ntos=uninstrumented\nfun:__sym_ston=uninstrumented\nfun:__sysconf=uninstrumented\nfun:__sysctl=uninstrumented\nfun:__syslog_chk=uninstrumented\nfun:__sysv_signal=uninstrumented\nfun:__tdelete=uninstrumented\nfun:__tfind=uninstrumented\nfun:__tls_get_addr=uninstrumented\nfun:__toascii_l=uninstrumented\nfun:__tolower_l=uninstrumented\nfun:__toupper_l=uninstrumented\nfun:__towctrans=uninstrumented\nfun:__towctrans_l=uninstrumented\nfun:__towlo
wer_l=uninstrumented\nfun:__towupper_l=uninstrumented\nfun:__truncdfbf2=uninstrumented\nfun:__truncdfhf2=uninstrumented\nfun:__truncdfsf2=uninstrumented\nfun:__trunchfbf2=uninstrumented\nfun:__truncsfbf2=uninstrumented\nfun:__truncsfhf2=uninstrumented\nfun:__trunctfbf2=uninstrumented\nfun:__trunctfdf2=uninstrumented\nfun:__trunctfhf2=uninstrumented\nfun:__trunctfsf2=uninstrumented\nfun:__trunctfxf2=uninstrumented\nfun:__truncxfbf2=uninstrumented\nfun:__truncxfhf2=uninstrumented\nfun:__tsearch=uninstrumented\nfun:__ttyname_r_chk=uninstrumented\nfun:__tunable_get_val=uninstrumented\nfun:__tunable_is_initialized=uninstrumented\nfun:__twalk=uninstrumented\nfun:__twalk_r=uninstrumented\nfun:__ucmpti2=uninstrumented\nfun:__udiv_w_sdiv=uninstrumented\nfun:__udivmodti4=uninstrumented\nfun:__udivti3=uninstrumented\nfun:__uflow=uninstrumented\nfun:__umodti3=uninstrumented\nfun:__underflow=uninstrumented\nfun:__unordtf2=uninstrumented\nfun:__uselocale=uninstrumented\nfun:__vasprintf_chk=uninstrumented\nfun:__vdprintf_chk=uninstrumented\nfun:__vfork=uninstrumented\nfun:__vfprintf_chk=uninstrumented\nfun:__vfscanf=uninstrumented\nfun:__vfwprintf_chk=uninstrumented\nfun:__vprintf_chk=uninstrumented\nfun:__vsnprintf=uninstrumented\nfun:__vsnprintf_chk=uninstrumented\nfun:__vsprintf_chk=uninstrumented\nfun:__vsscanf=uninstrumented\nfun:__vswprintf_chk=uninstrumented\nfun:__vsyslog_chk=uninstrumented\nfun:__vwprintf_chk=uninstrumented\nfun:__wait=uninstrumented\nfun:__waitpid=uninstrumented\nfun:__wcpcpy_chk=uninstrumented\nfun:__wcpncpy_chk=uninstrumented\nfun:__wcrtomb_chk=uninstrumented\nfun:__wcscasecmp_l=uninstrumented\nfun:__wcscat_chk=uninstrumented\nfun:__wcscoll_l=uninstrumented\nfun:__wcscpy_chk=uninstrumented\nfun:__wcsftime_l=uninstrumented\nfun:__wcslcat_chk=uninstrumented\nfun:__wcslcpy_chk=uninstrumented\nfun:__wcsncasecmp_l=uninstrumented\nfun:__wcsncat_chk=uninstrumented\nfun:__wcsncpy_chk=uninstrumented\nfun:__wcsnrtombs_chk=uninstrumented\nfun:__wcsrtombs_chk=unin
strumented\nfun:__wcstod_internal=uninstrumented\nfun:__wcstod_l=uninstrumented\nfun:__wcstof128_internal=uninstrumented\nfun:__wcstof_internal=uninstrumented\nfun:__wcstof_l=uninstrumented\nfun:__wcstol_internal=uninstrumented\nfun:__wcstol_l=uninstrumented\nfun:__wcstold_internal=uninstrumented\nfun:__wcstold_l=uninstrumented\nfun:__wcstoll_internal=uninstrumented\nfun:__wcstoll_l=uninstrumented\nfun:__wcstombs_chk=uninstrumented\nfun:__wcstoul_internal=uninstrumented\nfun:__wcstoul_l=uninstrumented\nfun:__wcstoull_internal=uninstrumented\nfun:__wcstoull_l=uninstrumented\nfun:__wcsxfrm_l=uninstrumented\nfun:__wctomb_chk=uninstrumented\nfun:__wctrans_l=uninstrumented\nfun:__wctype_l=uninstrumented\nfun:__wmemcpy_chk=uninstrumented\nfun:__wmemmove_chk=uninstrumented\nfun:__wmempcpy_chk=uninstrumented\nfun:__wmemset_chk=uninstrumented\nfun:__woverflow=uninstrumented\nfun:__wprintf_chk=uninstrumented\nfun:__wrap_pthread_create=uninstrumented\nfun:__write=uninstrumented\nfun:__write_nocancel=uninstrumented\nfun:__wuflow=uninstrumented\nfun:__wunderflow=uninstrumented\nfun:__x86_get_cpuid_feature_leaf=uninstrumented\nfun:__xmknod=uninstrumented\nfun:__xmknodat=uninstrumented\nfun:__xpg_basename=uninstrumented\nfun:__xpg_sigpause=uninstrumented\nfun:__xpg_strerror_r=uninstrumented\nfun:__xstat=uninstrumented\nfun:__xstat64=uninstrumented\nfun:__y0_finite=uninstrumented\nfun:__y0f128_finite=uninstrumented\nfun:__y0f_finite=uninstrumented\nfun:__y0l_finite=uninstrumented\nfun:__y1_finite=uninstrumented\nfun:__y1f128_finite=uninstrumented\nfun:__y1f_finite=uninstrumented\nfun:__y1l_finite=uninstrumented\nfun:__yn_finite=uninstrumented\nfun:__ynf128_finite=uninstrumented\nfun:__ynf_finite=uninstrumented\nfun:__ynl_finite=uninstrumented\nfun:__yp_check=uninstrumented\nfun:_authenticate=uninstrumented\nfun:_dl_allocate_tls=uninstrumented\nfun:_dl_allocate_tls_init=uninstrumented\nfun:_dl_audit_preinit=uninstrumented\nfun:_dl_audit_symbind_alt=uninstrumented\nfun:_dl_catch_exce
ption=uninstrumented\nfun:_dl_deallocate_tls=uninstrumented\nfun:_dl_debug_state=uninstrumented\nfun:_dl_exception_create=uninstrumented\nfun:_dl_exception_create_format=uninstrumented\nfun:_dl_exception_free=uninstrumented\nfun:_dl_fatal_printf=uninstrumented\nfun:_dl_find_dso_for_object=uninstrumented\nfun:_dl_find_object=uninstrumented\nfun:_dl_get_tls_static_info=uninstrumented\nfun:_dl_mcount=uninstrumented\nfun:_dl_mcount_wrapper=uninstrumented\nfun:_dl_mcount_wrapper_check=uninstrumented\nfun:_dl_rtld_di_serinfo=uninstrumented\nfun:_dl_signal_error=uninstrumented\nfun:_dl_signal_exception=uninstrumented\nfun:_dl_x86_get_cpu_features=uninstrumented\nfun:_exit=uninstrumented\nfun:_flushlbf=uninstrumented\nfun:_gethtbyaddr=uninstrumented\nfun:_gethtbyname=uninstrumented\nfun:_gethtbyname2=uninstrumented\nfun:_gethtent=uninstrumented\nfun:_getlong=uninstrumented\nfun:_getshort=uninstrumented\nfun:_longjmp=uninstrumented\nfun:_mcleanup=uninstrumented\nfun:_mcount=uninstrumented\nfun:_nss_dns_getcanonname_r=uninstrumented\nfun:_nss_dns_gethostbyaddr2_r=uninstrumented\nfun:_nss_dns_gethostbyaddr_r=uninstrumented\nfun:_nss_dns_gethostbyname2_r=uninstrumented\nfun:_nss_dns_gethostbyname3_r=uninstrumented\nfun:_nss_dns_gethostbyname4_r=uninstrumented\nfun:_nss_dns_gethostbyname_r=uninstrumented\nfun:_nss_dns_getnetbyaddr_r=uninstrumented\nfun:_nss_dns_getnetbyname_r=uninstrumented\nfun:_nss_files_endaliasent=uninstrumented\nfun:_nss_files_endetherent=uninstrumented\nfun:_nss_files_endgrent=uninstrumented\nfun:_nss_files_endhostent=uninstrumented\nfun:_nss_files_endnetent=uninstrumented\nfun:_nss_files_endnetgrent=uninstrumented\nfun:_nss_files_endprotoent=uninstrumented\nfun:_nss_files_endpwent=uninstrumented\nfun:_nss_files_endrpcent=uninstrumented\nfun:_nss_files_endservent=uninstrumented\nfun:_nss_files_endsgent=uninstrumented\nfun:_nss_files_endspent=uninstrumented\nfun:_nss_files_getaliasbyname_r=uninstrumented\nfun:_nss_files_getaliasent_r=uninstrumented\nfun:_ns
s_files_getetherent_r=uninstrumented\nfun:_nss_files_getgrent_r=uninstrumented\nfun:_nss_files_getgrgid_r=uninstrumented\nfun:_nss_files_getgrnam_r=uninstrumented\nfun:_nss_files_gethostbyaddr_r=uninstrumented\nfun:_nss_files_gethostbyname2_r=uninstrumented\nfun:_nss_files_gethostbyname3_r=uninstrumented\nfun:_nss_files_gethostbyname4_r=uninstrumented\nfun:_nss_files_gethostbyname_r=uninstrumented\nfun:_nss_files_gethostent_r=uninstrumented\nfun:_nss_files_gethostton_r=uninstrumented\nfun:_nss_files_getnetbyaddr_r=uninstrumented\nfun:_nss_files_getnetbyname_r=uninstrumented\nfun:_nss_files_getnetent_r=uninstrumented\nfun:_nss_files_getnetgrent_r=uninstrumented\nfun:_nss_files_getntohost_r=uninstrumented\nfun:_nss_files_getprotobyname_r=uninstrumented\nfun:_nss_files_getprotobynumber_r=uninstrumented\nfun:_nss_files_getprotoent_r=uninstrumented\nfun:_nss_files_getpwent_r=uninstrumented\nfun:_nss_files_getpwnam_r=uninstrumented\nfun:_nss_files_getpwuid_r=uninstrumented\nfun:_nss_files_getrpcbyname_r=uninstrumented\nfun:_nss_files_getrpcbynumber_r=uninstrumented\nfun:_nss_files_getrpcent_r=uninstrumented\nfun:_nss_files_getservbyname_r=uninstrumented\nfun:_nss_files_getservbyport_r=uninstrumented\nfun:_nss_files_getservent_r=uninstrumented\nfun:_nss_files_getsgent_r=uninstrumented\nfun:_nss_files_getsgnam_r=uninstrumented\nfun:_nss_files_getspent_r=uninstrumented\nfun:_nss_files_getspnam_r=uninstrumented\nfun:_nss_files_init=uninstrumented\nfun:_nss_files_initgroups_dyn=uninstrumented\nfun:_nss_files_parse_etherent=uninstrumented\nfun:_nss_files_parse_grent=uninstrumented\nfun:_nss_files_parse_netent=uninstrumented\nfun:_nss_files_parse_protoent=uninstrumented\nfun:_nss_files_parse_pwent=uninstrumented\nfun:_nss_files_parse_rpcent=uninstrumented\nfun:_nss_files_parse_servent=uninstrumented\nfun:_nss_files_parse_sgent=uninstrumented\nfun:_nss_files_parse_spent=uninstrumented\nfun:_nss_files_setaliasent=uninstrumented\nfun:_nss_files_setetherent=uninstrumented\nfun:_nss_
files_setgrent=uninstrumented\nfun:_nss_files_sethostent=uninstrumented\nfun:_nss_files_setnetent=uninstrumented\nfun:_nss_files_setnetgrent=uninstrumented\nfun:_nss_files_setprotoent=uninstrumented\nfun:_nss_files_setpwent=uninstrumented\nfun:_nss_files_setrpcent=uninstrumented\nfun:_nss_files_setservent=uninstrumented\nfun:_nss_files_setsgent=uninstrumented\nfun:_nss_files_setspent=uninstrumented\nfun:_nss_netgroup_parseline=uninstrumented\nfun:_obstack_allocated_p=uninstrumented\nfun:_obstack_begin=uninstrumented\nfun:_obstack_begin_1=uninstrumented\nfun:_obstack_free=uninstrumented\nfun:_obstack_memory_used=uninstrumented\nfun:_obstack_newchunk=uninstrumented\nfun:_pthread_cleanup_pop=uninstrumented\nfun:_pthread_cleanup_pop_restore=uninstrumented\nfun:_pthread_cleanup_push=uninstrumented\nfun:_pthread_cleanup_push_defer=uninstrumented\nfun:_rpc_dtablesize=uninstrumented\nfun:_seterr_reply=uninstrumented\nfun:_sethtent=uninstrumented\nfun:_setjmp=uninstrumented\nfun:_tolower=uninstrumented\nfun:_toupper=uninstrumented\nfun:_xdr_ib_request=uninstrumented\nfun:_xdr_nis_result=uninstrumented\nfun:a64l=uninstrumented\nfun:abort=uninstrumented\nfun:abs=uninstrumented\nfun:accept=uninstrumented\nfun:accept4=uninstrumented\nfun:access=uninstrumented\nfun:acct=uninstrumented\nfun:acos=uninstrumented\nfun:acosf=uninstrumented\nfun:acosf128=uninstrumented\nfun:acosf32=uninstrumented\nfun:acosf32x=uninstrumented\nfun:acosf64=uninstrumented\nfun:acosf64x=uninstrumented\nfun:acosh=uninstrumented\nfun:acoshf=uninstrumented\nfun:acoshf128=uninstrumented\nfun:acoshf32=uninstrumented\nfun:acoshf32x=uninstrumented\nfun:acoshf64=uninstrumented\nfun:acoshf64x=uninstrumented\nfun:acoshl=uninstrumented\nfun:acosl=uninstrumented\nfun:addmntent=uninstrumented\nfun:addseverity=uninstrumented\nfun:adjtime=uninstrumented\nfun:adjtimex=uninstrumented\nfun:advance=uninstrumented\nfun:aio_cancel=uninstrumented\nfun:aio_cancel64=uninstrumented\nfun:aio_error=uninstrumented\nfun:aio_error64=un
instrumented\nfun:aio_fsync=uninstrumented\nfun:aio_fsync64=uninstrumented\nfun:aio_init=uninstrumented\nfun:aio_read=uninstrumented\nfun:aio_read64=uninstrumented\nfun:aio_return=uninstrumented\nfun:aio_return64=uninstrumented\nfun:aio_suspend=uninstrumented\nfun:aio_suspend64=uninstrumented\nfun:aio_write=uninstrumented\nfun:aio_write64=uninstrumented\nfun:alarm=uninstrumented\nfun:aligned_alloc=uninstrumented\nfun:alphasort=uninstrumented\nfun:alphasort64=uninstrumented\nfun:arc4random=uninstrumented\nfun:arc4random_buf=uninstrumented\nfun:arc4random_uniform=uninstrumented\nfun:arch_prctl=uninstrumented\nfun:argp_error=uninstrumented\nfun:argp_failure=uninstrumented\nfun:argp_help=uninstrumented\nfun:argp_parse=uninstrumented\nfun:argp_state_help=uninstrumented\nfun:argp_usage=uninstrumented\nfun:argz_add=uninstrumented\nfun:argz_add_sep=uninstrumented\nfun:argz_append=uninstrumented\nfun:argz_count=uninstrumented\nfun:argz_create=uninstrumented\nfun:argz_create_sep=uninstrumented\nfun:argz_delete=uninstrumented\nfun:argz_extract=uninstrumented\nfun:argz_insert=uninstrumented\nfun:argz_next=uninstrumented\nfun:argz_replace=uninstrumented\nfun:argz_stringify=uninstrumented\nfun:asctime=uninstrumented\nfun:asctime_r=uninstrumented\nfun:asin=uninstrumented\nfun:asinf=uninstrumented\nfun:asinf128=uninstrumented\nfun:asinf32=uninstrumented\nfun:asinf32x=uninstrumented\nfun:asinf64=uninstrumented\nfun:asinf64x=uninstrumented\nfun:asinh=uninstrumented\nfun:asinhf=uninstrumented\nfun:asinhf128=uninstrumented\nfun:asinhf32=uninstrumented\nfun:asinhf32x=uninstrumented\nfun:asinhf64=uninstrumented\nfun:asinhf64x=uninstrumented\nfun:asinhl=uninstrumented\nfun:asinl=uninstrumented\nfun:asprintf=uninstrumented\nfun:at_quick_exit=uninstrumented\nfun:atan=uninstrumented\nfun:atan2=uninstrumented\nfun:atan2f=uninstrumented\nfun:atan2f128=uninstrumented\nfun:atan2f32=uninstrumented\nfun:atan2f32x=uninstrumented\nfun:atan2f64=uninstrumented\nfun:atan2f64x=uninstrumented\nfun:atan2l
=uninstrumented\nfun:atanf=uninstrumented\nfun:atanf128=uninstrumented\nfun:atanf32=uninstrumented\nfun:atanf32x=uninstrumented\nfun:atanf64=uninstrumented\nfun:atanf64x=uninstrumented\nfun:atanh=uninstrumented\nfun:atanhf=uninstrumented\nfun:atanhf128=uninstrumented\nfun:atanhf32=uninstrumented\nfun:atanhf32x=uninstrumented\nfun:atanhf64=uninstrumented\nfun:atanhf64x=uninstrumented\nfun:atanhl=uninstrumented\nfun:atanl=uninstrumented\nfun:atexit=uninstrumented\nfun:atof=uninstrumented\nfun:atoi=uninstrumented\nfun:atol=uninstrumented\nfun:atoll=uninstrumented\nfun:authdes_create=uninstrumented\nfun:authdes_getucred=uninstrumented\nfun:authdes_pk_create=uninstrumented\nfun:authnone_create=uninstrumented\nfun:authunix_create=uninstrumented\nfun:authunix_create_default=uninstrumented\nfun:backtrace=uninstrumented\nfun:backtrace_symbols=uninstrumented\nfun:backtrace_symbols_fd=uninstrumented\nfun:basename=uninstrumented\nfun:bcmp=uninstrumented\nfun:bcopy=uninstrumented\nfun:bdflush=uninstrumented\nfun:bind=uninstrumented\nfun:bind_textdomain_codeset=uninstrumented\nfun:bindresvport=uninstrumented\nfun:bindtextdomain=uninstrumented\nfun:brk=uninstrumented\nfun:bsd_signal=uninstrumented\nfun:bsearch=uninstrumented\nfun:btowc=uninstrumented\nfun:bzero=uninstrumented\nfun:c16rtomb=uninstrumented\nfun:c32rtomb=uninstrumented\nfun:c8rtomb=uninstrumented\nfun:cabs=uninstrumented\nfun:cabsf=uninstrumented\nfun:cabsf128=uninstrumented\nfun:cabsf32=uninstrumented\nfun:cabsf32x=uninstrumented\nfun:cabsf64=uninstrumented\nfun:cabsf64x=uninstrumented\nfun:cabsl=uninstrumented\nfun:cacos=uninstrumented\nfun:cacosf=uninstrumented\nfun:cacosf128=uninstrumented\nfun:cacosf32=uninstrumented\nfun:cacosf32x=uninstrumented\nfun:cacosf64=uninstrumented\nfun:cacosf64x=uninstrumented\nfun:cacosh=uninstrumented\nfun:cacoshf=uninstrumented\nfun:cacoshf128=uninstrumented\nfun:cacoshf32=uninstrumented\nfun:cacoshf32x=uninstrumented\nfun:cacoshf64=uninstrumented\nfun:cacoshf64x=uninstrumented\nfu
n:cacoshl=uninstrumented\nfun:cacosl=uninstrumented\nfun:call_once=uninstrumented\nfun:calloc=uninstrumented\nfun:callrpc=uninstrumented\nfun:canonicalize=uninstrumented\nfun:canonicalize_file_name=uninstrumented\nfun:canonicalizef=uninstrumented\nfun:canonicalizef128=uninstrumented\nfun:canonicalizef32=uninstrumented\nfun:canonicalizef32x=uninstrumented\nfun:canonicalizef64=uninstrumented\nfun:canonicalizef64x=uninstrumented\nfun:canonicalizel=uninstrumented\nfun:capget=uninstrumented\nfun:capset=uninstrumented\nfun:carg=uninstrumented\nfun:cargf=uninstrumented\nfun:cargf128=uninstrumented\nfun:cargf32=uninstrumented\nfun:cargf32x=uninstrumented\nfun:cargf64=uninstrumented\nfun:cargf64x=uninstrumented\nfun:cargl=uninstrumented\nfun:casin=uninstrumented\nfun:casinf=uninstrumented\nfun:casinf128=uninstrumented\nfun:casinf32=uninstrumented\nfun:casinf32x=uninstrumented\nfun:casinf64=uninstrumented\nfun:casinf64x=uninstrumented\nfun:casinh=uninstrumented\nfun:casinhf=uninstrumented\nfun:casinhf128=uninstrumented\nfun:casinhf32=uninstrumented\nfun:casinhf32x=uninstrumented\nfun:casinhf64=uninstrumented\nfun:casinhf64x=uninstrumented\nfun:casinhl=uninstrumented\nfun:casinl=uninstrumented\nfun:catan=uninstrumented\nfun:catanf=uninstrumented\nfun:catanf128=uninstrumented\nfun:catanf32=uninstrumented\nfun:catanf32x=uninstrumented\nfun:catanf64=uninstrumented\nfun:catanf64x=uninstrumented\nfun:catanh=uninstrumented\nfun:catanhf=uninstrumented\nfun:catanhf128=uninstrumented\nfun:catanhf32=uninstrumented\nfun:catanhf32x=uninstrumented\nfun:catanhf64=uninstrumented\nfun:catanhf64x=uninstrumented\nfun:catanhl=uninstrumented\nfun:catanl=uninstrumented\nfun:catclose=uninstrumented\nfun:catgets=uninstrumented\nfun:catopen=uninstrumented\nfun:cbc_crypt=uninstrumented\nfun:cbrt=uninstrumented\nfun:cbrtf=uninstrumented\nfun:cbrtf128=uninstrumented\nfun:cbrtf32=uninstrumented\nfun:cbrtf32x=uninstrumented\nfun:cbrtf64=uninstrumented\nfun:cbrtf64x=uninstrumented\nfun:cbrtl=uninstrumented
\nfun:ccos=uninstrumented\nfun:ccosf=uninstrumented\nfun:ccosf128=uninstrumented\nfun:ccosf32=uninstrumented\nfun:ccosf32x=uninstrumented\nfun:ccosf64=uninstrumented\nfun:ccosf64x=uninstrumented\nfun:ccosh=uninstrumented\nfun:ccoshf=uninstrumented\nfun:ccoshf128=uninstrumented\nfun:ccoshf32=uninstrumented\nfun:ccoshf32x=uninstrumented\nfun:ccoshf64=uninstrumented\nfun:ccoshf64x=uninstrumented\nfun:ccoshl=uninstrumented\nfun:ccosl=uninstrumented\nfun:ceil=uninstrumented\nfun:ceilf=uninstrumented\nfun:ceilf128=uninstrumented\nfun:ceilf32=uninstrumented\nfun:ceilf32x=uninstrumented\nfun:ceilf64=uninstrumented\nfun:ceilf64x=uninstrumented\nfun:ceill=uninstrumented\nfun:cexp=uninstrumented\nfun:cexpf=uninstrumented\nfun:cexpf128=uninstrumented\nfun:cexpf32=uninstrumented\nfun:cexpf32x=uninstrumented\nfun:cexpf64=uninstrumented\nfun:cexpf64x=uninstrumented\nfun:cexpl=uninstrumented\nfun:cfgetispeed=uninstrumented\nfun:cfgetospeed=uninstrumented\nfun:cfmakeraw=uninstrumented\nfun:cfree=uninstrumented\nfun:cfsetispeed=uninstrumented\nfun:cfsetospeed=uninstrumented\nfun:cfsetspeed=uninstrumented\nfun:chdir=uninstrumented\nfun:chflags=uninstrumented\nfun:chmod=uninstrumented\nfun:chown=uninstrumented\nfun:chroot=uninstrumented\nfun:cimag=uninstrumented\nfun:cimagf=uninstrumented\nfun:cimagf128=uninstrumented\nfun:cimagf32=uninstrumented\nfun:cimagf32x=uninstrumented\nfun:cimagf64=uninstrumented\nfun:cimagf64x=uninstrumented\nfun:cimagl=uninstrumented\nfun:clearenv=uninstrumented\nfun:clearerr=uninstrumented\nfun:clearerr_unlocked=uninstrumented\nfun:clnt_broadcast=uninstrumented\nfun:clnt_create=uninstrumented\nfun:clnt_pcreateerror=uninstrumented\nfun:clnt_perrno=uninstrumented\nfun:clnt_perror=uninstrumented\nfun:clnt_spcreateerror=uninstrumented\nfun:clnt_sperrno=uninstrumented\nfun:clnt_sperror=uninstrumented\nfun:clntraw_create=uninstrumented\nfun:clnttcp_create=uninstrumented\nfun:clntudp_bufcreate=uninstrumented\nfun:clntudp_create=uninstrumented\nfun:clntunix_create=u
ninstrumented\nfun:clock=uninstrumented\nfun:clock_adjtime=uninstrumented\nfun:clock_getcpuclockid=uninstrumented\nfun:clock_getres=uninstrumented\nfun:clock_gettime=uninstrumented\nfun:clock_nanosleep=uninstrumented\nfun:clock_settime=uninstrumented\nfun:clog=uninstrumented\nfun:clog10=uninstrumented\nfun:clog10f=uninstrumented\nfun:clog10f128=uninstrumented\nfun:clog10f32=uninstrumented\nfun:clog10f32x=uninstrumented\nfun:clog10f64=uninstrumented\nfun:clog10f64x=uninstrumented\nfun:clog10l=uninstrumented\nfun:clogf=uninstrumented\nfun:clogf128=uninstrumented\nfun:clogf32=uninstrumented\nfun:clogf32x=uninstrumented\nfun:clogf64=uninstrumented\nfun:clogf64x=uninstrumented\nfun:clogl=uninstrumented\nfun:clone=uninstrumented\nfun:close=uninstrumented\nfun:close_range=uninstrumented\nfun:closedir=uninstrumented\nfun:closefrom=uninstrumented\nfun:closelog=uninstrumented\nfun:cnd_broadcast=uninstrumented\nfun:cnd_destroy=uninstrumented\nfun:cnd_init=uninstrumented\nfun:cnd_signal=uninstrumented\nfun:cnd_timedwait=uninstrumented\nfun:cnd_wait=uninstrumented\nfun:confstr=uninstrumented\nfun:conj=uninstrumented\nfun:conjf=uninstrumented\nfun:conjf128=uninstrumented\nfun:conjf32=uninstrumented\nfun:conjf32x=uninstrumented\nfun:conjf64=uninstrumented\nfun:conjf64x=uninstrumented\nfun:conjl=uninstrumented\nfun:connect=uninstrumented\nfun:copy_file_range=uninstrumented\nfun:copysign=uninstrumented\nfun:copysignf=uninstrumented\nfun:copysignf128=uninstrumented\nfun:copysignf32=uninstrumented\nfun:copysignf32x=uninstrumented\nfun:copysignf64=uninstrumented\nfun:copysignf64x=uninstrumented\nfun:copysignl=uninstrumented\nfun:cos=uninstrumented\nfun:cosf=uninstrumented\nfun:cosf128=uninstrumented\nfun:cosf32=uninstrumented\nfun:cosf32x=uninstrumented\nfun:cosf64=uninstrumented\nfun:cosf64x=uninstrumented\nfun:cosh=uninstrumented\nfun:coshf=uninstrumented\nfun:coshf128=uninstrumented\nfun:coshf32=uninstrumented\nfun:coshf32x=uninstrumented\nfun:coshf64=uninstrumented\nfun:coshf64x=un
instrumented\nfun:coshl=uninstrumented\nfun:cosl=uninstrumented\nfun:cpow=uninstrumented\nfun:cpowf=uninstrumented\nfun:cpowf128=uninstrumented\nfun:cpowf32=uninstrumented\nfun:cpowf32x=uninstrumented\nfun:cpowf64=uninstrumented\nfun:cpowf64x=uninstrumented\nfun:cpowl=uninstrumented\nfun:cproj=uninstrumented\nfun:cprojf=uninstrumented\nfun:cprojf128=uninstrumented\nfun:cprojf32=uninstrumented\nfun:cprojf32x=uninstrumented\nfun:cprojf64=uninstrumented\nfun:cprojf64x=uninstrumented\nfun:cprojl=uninstrumented\nfun:creal=uninstrumented\nfun:crealf=uninstrumented\nfun:crealf128=uninstrumented\nfun:crealf32=uninstrumented\nfun:crealf32x=uninstrumented\nfun:crealf64=uninstrumented\nfun:crealf64x=uninstrumented\nfun:creall=uninstrumented\nfun:creat=uninstrumented\nfun:creat64=uninstrumented\nfun:create_module=uninstrumented\nfun:crypt=uninstrumented\nfun:crypt_checksalt=uninstrumented\nfun:crypt_gensalt=uninstrumented\nfun:crypt_gensalt_r=uninstrumented\nfun:crypt_gensalt_ra=uninstrumented\nfun:crypt_gensalt_rn=uninstrumented\nfun:crypt_preferred_method=uninstrumented\nfun:crypt_r=uninstrumented\nfun:crypt_ra=uninstrumented\nfun:crypt_rn=uninstrumented\nfun:csin=uninstrumented\nfun:csinf=uninstrumented\nfun:csinf128=uninstrumented\nfun:csinf32=uninstrumented\nfun:csinf32x=uninstrumented\nfun:csinf64=uninstrumented\nfun:csinf64x=uninstrumented\nfun:csinh=uninstrumented\nfun:csinhf=uninstrumented\nfun:csinhf128=uninstrumented\nfun:csinhf32=uninstrumented\nfun:csinhf32x=uninstrumented\nfun:csinhf64=uninstrumented\nfun:csinhf64x=uninstrumented\nfun:csinhl=uninstrumented\nfun:csinl=uninstrumented\nfun:csqrt=uninstrumented\nfun:csqrtf=uninstrumented\nfun:csqrtf128=uninstrumented\nfun:csqrtf32=uninstrumented\nfun:csqrtf32x=uninstrumented\nfun:csqrtf64=uninstrumented\nfun:csqrtf64x=uninstrumented\nfun:csqrtl=uninstrumented\nfun:ctan=uninstrumented\nfun:ctanf=uninstrumented\nfun:ctanf128=uninstrumented\nfun:ctanf32=uninstrumented\nfun:ctanf32x=uninstrumented\nfun:ctanf64=uninstrumen
ted\nfun:ctanf64x=uninstrumented\nfun:ctanh=uninstrumented\nfun:ctanhf=uninstrumented\nfun:ctanhf128=uninstrumented\nfun:ctanhf32=uninstrumented\nfun:ctanhf32x=uninstrumented\nfun:ctanhf64=uninstrumented\nfun:ctanhf64x=uninstrumented\nfun:ctanhl=uninstrumented\nfun:ctanl=uninstrumented\nfun:ctermid=uninstrumented\nfun:ctime=uninstrumented\nfun:ctime_r=uninstrumented\nfun:cuserid=uninstrumented\nfun:daddl=uninstrumented\nfun:daemon=uninstrumented\nfun:dcgettext=uninstrumented\nfun:dcngettext=uninstrumented\nfun:ddivl=uninstrumented\nfun:delete_module=uninstrumented\nfun:des_setparity=uninstrumented\nfun:dfmal=uninstrumented\nfun:dgettext=uninstrumented\nfun:difftime=uninstrumented\nfun:dirfd=uninstrumented\nfun:dirname=uninstrumented\nfun:div=uninstrumented\nfun:dl_iterate_phdr=uninstrumented\nfun:dladdr=uninstrumented\nfun:dladdr1=uninstrumented\nfun:dlclose=uninstrumented\nfun:dlerror=uninstrumented\nfun:dlinfo=uninstrumented\nfun:dlmopen=uninstrumented\nfun:dlopen=uninstrumented\nfun:dlsym=uninstrumented\nfun:dlvsym=uninstrumented\nfun:dmull=uninstrumented\nfun:dn_comp=uninstrumented\nfun:dn_expand=uninstrumented\nfun:dn_skipname=uninstrumented\nfun:dngettext=uninstrumented\nfun:dprintf=uninstrumented\nfun:drand48=uninstrumented\nfun:drand48_r=uninstrumented\nfun:drem=uninstrumented\nfun:dremf=uninstrumented\nfun:dreml=uninstrumented\nfun:dsqrtl=uninstrumented\nfun:dsubl=uninstrumented\nfun:dup=uninstrumented\nfun:dup2=uninstrumented\nfun:dup3=uninstrumented\nfun:duplocale=uninstrumented\nfun:dysize=uninstrumented\nfun:eaccess=uninstrumented\nfun:ecb_crypt=uninstrumented\nfun:ecvt=uninstrumented\nfun:ecvt_r=uninstrumented\nfun:encrypt=uninstrumented\nfun:encrypt_r=uninstrumented\nfun:endaliasent=uninstrumented\nfun:endfsent=uninstrumented\nfun:endgrent=uninstrumented\nfun:endhostent=uninstrumented\nfun:endmntent=uninstrumented\nfun:endnetent=uninstrumented\nfun:endnetgrent=uninstrumented\nfun:endprotoent=uninstrumented\nfun:endpwent=uninstrumented\nfun:endrpcent=u
ninstrumented\nfun:endservent=uninstrumented\nfun:endsgent=uninstrumented\nfun:endspent=uninstrumented\nfun:endttyent=uninstrumented\nfun:endusershell=uninstrumented\nfun:endutent=uninstrumented\nfun:endutxent=uninstrumented\nfun:envz_add=uninstrumented\nfun:envz_entry=uninstrumented\nfun:envz_get=uninstrumented\nfun:envz_merge=uninstrumented\nfun:envz_remove=uninstrumented\nfun:envz_strip=uninstrumented\nfun:epoll_create=uninstrumented\nfun:epoll_create1=uninstrumented\nfun:epoll_ctl=uninstrumented\nfun:epoll_pwait=uninstrumented\nfun:epoll_pwait2=uninstrumented\nfun:epoll_wait=uninstrumented\nfun:erand48=uninstrumented\nfun:erand48_r=uninstrumented\nfun:erf=uninstrumented\nfun:erfc=uninstrumented\nfun:erfcf=uninstrumented\nfun:erfcf128=uninstrumented\nfun:erfcf32=uninstrumented\nfun:erfcf32x=uninstrumented\nfun:erfcf64=uninstrumented\nfun:erfcf64x=uninstrumented\nfun:erfcl=uninstrumented\nfun:erff=uninstrumented\nfun:erff128=uninstrumented\nfun:erff32=uninstrumented\nfun:erff32x=uninstrumented\nfun:erff64=uninstrumented\nfun:erff64x=uninstrumented\nfun:erfl=uninstrumented\nfun:err=uninstrumented\nfun:error=uninstrumented\nfun:error_at_line=uninstrumented\nfun:errx=uninstrumented\nfun:ether_aton=uninstrumented\nfun:ether_aton_r=uninstrumented\nfun:ether_hostton=uninstrumented\nfun:ether_line=uninstrumented\nfun:ether_ntoa=uninstrumented\nfun:ether_ntoa_r=uninstrumented\nfun:ether_ntohost=uninstrumented\nfun:euidaccess=uninstrumented\nfun:eventfd=uninstrumented\nfun:eventfd_read=uninstrumented\nfun:eventfd_write=uninstrumented\nfun:execl=uninstrumented\nfun:execle=uninstrumented\nfun:execlp=uninstrumented\nfun:execv=uninstrumented\nfun:execve=uninstrumented\nfun:execveat=uninstrumented\nfun:execvp=uninstrumented\nfun:execvpe=uninstrumented\nfun:exit=uninstrumented\nfun:exp=uninstrumented\nfun:exp10=uninstrumented\nfun:exp10f=uninstrumented\nfun:exp10f128=uninstrumented\nfun:exp10f32=uninstrumented\nfun:exp10f32x=uninstrumented\nfun:exp10f64=uninstrumented\nfun:exp10
f64x=uninstrumented\nfun:exp10l=uninstrumented\nfun:exp2=uninstrumented\nfun:exp2f=uninstrumented\nfun:exp2f128=uninstrumented\nfun:exp2f32=uninstrumented\nfun:exp2f32x=uninstrumented\nfun:exp2f64=uninstrumented\nfun:exp2f64x=uninstrumented\nfun:exp2l=uninstrumented\nfun:expf=uninstrumented\nfun:expf128=uninstrumented\nfun:expf32=uninstrumented\nfun:expf32x=uninstrumented\nfun:expf64=uninstrumented\nfun:expf64x=uninstrumented\nfun:expl=uninstrumented\nfun:explicit_bzero=uninstrumented\nfun:expm1=uninstrumented\nfun:expm1f=uninstrumented\nfun:expm1f128=uninstrumented\nfun:expm1f32=uninstrumented\nfun:expm1f32x=uninstrumented\nfun:expm1f64=uninstrumented\nfun:expm1f64x=uninstrumented\nfun:expm1l=uninstrumented\nfun:f32addf128=uninstrumented\nfun:f32addf32x=uninstrumented\nfun:f32addf64=uninstrumented\nfun:f32addf64x=uninstrumented\nfun:f32divf128=uninstrumented\nfun:f32divf32x=uninstrumented\nfun:f32divf64=uninstrumented\nfun:f32divf64x=uninstrumented\nfun:f32fmaf128=uninstrumented\nfun:f32fmaf32x=uninstrumented\nfun:f32fmaf64=uninstrumented\nfun:f32fmaf64x=uninstrumented\nfun:f32mulf128=uninstrumented\nfun:f32mulf32x=uninstrumented\nfun:f32mulf64=uninstrumented\nfun:f32mulf64x=uninstrumented\nfun:f32sqrtf128=uninstrumented\nfun:f32sqrtf32x=uninstrumented\nfun:f32sqrtf64=uninstrumented\nfun:f32sqrtf64x=uninstrumented\nfun:f32subf128=uninstrumented\nfun:f32subf32x=uninstrumented\nfun:f32subf64=uninstrumented\nfun:f32subf64x=uninstrumented\nfun:f32xaddf128=uninstrumented\nfun:f32xaddf64=uninstrumented\nfun:f32xaddf64x=uninstrumented\nfun:f32xdivf128=uninstrumented\nfun:f32xdivf64=uninstrumented\nfun:f32xdivf64x=uninstrumented\nfun:f32xfmaf128=uninstrumented\nfun:f32xfmaf64=uninstrumented\nfun:f32xfmaf64x=uninstrumented\nfun:f32xmulf128=uninstrumented\nfun:f32xmulf64=uninstrumented\nfun:f32xmulf64x=uninstrumented\nfun:f32xsqrtf128=uninstrumented\nfun:f32xsqrtf64=uninstrumented\nfun:f32xsqrtf64x=uninstrumented\nfun:f32xsubf128=uninstrumented\nfun:f32xsubf64=uninstrumented
\nfun:f32xsubf64x=uninstrumented\nfun:f64addf128=uninstrumented\nfun:f64addf64x=uninstrumented\nfun:f64divf128=uninstrumented\nfun:f64divf64x=uninstrumented\nfun:f64fmaf128=uninstrumented\nfun:f64fmaf64x=uninstrumented\nfun:f64mulf128=uninstrumented\nfun:f64mulf64x=uninstrumented\nfun:f64sqrtf128=uninstrumented\nfun:f64sqrtf64x=uninstrumented\nfun:f64subf128=uninstrumented\nfun:f64subf64x=uninstrumented\nfun:f64xaddf128=uninstrumented\nfun:f64xdivf128=uninstrumented\nfun:f64xfmaf128=uninstrumented\nfun:f64xmulf128=uninstrumented\nfun:f64xsqrtf128=uninstrumented\nfun:f64xsubf128=uninstrumented\nfun:fabs=uninstrumented\nfun:fabsf=uninstrumented\nfun:fabsf128=uninstrumented\nfun:fabsf32=uninstrumented\nfun:fabsf32x=uninstrumented\nfun:fabsf64=uninstrumented\nfun:fabsf64x=uninstrumented\nfun:fabsl=uninstrumented\nfun:faccessat=uninstrumented\nfun:fadd=uninstrumented\nfun:faddl=uninstrumented\nfun:fallocate=uninstrumented\nfun:fallocate64=uninstrumented\nfun:fanotify_init=uninstrumented\nfun:fanotify_mark=uninstrumented\nfun:fattach=uninstrumented\nfun:fchdir=uninstrumented\nfun:fchflags=uninstrumented\nfun:fchmod=uninstrumented\nfun:fchmodat=uninstrumented\nfun:fchown=uninstrumented\nfun:fchownat=uninstrumented\nfun:fclose=uninstrumented\nfun:fcloseall=uninstrumented\nfun:fcntl=uninstrumented\nfun:fcntl64=uninstrumented\nfun:fcrypt=uninstrumented\nfun:fcvt=uninstrumented\nfun:fcvt_r=uninstrumented\nfun:fdatasync=uninstrumented\nfun:fdetach=uninstrumented\nfun:fdim=uninstrumented\nfun:fdimf=uninstrumented\nfun:fdimf128=uninstrumented\nfun:fdimf32=uninstrumented\nfun:fdimf32x=uninstrumented\nfun:fdimf64=uninstrumented\nfun:fdimf64x=uninstrumented\nfun:fdiml=uninstrumented\nfun:fdiv=uninstrumented\nfun:fdivl=uninstrumented\nfun:fdopen=uninstrumented\nfun:fdopendir=uninstrumented\nfun:feclearexcept=uninstrumented\nfun:fedisableexcept=uninstrumented\nfun:feenableexcept=uninstrumented\nfun:fegetenv=uninstrumented\nfun:fegetexcept=uninstrumented\nfun:fegetexceptflag=uninstrume
nted\nfun:fegetmode=uninstrumented\nfun:fegetround=uninstrumented\nfun:feholdexcept=uninstrumented\nfun:feof=uninstrumented\nfun:feof_unlocked=uninstrumented\nfun:feraiseexcept=uninstrumented\nfun:ferror=uninstrumented\nfun:ferror_unlocked=uninstrumented\nfun:fesetenv=uninstrumented\nfun:fesetexcept=uninstrumented\nfun:fesetexceptflag=uninstrumented\nfun:fesetmode=uninstrumented\nfun:fesetround=uninstrumented\nfun:fetestexcept=uninstrumented\nfun:fetestexceptflag=uninstrumented\nfun:feupdateenv=uninstrumented\nfun:fexecve=uninstrumented\nfun:fflush=uninstrumented\nfun:fflush_unlocked=uninstrumented\nfun:ffma=uninstrumented\nfun:ffmal=uninstrumented\nfun:ffs=uninstrumented\nfun:ffsl=uninstrumented\nfun:ffsll=uninstrumented\nfun:fgetc=uninstrumented\nfun:fgetc_unlocked=uninstrumented\nfun:fgetgrent=uninstrumented\nfun:fgetgrent_r=uninstrumented\nfun:fgetpos=uninstrumented\nfun:fgetpos64=uninstrumented\nfun:fgetpwent=uninstrumented\nfun:fgetpwent_r=uninstrumented\nfun:fgets=uninstrumented\nfun:fgets_unlocked=uninstrumented\nfun:fgetsgent=uninstrumented\nfun:fgetsgent_r=uninstrumented\nfun:fgetspent=uninstrumented\nfun:fgetspent_r=uninstrumented\nfun:fgetwc=uninstrumented\nfun:fgetwc_unlocked=uninstrumented\nfun:fgetws=uninstrumented\nfun:fgetws_unlocked=uninstrumented\nfun:fgetxattr=uninstrumented\nfun:fileno=uninstrumented\nfun:fileno_unlocked=uninstrumented\nfun:finite=uninstrumented\nfun:finitef=uninstrumented\nfun:finitel=uninstrumented\nfun:flistxattr=uninstrumented\nfun:flock=uninstrumented\nfun:flockfile=uninstrumented\nfun:floor=uninstrumented\nfun:floorf=uninstrumented\nfun:floorf128=uninstrumented\nfun:floorf32=uninstrumented\nfun:floorf32x=uninstrumented\nfun:floorf64=uninstrumented\nfun:floorf64x=uninstrumented\nfun:floorl=uninstrumented\nfun:fma=uninstrumented\nfun:fmaf=uninstrumented\nfun:fmaf128=uninstrumented\nfun:fmaf32=uninstrumented\nfun:fmaf32x=uninstrumented\nfun:fmaf64=uninstrumented\nfun:fmaf64x=uninstrumented\nfun:fmal=uninstrumented\nfun:fmax=u
ninstrumented\nfun:fmaxf=uninstrumented\nfun:fmaxf128=uninstrumented\nfun:fmaxf32=uninstrumented\nfun:fmaxf32x=uninstrumented\nfun:fmaxf64=uninstrumented\nfun:fmaxf64x=uninstrumented\nfun:fmaximum=uninstrumented\nfun:fmaximum_mag=uninstrumented\nfun:fmaximum_mag_num=uninstrumented\nfun:fmaximum_mag_numf=uninstrumented\nfun:fmaximum_mag_numf128=uninstrumented\nfun:fmaximum_mag_numf32=uninstrumented\nfun:fmaximum_mag_numf32x=uninstrumented\nfun:fmaximum_mag_numf64=uninstrumented\nfun:fmaximum_mag_numf64x=uninstrumented\nfun:fmaximum_mag_numl=uninstrumented\nfun:fmaximum_magf=uninstrumented\nfun:fmaximum_magf128=uninstrumented\nfun:fmaximum_magf32=uninstrumented\nfun:fmaximum_magf32x=uninstrumented\nfun:fmaximum_magf64=uninstrumented\nfun:fmaximum_magf64x=uninstrumented\nfun:fmaximum_magl=uninstrumented\nfun:fmaximum_num=uninstrumented\nfun:fmaximum_numf=uninstrumented\nfun:fmaximum_numf128=uninstrumented\nfun:fmaximum_numf32=uninstrumented\nfun:fmaximum_numf32x=uninstrumented\nfun:fmaximum_numf64=uninstrumented\nfun:fmaximum_numf64x=uninstrumented\nfun:fmaximum_numl=uninstrumented\nfun:fmaximumf=uninstrumented\nfun:fmaximumf128=uninstrumented\nfun:fmaximumf32=uninstrumented\nfun:fmaximumf32x=uninstrumented\nfun:fmaximumf64=uninstrumented\nfun:fmaximumf64x=uninstrumented\nfun:fmaximuml=uninstrumented\nfun:fmaxl=uninstrumented\nfun:fmaxmag=uninstrumented\nfun:fmaxmagf=uninstrumented\nfun:fmaxmagf128=uninstrumented\nfun:fmaxmagf32=uninstrumented\nfun:fmaxmagf32x=uninstrumented\nfun:fmaxmagf64=uninstrumented\nfun:fmaxmagf64x=uninstrumented\nfun:fmaxmagl=uninstrumented\nfun:fmemopen=uninstrumented\nfun:fmin=uninstrumented\nfun:fminf=uninstrumented\nfun:fminf128=uninstrumented\nfun:fminf32=uninstrumented\nfun:fminf32x=uninstrumented\nfun:fminf64=uninstrumented\nfun:fminf64x=uninstrumented\nfun:fminimum=uninstrumented\nfun:fminimum_mag=uninstrumented\nfun:fminimum_mag_num=uninstrumented\nfun:fminimum_mag_numf=uninstrumented\nfun:fminimum_mag_numf128=uninstrumented\nfun:fmini
mum_mag_numf32=uninstrumented\nfun:fminimum_mag_numf32x=uninstrumented\nfun:fminimum_mag_numf64=uninstrumented\nfun:fminimum_mag_numf64x=uninstrumented\nfun:fminimum_mag_numl=uninstrumented\nfun:fminimum_magf=uninstrumented\nfun:fminimum_magf128=uninstrumented\nfun:fminimum_magf32=uninstrumented\nfun:fminimum_magf32x=uninstrumented\nfun:fminimum_magf64=uninstrumented\nfun:fminimum_magf64x=uninstrumented\nfun:fminimum_magl=uninstrumented\nfun:fminimum_num=uninstrumented\nfun:fminimum_numf=uninstrumented\nfun:fminimum_numf128=uninstrumented\nfun:fminimum_numf32=uninstrumented\nfun:fminimum_numf32x=uninstrumented\nfun:fminimum_numf64=uninstrumented\nfun:fminimum_numf64x=uninstrumented\nfun:fminimum_numl=uninstrumented\nfun:fminimumf=uninstrumented\nfun:fminimumf128=uninstrumented\nfun:fminimumf32=uninstrumented\nfun:fminimumf32x=uninstrumented\nfun:fminimumf64=uninstrumented\nfun:fminimumf64x=uninstrumented\nfun:fminimuml=uninstrumented\nfun:fminl=uninstrumented\nfun:fminmag=uninstrumented\nfun:fminmagf=uninstrumented\nfun:fminmagf128=uninstrumented\nfun:fminmagf32=uninstrumented\nfun:fminmagf32x=uninstrumented\nfun:fminmagf64=uninstrumented\nfun:fminmagf64x=uninstrumented\nfun:fminmagl=uninstrumented\nfun:fmod=uninstrumented\nfun:fmodf=uninstrumented\nfun:fmodf128=uninstrumented\nfun:fmodf32=uninstrumented\nfun:fmodf32x=uninstrumented\nfun:fmodf64=uninstrumented\nfun:fmodf64x=uninstrumented\nfun:fmodl=uninstrumented\nfun:fmtmsg=uninstrumented\nfun:fmul=uninstrumented\nfun:fmull=uninstrumented\nfun:fnmatch=uninstrumented\nfun:fopen=uninstrumented\nfun:fopen64=uninstrumented\nfun:fopencookie=uninstrumented\nfun:fork=uninstrumented\nfun:forkpty=uninstrumented\nfun:fpathconf=uninstrumented\nfun:fprintf=uninstrumented\nfun:fputc=uninstrumented\nfun:fputc_unlocked=uninstrumented\nfun:fputs=uninstrumented\nfun:fputs_unlocked=uninstrumented\nfun:fputwc=uninstrumented\nfun:fputwc_unlocked=uninstrumented\nfun:fputws=uninstrumented\nfun:fputws_unlocked=uninstrumented\nfun:fread=
uninstrumented\nfun:fread_unlocked=uninstrumented\nfun:free=uninstrumented\nfun:freeaddrinfo=uninstrumented\nfun:freeifaddrs=uninstrumented\nfun:freelocale=uninstrumented\nfun:fremovexattr=uninstrumented\nfun:freopen=uninstrumented\nfun:freopen64=uninstrumented\nfun:frexp=uninstrumented\nfun:frexpf=uninstrumented\nfun:frexpf128=uninstrumented\nfun:frexpf32=uninstrumented\nfun:frexpf32x=uninstrumented\nfun:frexpf64=uninstrumented\nfun:frexpf64x=uninstrumented\nfun:frexpl=uninstrumented\nfun:fromfp=uninstrumented\nfun:fromfpf=uninstrumented\nfun:fromfpf128=uninstrumented\nfun:fromfpf32=uninstrumented\nfun:fromfpf32x=uninstrumented\nfun:fromfpf64=uninstrumented\nfun:fromfpf64x=uninstrumented\nfun:fromfpl=uninstrumented\nfun:fromfpx=uninstrumented\nfun:fromfpxf=uninstrumented\nfun:fromfpxf128=uninstrumented\nfun:fromfpxf32=uninstrumented\nfun:fromfpxf32x=uninstrumented\nfun:fromfpxf64=uninstrumented\nfun:fromfpxf64x=uninstrumented\nfun:fromfpxl=uninstrumented\nfun:fscanf=uninstrumented\nfun:fsconfig=uninstrumented\nfun:fseek=uninstrumented\nfun:fseeko=uninstrumented\nfun:fseeko64=uninstrumented\nfun:fsetpos=uninstrumented\nfun:fsetpos64=uninstrumented\nfun:fsetxattr=uninstrumented\nfun:fsmount=uninstrumented\nfun:fsopen=uninstrumented\nfun:fspick=uninstrumented\nfun:fsqrt=uninstrumented\nfun:fsqrtl=uninstrumented\nfun:fstat=uninstrumented\nfun:fstat64=uninstrumented\nfun:fstatat=uninstrumented\nfun:fstatat64=uninstrumented\nfun:fstatfs=uninstrumented\nfun:fstatfs64=uninstrumented\nfun:fstatvfs=uninstrumented\nfun:fstatvfs64=uninstrumented\nfun:fsub=uninstrumented\nfun:fsubl=uninstrumented\nfun:fsync=uninstrumented\nfun:ftell=uninstrumented\nfun:ftello=uninstrumented\nfun:ftello64=uninstrumented\nfun:ftime=uninstrumented\nfun:ftok=uninstrumented\nfun:ftruncate=uninstrumented\nfun:ftruncate64=uninstrumented\nfun:ftrylockfile=uninstrumented\nfun:fts64_children=uninstrumented\nfun:fts64_close=uninstrumented\nfun:fts64_open=uninstrumented\nfun:fts64_read=uninstrumented\nfun:
fts64_set=uninstrumented\nfun:fts_children=uninstrumented\nfun:fts_close=uninstrumented\nfun:fts_open=uninstrumented\nfun:fts_read=uninstrumented\nfun:fts_set=uninstrumented\nfun:ftw=uninstrumented\nfun:ftw64=uninstrumented\nfun:funlockfile=uninstrumented\nfun:futimens=uninstrumented\nfun:futimes=uninstrumented\nfun:futimesat=uninstrumented\nfun:fwide=uninstrumented\nfun:fwprintf=uninstrumented\nfun:fwrite=uninstrumented\nfun:fwrite_unlocked=uninstrumented\nfun:fwscanf=uninstrumented\nfun:gai_cancel=uninstrumented\nfun:gai_error=uninstrumented\nfun:gai_strerror=uninstrumented\nfun:gai_suspend=uninstrumented\nfun:gamma=uninstrumented\nfun:gammaf=uninstrumented\nfun:gammal=uninstrumented\nfun:gcvt=uninstrumented\nfun:get_avphys_pages=uninstrumented\nfun:get_current_dir_name=uninstrumented\nfun:get_kernel_syms=uninstrumented\nfun:get_myaddress=uninstrumented\nfun:get_nprocs=uninstrumented\nfun:get_nprocs_conf=uninstrumented\nfun:get_phys_pages=uninstrumented\nfun:getaddrinfo=uninstrumented\nfun:getaddrinfo_a=uninstrumented\nfun:getaliasbyname=uninstrumented\nfun:getaliasbyname_r=uninstrumented\nfun:getaliasent=uninstrumented\nfun:getaliasent_r=uninstrumented\nfun:getauxval=uninstrumented\nfun:getc=uninstrumented\nfun:getc_unlocked=uninstrumented\nfun:getchar=uninstrumented\nfun:getchar_unlocked=uninstrumented\nfun:getcontext=uninstrumented\nfun:getcpu=uninstrumented\nfun:getcwd=uninstrumented\nfun:getdate=uninstrumented\nfun:getdate_r=uninstrumented\nfun:getdelim=uninstrumented\nfun:getdents64=uninstrumented\nfun:getdirentries=uninstrumented\nfun:getdirentries64=uninstrumented\nfun:getdomainname=uninstrumented\nfun:getdtablesize=uninstrumented\nfun:getegid=uninstrumented\nfun:getentropy=uninstrumented\nfun:getenv=uninstrumented\nfun:geteuid=uninstrumented\nfun:getfsent=uninstrumented\nfun:getfsfile=uninstrumented\nfun:getfsspec=uninstrumented\nfun:getgid=uninstrumented\nfun:getgrent=uninstrumented\nfun:getgrent_r=uninstrumented\nfun:getgrgid=uninstrumented\nfun:getgrgi
d_r=uninstrumented\nfun:getgrnam=uninstrumented\nfun:getgrnam_r=uninstrumented\nfun:getgrouplist=uninstrumented\nfun:getgroups=uninstrumented\nfun:gethostbyaddr=uninstrumented\nfun:gethostbyaddr_r=uninstrumented\nfun:gethostbyname=uninstrumented\nfun:gethostbyname2=uninstrumented\nfun:gethostbyname2_r=uninstrumented\nfun:gethostbyname_r=uninstrumented\nfun:gethostent=uninstrumented\nfun:gethostent_r=uninstrumented\nfun:gethostid=uninstrumented\nfun:gethostname=uninstrumented\nfun:getifaddrs=uninstrumented\nfun:getipv4sourcefilter=uninstrumented\nfun:getitimer=uninstrumented\nfun:getline=uninstrumented\nfun:getloadavg=uninstrumented\nfun:getlogin=uninstrumented\nfun:getlogin_r=uninstrumented\nfun:getmntent=uninstrumented\nfun:getmntent_r=uninstrumented\nfun:getmsg=uninstrumented\nfun:getnameinfo=uninstrumented\nfun:getnetbyaddr=uninstrumented\nfun:getnetbyaddr_r=uninstrumented\nfun:getnetbyname=uninstrumented\nfun:getnetbyname_r=uninstrumented\nfun:getnetent=uninstrumented\nfun:getnetent_r=uninstrumented\nfun:getnetgrent=uninstrumented\nfun:getnetgrent_r=uninstrumented\nfun:getnetname=uninstrumented\nfun:getopt=uninstrumented\nfun:getopt_long=uninstrumented\nfun:getopt_long_only=uninstrumented\nfun:getpagesize=uninstrumented\nfun:getpass=uninstrumented\nfun:getpayload=uninstrumented\nfun:getpayloadf=uninstrumented\nfun:getpayloadf128=uninstrumented\nfun:getpayloadf32=uninstrumented\nfun:getpayloadf32x=uninstrumented\nfun:getpayloadf64=uninstrumented\nfun:getpayloadf64x=uninstrumented\nfun:getpayloadl=uninstrumented\nfun:getpeername=uninstrumented\nfun:getpgid=uninstrumented\nfun:getpgrp=uninstrumented\nfun:getpid=uninstrumented\nfun:getpmsg=uninstrumented\nfun:getppid=uninstrumented\nfun:getpriority=uninstrumented\nfun:getprotobyname=uninstrumented\nfun:getprotobyname_r=uninstrumented\nfun:getprotobynumber=uninstrumented\nfun:getprotobynumber_r=uninstrumented\nfun:getprotoent=uninstrumented\nfun:getprotoent_r=uninstrumented\nfun:getpt=uninstrumented\nfun:getpublickey
=uninstrumented\nfun:getpw=uninstrumented\nfun:getpwent=uninstrumented\nfun:getpwent_r=uninstrumented\nfun:getpwnam=uninstrumented\nfun:getpwnam_r=uninstrumented\nfun:getpwuid=uninstrumented\nfun:getpwuid_r=uninstrumented\nfun:getrandom=uninstrumented\nfun:getresgid=uninstrumented\nfun:getresuid=uninstrumented\nfun:getrlimit=uninstrumented\nfun:getrlimit64=uninstrumented\nfun:getrpcbyname=uninstrumented\nfun:getrpcbyname_r=uninstrumented\nfun:getrpcbynumber=uninstrumented\nfun:getrpcbynumber_r=uninstrumented\nfun:getrpcent=uninstrumented\nfun:getrpcent_r=uninstrumented\nfun:getrpcport=uninstrumented\nfun:getrusage=uninstrumented\nfun:gets=uninstrumented\nfun:getsecretkey=uninstrumented\nfun:getservbyname=uninstrumented\nfun:getservbyname_r=uninstrumented\nfun:getservbyport=uninstrumented\nfun:getservbyport_r=uninstrumented\nfun:getservent=uninstrumented\nfun:getservent_r=uninstrumented\nfun:getsgent=uninstrumented\nfun:getsgent_r=uninstrumented\nfun:getsgnam=uninstrumented\nfun:getsgnam_r=uninstrumented\nfun:getsid=uninstrumented\nfun:getsockname=uninstrumented\nfun:getsockopt=uninstrumented\nfun:getsourcefilter=uninstrumented\nfun:getspent=uninstrumented\nfun:getspent_r=uninstrumented\nfun:getspnam=uninstrumented\nfun:getspnam_r=uninstrumented\nfun:getsubopt=uninstrumented\nfun:gettext=uninstrumented\nfun:gettid=uninstrumented\nfun:gettimeofday=uninstrumented\nfun:getttyent=uninstrumented\nfun:getttynam=uninstrumented\nfun:getuid=uninstrumented\nfun:getusershell=uninstrumented\nfun:getutent=uninstrumented\nfun:getutent_r=uninstrumented\nfun:getutid=uninstrumented\nfun:getutid_r=uninstrumented\nfun:getutline=uninstrumented\nfun:getutline_r=uninstrumented\nfun:getutmp=uninstrumented\nfun:getutmpx=uninstrumented\nfun:getutxent=uninstrumented\nfun:getutxid=uninstrumented\nfun:getutxline=uninstrumented\nfun:getw=uninstrumented\nfun:getwc=uninstrumented\nfun:getwc_unlocked=uninstrumented\nfun:getwchar=uninstrumented\nfun:getwchar_unlocked=uninstrumented\nfun:getwd=uninst
rumented\nfun:getxattr=uninstrumented\nfun:glob=uninstrumented\nfun:glob64=uninstrumented\nfun:glob_pattern_p=uninstrumented\nfun:globfree=uninstrumented\nfun:globfree64=uninstrumented\nfun:gmtime=uninstrumented\nfun:gmtime_r=uninstrumented\nfun:gnu_dev_major=uninstrumented\nfun:gnu_dev_makedev=uninstrumented\nfun:gnu_dev_minor=uninstrumented\nfun:gnu_get_libc_release=uninstrumented\nfun:gnu_get_libc_version=uninstrumented\nfun:grantpt=uninstrumented\nfun:group_member=uninstrumented\nfun:gsignal=uninstrumented\nfun:gtty=uninstrumented\nfun:hasmntopt=uninstrumented\nfun:hcreate=uninstrumented\nfun:hcreate_r=uninstrumented\nfun:hdestroy=uninstrumented\nfun:hdestroy_r=uninstrumented\nfun:herror=uninstrumented\nfun:host2netname=uninstrumented\nfun:hsearch=uninstrumented\nfun:hsearch_r=uninstrumented\nfun:hstrerror=uninstrumented\nfun:htonl=uninstrumented\nfun:htons=uninstrumented\nfun:hypot=uninstrumented\nfun:hypotf=uninstrumented\nfun:hypotf128=uninstrumented\nfun:hypotf32=uninstrumented\nfun:hypotf32x=uninstrumented\nfun:hypotf64=uninstrumented\nfun:hypotf64x=uninstrumented\nfun:hypotl=uninstrumented\nfun:iconv=uninstrumented\nfun:iconv_close=uninstrumented\nfun:iconv_open=uninstrumented\nfun:if_freenameindex=uninstrumented\nfun:if_indextoname=uninstrumented\nfun:if_nameindex=uninstrumented\nfun:if_nametoindex=uninstrumented\nfun:ilogb=uninstrumented\nfun:ilogbf=uninstrumented\nfun:ilogbf128=uninstrumented\nfun:ilogbf32=uninstrumented\nfun:ilogbf32x=uninstrumented\nfun:ilogbf64=uninstrumented\nfun:ilogbf64x=uninstrumented\nfun:ilogbl=uninstrumented\nfun:imaxabs=uninstrumented\nfun:imaxdiv=uninstrumented\nfun:index=uninstrumented\nfun:inet6_opt_append=uninstrumented\nfun:inet6_opt_find=uninstrumented\nfun:inet6_opt_finish=uninstrumented\nfun:inet6_opt_get_val=uninstrumented\nfun:inet6_opt_init=uninstrumented\nfun:inet6_opt_next=uninstrumented\nfun:inet6_opt_set_val=uninstrumented\nfun:inet6_option_alloc=uninstrumented\nfun:inet6_option_append=uninstrumented\nfun:inet6
_option_find=uninstrumented\nfun:inet6_option_init=uninstrumented\nfun:inet6_option_next=uninstrumented\nfun:inet6_option_space=uninstrumented\nfun:inet6_rth_add=uninstrumented\nfun:inet6_rth_getaddr=uninstrumented\nfun:inet6_rth_init=uninstrumented\nfun:inet6_rth_reverse=uninstrumented\nfun:inet6_rth_segments=uninstrumented\nfun:inet6_rth_space=uninstrumented\nfun:inet_addr=uninstrumented\nfun:inet_aton=uninstrumented\nfun:inet_lnaof=uninstrumented\nfun:inet_makeaddr=uninstrumented\nfun:inet_net_ntop=uninstrumented\nfun:inet_net_pton=uninstrumented\nfun:inet_neta=uninstrumented\nfun:inet_netof=uninstrumented\nfun:inet_network=uninstrumented\nfun:inet_nsap_addr=uninstrumented\nfun:inet_nsap_ntoa=uninstrumented\nfun:inet_ntoa=uninstrumented\nfun:inet_ntop=uninstrumented\nfun:inet_pton=uninstrumented\nfun:init_module=uninstrumented\nfun:initgroups=uninstrumented\nfun:initstate=uninstrumented\nfun:initstate_r=uninstrumented\nfun:innetgr=uninstrumented\nfun:inotify_add_watch=uninstrumented\nfun:inotify_init=uninstrumented\nfun:inotify_init1=uninstrumented\nfun:inotify_rm_watch=uninstrumented\nfun:insque=uninstrumented\nfun:ioctl=uninstrumented\nfun:ioperm=uninstrumented\nfun:iopl=uninstrumented\nfun:iruserok=uninstrumented\nfun:iruserok_af=uninstrumented\nfun:isalnum=uninstrumented\nfun:isalnum_l=uninstrumented\nfun:isalpha=uninstrumented\nfun:isalpha_l=uninstrumented\nfun:isascii=uninstrumented\nfun:isastream=uninstrumented\nfun:isatty=uninstrumented\nfun:isblank=uninstrumented\nfun:isblank_l=uninstrumented\nfun:iscntrl=uninstrumented\nfun:iscntrl_l=uninstrumented\nfun:isctype=uninstrumented\nfun:isdigit=uninstrumented\nfun:isdigit_l=uninstrumented\nfun:isfdtype=uninstrumented\nfun:isgraph=uninstrumented\nfun:isgraph_l=uninstrumented\nfun:isinf=uninstrumented\nfun:isinfd128=uninstrumented\nfun:isinfd32=uninstrumented\nfun:isinfd64=uninstrumented\nfun:isinff=uninstrumented\nfun:isinfl=uninstrumented\nfun:islower=uninstrumented\nfun:islower_l=uninstrumented\nfun:isnan=un
instrumented\nfun:isnanf=uninstrumented\nfun:isnanl=uninstrumented\nfun:isprint=uninstrumented\nfun:isprint_l=uninstrumented\nfun:ispunct=uninstrumented\nfun:ispunct_l=uninstrumented\nfun:isspace=uninstrumented\nfun:isspace_l=uninstrumented\nfun:isupper=uninstrumented\nfun:isupper_l=uninstrumented\nfun:iswalnum=uninstrumented\nfun:iswalnum_l=uninstrumented\nfun:iswalpha=uninstrumented\nfun:iswalpha_l=uninstrumented\nfun:iswblank=uninstrumented\nfun:iswblank_l=uninstrumented\nfun:iswcntrl=uninstrumented\nfun:iswcntrl_l=uninstrumented\nfun:iswctype=uninstrumented\nfun:iswctype_l=uninstrumented\nfun:iswdigit=uninstrumented\nfun:iswdigit_l=uninstrumented\nfun:iswgraph=uninstrumented\nfun:iswgraph_l=uninstrumented\nfun:iswlower=uninstrumented\nfun:iswlower_l=uninstrumented\nfun:iswprint=uninstrumented\nfun:iswprint_l=uninstrumented\nfun:iswpunct=uninstrumented\nfun:iswpunct_l=uninstrumented\nfun:iswspace=uninstrumented\nfun:iswspace_l=uninstrumented\nfun:iswupper=uninstrumented\nfun:iswupper_l=uninstrumented\nfun:iswxdigit=uninstrumented\nfun:iswxdigit_l=uninstrumented\nfun:isxdigit=uninstrumented\nfun:isxdigit_l=uninstrumented\nfun:j0=uninstrumented\nfun:j0f=uninstrumented\nfun:j0f128=uninstrumented\nfun:j0f32=uninstrumented\nfun:j0f32x=uninstrumented\nfun:j0f64=uninstrumented\nfun:j0f64x=uninstrumented\nfun:j0l=uninstrumented\nfun:j1=uninstrumented\nfun:j1f=uninstrumented\nfun:j1f128=uninstrumented\nfun:j1f32=uninstrumented\nfun:j1f32x=uninstrumented\nfun:j1f64=uninstrumented\nfun:j1f64x=uninstrumented\nfun:j1l=uninstrumented\nfun:jn=uninstrumented\nfun:jnf=uninstrumented\nfun:jnf128=uninstrumented\nfun:jnf32=uninstrumented\nfun:jnf32x=uninstrumented\nfun:jnf64=uninstrumented\nfun:jnf64x=uninstrumented\nfun:jnl=uninstrumented\nfun:jrand48=uninstrumented\nfun:jrand48_r=uninstrumented\nfun:key_decryptsession=uninstrumented\nfun:key_decryptsession_pk=uninstrumented\nfun:key_encryptsession=uninstrumented\nfun:key_encryptsession_pk=uninstrumented\nfun:key_gendes=uninstrumen
ted\nfun:key_get_conv=uninstrumented\nfun:key_secretkey_is_set=uninstrumented\nfun:key_setnet=uninstrumented\nfun:key_setsecret=uninstrumented\nfun:kill=uninstrumented\nfun:killpg=uninstrumented\nfun:klogctl=uninstrumented\nfun:l64a=uninstrumented\nfun:labs=uninstrumented\nfun:lchmod=uninstrumented\nfun:lchown=uninstrumented\nfun:lckpwdf=uninstrumented\nfun:lcong48=uninstrumented\nfun:lcong48_r=uninstrumented\nfun:ldexp=uninstrumented\nfun:ldexpf=uninstrumented\nfun:ldexpf128=uninstrumented\nfun:ldexpf32=uninstrumented\nfun:ldexpf32x=uninstrumented\nfun:ldexpf64=uninstrumented\nfun:ldexpf64x=uninstrumented\nfun:ldexpl=uninstrumented\nfun:ldiv=uninstrumented\nfun:lfind=uninstrumented\nfun:lgamma=uninstrumented\nfun:lgamma_r=uninstrumented\nfun:lgammaf=uninstrumented\nfun:lgammaf128=uninstrumented\nfun:lgammaf128_r=uninstrumented\nfun:lgammaf32=uninstrumented\nfun:lgammaf32_r=uninstrumented\nfun:lgammaf32x=uninstrumented\nfun:lgammaf32x_r=uninstrumented\nfun:lgammaf64=uninstrumented\nfun:lgammaf64_r=uninstrumented\nfun:lgammaf64x=uninstrumented\nfun:lgammaf64x_r=uninstrumented\nfun:lgammaf_r=uninstrumented\nfun:lgammal=uninstrumented\nfun:lgammal_r=uninstrumented\nfun:lgetxattr=uninstrumented\nfun:link=uninstrumented\nfun:linkat=uninstrumented\nfun:lio_listio=uninstrumented\nfun:lio_listio64=uninstrumented\nfun:listen=uninstrumented\nfun:listxattr=uninstrumented\nfun:llabs=uninstrumented\nfun:lldiv=uninstrumented\nfun:llistxattr=uninstrumented\nfun:llogb=uninstrumented\nfun:llogbf=uninstrumented\nfun:llogbf128=uninstrumented\nfun:llogbf32=uninstrumented\nfun:llogbf32x=uninstrumented\nfun:llogbf64=uninstrumented\nfun:llogbf64x=uninstrumented\nfun:llogbl=uninstrumented\nfun:llrint=uninstrumented\nfun:llrintf=uninstrumented\nfun:llrintf128=uninstrumented\nfun:llrintf32=uninstrumented\nfun:llrintf32x=uninstrumented\nfun:llrintf64=uninstrumented\nfun:llrintf64x=uninstrumented\nfun:llrintl=uninstrumented\nfun:llround=uninstrumented\nfun:llroundf=uninstrumented\nfun:llroundf
128=uninstrumented\nfun:llroundf32=uninstrumented\nfun:llroundf32x=uninstrumented\nfun:llroundf64=uninstrumented\nfun:llroundf64x=uninstrumented\nfun:llroundl=uninstrumented\nfun:llseek=uninstrumented\nfun:localeconv=uninstrumented\nfun:localtime=uninstrumented\nfun:localtime_r=uninstrumented\nfun:lockf=uninstrumented\nfun:lockf64=uninstrumented\nfun:log=uninstrumented\nfun:log10=uninstrumented\nfun:log10f=uninstrumented\nfun:log10f128=uninstrumented\nfun:log10f32=uninstrumented\nfun:log10f32x=uninstrumented\nfun:log10f64=uninstrumented\nfun:log10f64x=uninstrumented\nfun:log10l=uninstrumented\nfun:log1p=uninstrumented\nfun:log1pf=uninstrumented\nfun:log1pf128=uninstrumented\nfun:log1pf32=uninstrumented\nfun:log1pf32x=uninstrumented\nfun:log1pf64=uninstrumented\nfun:log1pf64x=uninstrumented\nfun:log1pl=uninstrumented\nfun:log2=uninstrumented\nfun:log2f=uninstrumented\nfun:log2f128=uninstrumented\nfun:log2f32=uninstrumented\nfun:log2f32x=uninstrumented\nfun:log2f64=uninstrumented\nfun:log2f64x=uninstrumented\nfun:log2l=uninstrumented\nfun:logb=uninstrumented\nfun:logbf=uninstrumented\nfun:logbf128=uninstrumented\nfun:logbf32=uninstrumented\nfun:logbf32x=uninstrumented\nfun:logbf64=uninstrumented\nfun:logbf64x=uninstrumented\nfun:logbl=uninstrumented\nfun:logf=uninstrumented\nfun:logf128=uninstrumented\nfun:logf32=uninstrumented\nfun:logf32x=uninstrumented\nfun:logf64=uninstrumented\nfun:logf64x=uninstrumented\nfun:login=uninstrumented\nfun:login_tty=uninstrumented\nfun:logl=uninstrumented\nfun:logout=uninstrumented\nfun:logwtmp=uninstrumented\nfun:longjmp=uninstrumented\nfun:lrand48=uninstrumented\nfun:lrand48_r=uninstrumented\nfun:lremovexattr=uninstrumented\nfun:lrint=uninstrumented\nfun:lrintf=uninstrumented\nfun:lrintf128=uninstrumented\nfun:lrintf32=uninstrumented\nfun:lrintf32x=uninstrumented\nfun:lrintf64=uninstrumented\nfun:lrintf64x=uninstrumented\nfun:lrintl=uninstrumented\nfun:lround=uninstrumented\nfun:lroundf=uninstrumented\nfun:lroundf128=uninstrumented\
nfun:lroundf32=uninstrumented\nfun:lroundf32x=uninstrumented\nfun:lroundf64=uninstrumented\nfun:lroundf64x=uninstrumented\nfun:lroundl=uninstrumented\nfun:lsearch=uninstrumented\nfun:lseek=uninstrumented\nfun:lseek64=uninstrumented\nfun:lsetxattr=uninstrumented\nfun:lstat=uninstrumented\nfun:lstat64=uninstrumented\nfun:lutimes=uninstrumented\nfun:madvise=uninstrumented\nfun:makecontext=uninstrumented\nfun:mallinfo=uninstrumented\nfun:mallinfo2=uninstrumented\nfun:malloc=uninstrumented\nfun:malloc_info=uninstrumented\nfun:malloc_stats=uninstrumented\nfun:malloc_trim=uninstrumented\nfun:malloc_usable_size=uninstrumented\nfun:mallopt=uninstrumented\nfun:matherr=uninstrumented\nfun:mblen=uninstrumented\nfun:mbrlen=uninstrumented\nfun:mbrtoc16=uninstrumented\nfun:mbrtoc32=uninstrumented\nfun:mbrtoc8=uninstrumented\nfun:mbrtowc=uninstrumented\nfun:mbsinit=uninstrumented\nfun:mbsnrtowcs=uninstrumented\nfun:mbsrtowcs=uninstrumented\nfun:mbstowcs=uninstrumented\nfun:mbtowc=uninstrumented\nfun:mcheck=uninstrumented\nfun:mcheck_check_all=uninstrumented\nfun:mcheck_pedantic=uninstrumented\nfun:mcount=uninstrumented\nfun:memalign=uninstrumented\nfun:memccpy=uninstrumented\nfun:memchr=uninstrumented\nfun:memcmp=uninstrumented\nfun:memcpy=uninstrumented\nfun:memfd_create=uninstrumented\nfun:memfrob=uninstrumented\nfun:memmem=uninstrumented\nfun:memmove=uninstrumented\nfun:mempcpy=uninstrumented\nfun:memrchr=uninstrumented\nfun:memset=uninstrumented\nfun:mincore=uninstrumented\nfun:mkdir=uninstrumented\nfun:mkdirat=uninstrumented\nfun:mkdtemp=uninstrumented\nfun:mkfifo=uninstrumented\nfun:mkfifoat=uninstrumented\nfun:mknod=uninstrumented\nfun:mknodat=uninstrumented\nfun:mkostemp=uninstrumented\nfun:mkostemp64=uninstrumented\nfun:mkostemps=uninstrumented\nfun:mkostemps64=uninstrumented\nfun:mkstemp=uninstrumented\nfun:mkstemp64=uninstrumented\nfun:mkstemps=uninstrumented\nfun:mkstemps64=uninstrumented\nfun:mktemp=uninstrumented\nfun:mktime=uninstrumented\nfun:mlock=uninstrumented\nf
un:mlock2=uninstrumented\nfun:mlockall=uninstrumented\nfun:mmap=uninstrumented\nfun:mmap64=uninstrumented\nfun:modf=uninstrumented\nfun:modff=uninstrumented\nfun:modff128=uninstrumented\nfun:modff32=uninstrumented\nfun:modff32x=uninstrumented\nfun:modff64=uninstrumented\nfun:modff64x=uninstrumented\nfun:modfl=uninstrumented\nfun:modify_ldt=uninstrumented\nfun:moncontrol=uninstrumented\nfun:monstartup=uninstrumented\nfun:mount=uninstrumented\nfun:mount_setattr=uninstrumented\nfun:move_mount=uninstrumented\nfun:mprobe=uninstrumented\nfun:mprotect=uninstrumented\nfun:mq_close=uninstrumented\nfun:mq_getattr=uninstrumented\nfun:mq_notify=uninstrumented\nfun:mq_open=uninstrumented\nfun:mq_receive=uninstrumented\nfun:mq_send=uninstrumented\nfun:mq_setattr=uninstrumented\nfun:mq_timedreceive=uninstrumented\nfun:mq_timedsend=uninstrumented\nfun:mq_unlink=uninstrumented\nfun:mrand48=uninstrumented\nfun:mrand48_r=uninstrumented\nfun:mremap=uninstrumented\nfun:msgctl=uninstrumented\nfun:msgget=uninstrumented\nfun:msgrcv=uninstrumented\nfun:msgsnd=uninstrumented\nfun:msync=uninstrumented\nfun:mtrace=uninstrumented\nfun:mtx_destroy=uninstrumented\nfun:mtx_init=uninstrumented\nfun:mtx_lock=uninstrumented\nfun:mtx_timedlock=uninstrumented\nfun:mtx_trylock=uninstrumented\nfun:mtx_unlock=uninstrumented\nfun:munlock=uninstrumented\nfun:munlockall=uninstrumented\nfun:munmap=uninstrumented\nfun:muntrace=uninstrumented\nfun:name_to_handle_at=uninstrumented\nfun:nan=uninstrumented\nfun:nanf=uninstrumented\nfun:nanf128=uninstrumented\nfun:nanf32=uninstrumented\nfun:nanf32x=uninstrumented\nfun:nanf64=uninstrumented\nfun:nanf64x=uninstrumented\nfun:nanl=uninstrumented\nfun:nanosleep=uninstrumented\nfun:nearbyint=uninstrumented\nfun:nearbyintf=uninstrumented\nfun:nearbyintf128=uninstrumented\nfun:nearbyintf32=uninstrumented\nfun:nearbyintf32x=uninstrumented\nfun:nearbyintf64=uninstrumented\nfun:nearbyintf64x=uninstrumented\nfun:nearbyintl=uninstrumented\nfun:netname2host=uninstrumented\nfun:n
etname2user=uninstrumented\nfun:newlocale=uninstrumented\nfun:nextafter=uninstrumented\nfun:nextafterf=uninstrumented\nfun:nextafterf128=uninstrumented\nfun:nextafterf32=uninstrumented\nfun:nextafterf32x=uninstrumented\nfun:nextafterf64=uninstrumented\nfun:nextafterf64x=uninstrumented\nfun:nextafterl=uninstrumented\nfun:nextdown=uninstrumented\nfun:nextdownf=uninstrumented\nfun:nextdownf128=uninstrumented\nfun:nextdownf32=uninstrumented\nfun:nextdownf32x=uninstrumented\nfun:nextdownf64=uninstrumented\nfun:nextdownf64x=uninstrumented\nfun:nextdownl=uninstrumented\nfun:nexttoward=uninstrumented\nfun:nexttowardf=uninstrumented\nfun:nexttowardl=uninstrumented\nfun:nextup=uninstrumented\nfun:nextupf=uninstrumented\nfun:nextupf128=uninstrumented\nfun:nextupf32=uninstrumented\nfun:nextupf32x=uninstrumented\nfun:nextupf64=uninstrumented\nfun:nextupf64x=uninstrumented\nfun:nextupl=uninstrumented\nfun:nfsservctl=uninstrumented\nfun:nftw=uninstrumented\nfun:nftw64=uninstrumented\nfun:ngettext=uninstrumented\nfun:nice=uninstrumented\nfun:nis_add=uninstrumented\nfun:nis_add_entry=uninstrumented\nfun:nis_addmember=uninstrumented\nfun:nis_checkpoint=uninstrumented\nfun:nis_clone_directory=uninstrumented\nfun:nis_clone_object=uninstrumented\nfun:nis_clone_result=uninstrumented\nfun:nis_creategroup=uninstrumented\nfun:nis_destroy_object=uninstrumented\nfun:nis_destroygroup=uninstrumented\nfun:nis_dir_cmp=uninstrumented\nfun:nis_domain_of=uninstrumented\nfun:nis_domain_of_r=uninstrumented\nfun:nis_first_entry=uninstrumented\nfun:nis_free_directory=uninstrumented\nfun:nis_free_object=uninstrumented\nfun:nis_free_request=uninstrumented\nfun:nis_freenames=uninstrumented\nfun:nis_freeresult=uninstrumented\nfun:nis_freeservlist=uninstrumented\nfun:nis_freetags=uninstrumented\nfun:nis_getnames=uninstrumented\nfun:nis_getservlist=uninstrumented\nfun:nis_ismember=uninstrumented\nfun:nis_leaf_of=uninstrumented\nfun:nis_leaf_of_r=uninstrumented\nfun:nis_lerror=uninstrumented\nfun:nis_list=unin
strumented\nfun:nis_local_directory=uninstrumented\nfun:nis_local_group=uninstrumented\nfun:nis_local_host=uninstrumented\nfun:nis_local_principal=uninstrumented\nfun:nis_lookup=uninstrumented\nfun:nis_mkdir=uninstrumented\nfun:nis_modify=uninstrumented\nfun:nis_modify_entry=uninstrumented\nfun:nis_name_of=uninstrumented\nfun:nis_name_of_r=uninstrumented\nfun:nis_next_entry=uninstrumented\nfun:nis_perror=uninstrumented\nfun:nis_ping=uninstrumented\nfun:nis_print_directory=uninstrumented\nfun:nis_print_entry=uninstrumented\nfun:nis_print_group=uninstrumented\nfun:nis_print_group_entry=uninstrumented\nfun:nis_print_link=uninstrumented\nfun:nis_print_object=uninstrumented\nfun:nis_print_result=uninstrumented\nfun:nis_print_rights=uninstrumented\nfun:nis_print_table=uninstrumented\nfun:nis_read_obj=uninstrumented\nfun:nis_remove=uninstrumented\nfun:nis_remove_entry=uninstrumented\nfun:nis_removemember=uninstrumented\nfun:nis_rmdir=uninstrumented\nfun:nis_servstate=uninstrumented\nfun:nis_sperrno=uninstrumented\nfun:nis_sperror=uninstrumented\nfun:nis_sperror_r=uninstrumented\nfun:nis_stats=uninstrumented\nfun:nis_verifygroup=uninstrumented\nfun:nis_write_obj=uninstrumented\nfun:nl_langinfo=uninstrumented\nfun:nl_langinfo_l=uninstrumented\nfun:nrand48=uninstrumented\nfun:nrand48_r=uninstrumented\nfun:ns_datetosecs=uninstrumented\nfun:ns_format_ttl=uninstrumented\nfun:ns_get16=uninstrumented\nfun:ns_get32=uninstrumented\nfun:ns_initparse=uninstrumented\nfun:ns_makecanon=uninstrumented\nfun:ns_msg_getflag=uninstrumented\nfun:ns_name_compress=uninstrumented\nfun:ns_name_ntol=uninstrumented\nfun:ns_name_ntop=uninstrumented\nfun:ns_name_pack=uninstrumented\nfun:ns_name_pton=uninstrumented\nfun:ns_name_rollback=uninstrumented\nfun:ns_name_skip=uninstrumented\nfun:ns_name_uncompress=uninstrumented\nfun:ns_name_unpack=uninstrumented\nfun:ns_parse_ttl=uninstrumented\nfun:ns_parserr=uninstrumented\nfun:ns_put16=uninstrumented\nfun:ns_put32=uninstrumented\nfun:ns_samedomain=uninstr
umented\nfun:ns_samename=uninstrumented\nfun:ns_skiprr=uninstrumented\nfun:ns_sprintrr=uninstrumented\nfun:ns_sprintrrf=uninstrumented\nfun:ns_subdomain=uninstrumented\nfun:ntohl=uninstrumented\nfun:ntohs=uninstrumented\nfun:ntp_adjtime=uninstrumented\nfun:ntp_gettime=uninstrumented\nfun:ntp_gettimex=uninstrumented\nfun:obstack_free=uninstrumented\nfun:obstack_printf=uninstrumented\nfun:obstack_vprintf=uninstrumented\nfun:on_exit=uninstrumented\nfun:open=uninstrumented\nfun:open64=uninstrumented\nfun:open_by_handle_at=uninstrumented\nfun:open_memstream=uninstrumented\nfun:open_tree=uninstrumented\nfun:open_wmemstream=uninstrumented\nfun:openat=uninstrumented\nfun:openat64=uninstrumented\nfun:opendir=uninstrumented\nfun:openlog=uninstrumented\nfun:openpty=uninstrumented\nfun:parse_printf_format=uninstrumented\nfun:passwd2des=uninstrumented\nfun:pathconf=uninstrumented\nfun:pause=uninstrumented\nfun:pclose=uninstrumented\nfun:perror=uninstrumented\nfun:personality=uninstrumented\nfun:pidfd_getfd=uninstrumented\nfun:pidfd_getpid=uninstrumented\nfun:pidfd_open=uninstrumented\nfun:pidfd_send_signal=uninstrumented\nfun:pidfd_spawn=uninstrumented\nfun:pidfd_spawnp=uninstrumented\nfun:pipe=uninstrumented\nfun:pipe2=uninstrumented\nfun:pivot_root=uninstrumented\nfun:pkey_alloc=uninstrumented\nfun:pkey_free=uninstrumented\nfun:pkey_get=uninstrumented\nfun:pkey_mprotect=uninstrumented\nfun:pkey_set=uninstrumented\nfun:pmap_getmaps=uninstrumented\nfun:pmap_getport=uninstrumented\nfun:pmap_rmtcall=uninstrumented\nfun:pmap_set=uninstrumented\nfun:pmap_unset=uninstrumented\nfun:poll=uninstrumented\nfun:popen=uninstrumented\nfun:posix_fadvise=uninstrumented\nfun:posix_fadvise64=uninstrumented\nfun:posix_fallocate=uninstrumented\nfun:posix_fallocate64=uninstrumented\nfun:posix_madvise=uninstrumented\nfun:posix_memalign=uninstrumented\nfun:posix_openpt=uninstrumented\nfun:posix_spawn=uninstrumented\nfun:posix_spawn_file_actions_addchdir_np=uninstrumented\nfun:posix_spawn_file_actions
_addclose=uninstrumented\nfun:posix_spawn_file_actions_addclosefrom_np=uninstrumented\nfun:posix_spawn_file_actions_adddup2=uninstrumented\nfun:posix_spawn_file_actions_addfchdir_np=uninstrumented\nfun:posix_spawn_file_actions_addopen=uninstrumented\nfun:posix_spawn_file_actions_addtcsetpgrp_np=uninstrumented\nfun:posix_spawn_file_actions_destroy=uninstrumented\nfun:posix_spawn_file_actions_init=uninstrumented\nfun:posix_spawnattr_destroy=uninstrumented\nfun:posix_spawnattr_getcgroup_np=uninstrumented\nfun:posix_spawnattr_getflags=uninstrumented\nfun:posix_spawnattr_getpgroup=uninstrumented\nfun:posix_spawnattr_getschedparam=uninstrumented\nfun:posix_spawnattr_getschedpolicy=uninstrumented\nfun:posix_spawnattr_getsigdefault=uninstrumented\nfun:posix_spawnattr_getsigmask=uninstrumented\nfun:posix_spawnattr_init=uninstrumented\nfun:posix_spawnattr_setcgroup_np=uninstrumented\nfun:posix_spawnattr_setflags=uninstrumented\nfun:posix_spawnattr_setpgroup=uninstrumented\nfun:posix_spawnattr_setschedparam=uninstrumented\nfun:posix_spawnattr_setschedpolicy=uninstrumented\nfun:posix_spawnattr_setsigdefault=uninstrumented\nfun:posix_spawnattr_setsigmask=uninstrumented\nfun:posix_spawnp=uninstrumented\nfun:pow=uninstrumented\nfun:pow10=uninstrumented\nfun:pow10f=uninstrumented\nfun:pow10l=uninstrumented\nfun:powf=uninstrumented\nfun:powf128=uninstrumented\nfun:powf32=uninstrumented\nfun:powf32x=uninstrumented\nfun:powf64=uninstrumented\nfun:powf64x=uninstrumented\nfun:powl=uninstrumented\nfun:ppoll=uninstrumented\nfun:prctl=uninstrumented\nfun:pread=uninstrumented\nfun:pread64=uninstrumented\nfun:preadv=uninstrumented\nfun:preadv2=uninstrumented\nfun:preadv64=uninstrumented\nfun:preadv64v2=uninstrumented\nfun:printf=uninstrumented\nfun:printf_size=uninstrumented\nfun:printf_size_info=uninstrumented\nfun:prlimit=uninstrumented\nfun:prlimit64=uninstrumented\nfun:process_madvise=uninstrumented\nfun:process_mrelease=uninstrumented\nfun:process_vm_readv=uninstrumented\nfun:process_vm
_writev=uninstrumented\nfun:profil=uninstrumented\nfun:pselect=uninstrumented\nfun:psiginfo=uninstrumented\nfun:psignal=uninstrumented\nfun:pthread_atfork=uninstrumented\nfun:pthread_attr_destroy=uninstrumented\nfun:pthread_attr_getaffinity_np=uninstrumented\nfun:pthread_attr_getdetachstate=uninstrumented\nfun:pthread_attr_getguardsize=uninstrumented\nfun:pthread_attr_getinheritsched=uninstrumented\nfun:pthread_attr_getschedparam=uninstrumented\nfun:pthread_attr_getschedpolicy=uninstrumented\nfun:pthread_attr_getscope=uninstrumented\nfun:pthread_attr_getsigmask_np=uninstrumented\nfun:pthread_attr_getstack=uninstrumented\nfun:pthread_attr_getstackaddr=uninstrumented\nfun:pthread_attr_getstacksize=uninstrumented\nfun:pthread_attr_init=uninstrumented\nfun:pthread_attr_setaffinity_np=uninstrumented\nfun:pthread_attr_setdetachstate=uninstrumented\nfun:pthread_attr_setguardsize=uninstrumented\nfun:pthread_attr_setinheritsched=uninstrumented\nfun:pthread_attr_setschedparam=uninstrumented\nfun:pthread_attr_setschedpolicy=uninstrumented\nfun:pthread_attr_setscope=uninstrumented\nfun:pthread_attr_setsigmask_np=uninstrumented\nfun:pthread_attr_setstack=uninstrumented\nfun:pthread_attr_setstackaddr=uninstrumented\nfun:pthread_attr_setstacksize=uninstrumented\nfun:pthread_barrier_destroy=uninstrumented\nfun:pthread_barrier_init=uninstrumented\nfun:pthread_barrier_wait=uninstrumented\nfun:pthread_barrierattr_destroy=uninstrumented\nfun:pthread_barrierattr_getpshared=uninstrumented\nfun:pthread_barrierattr_init=uninstrumented\nfun:pthread_barrierattr_setpshared=uninstrumented\nfun:pthread_cancel=uninstrumented\nfun:pthread_clockjoin_np=uninstrumented\nfun:pthread_cond_broadcast=uninstrumented\nfun:pthread_cond_clockwait=uninstrumented\nfun:pthread_cond_destroy=uninstrumented\nfun:pthread_cond_init=uninstrumented\nfun:pthread_cond_signal=uninstrumented\nfun:pthread_cond_timedwait=uninstrumented\nfun:pthread_cond_wait=uninstrumented\nfun:pthread_condattr_destroy=uninstrumented\nfun:
pthread_condattr_getclock=uninstrumented\nfun:pthread_condattr_getpshared=uninstrumented\nfun:pthread_condattr_init=uninstrumented\nfun:pthread_condattr_setclock=uninstrumented\nfun:pthread_condattr_setpshared=uninstrumented\nfun:pthread_create=uninstrumented\nfun:pthread_detach=uninstrumented\nfun:pthread_equal=uninstrumented\nfun:pthread_exit=uninstrumented\nfun:pthread_getaffinity_np=uninstrumented\nfun:pthread_getattr_default_np=uninstrumented\nfun:pthread_getattr_np=uninstrumented\nfun:pthread_getconcurrency=uninstrumented\nfun:pthread_getcpuclockid=uninstrumented\nfun:pthread_getname_np=uninstrumented\nfun:pthread_getschedparam=uninstrumented\nfun:pthread_getspecific=uninstrumented\nfun:pthread_join=uninstrumented\nfun:pthread_key_create=uninstrumented\nfun:pthread_key_delete=uninstrumented\nfun:pthread_kill=uninstrumented\nfun:pthread_kill_other_threads_np=uninstrumented\nfun:pthread_mutex_clocklock=uninstrumented\nfun:pthread_mutex_consistent=uninstrumented\nfun:pthread_mutex_consistent_np=uninstrumented\nfun:pthread_mutex_destroy=uninstrumented\nfun:pthread_mutex_getprioceiling=uninstrumented\nfun:pthread_mutex_init=uninstrumented\nfun:pthread_mutex_lock=uninstrumented\nfun:pthread_mutex_setprioceiling=uninstrumented\nfun:pthread_mutex_timedlock=uninstrumented\nfun:pthread_mutex_trylock=uninstrumented\nfun:pthread_mutex_unlock=uninstrumented\nfun:pthread_mutexattr_destroy=uninstrumented\nfun:pthread_mutexattr_getkind_np=uninstrumented\nfun:pthread_mutexattr_getprioceiling=uninstrumented\nfun:pthread_mutexattr_getprotocol=uninstrumented\nfun:pthread_mutexattr_getpshared=uninstrumented\nfun:pthread_mutexattr_getrobust=uninstrumented\nfun:pthread_mutexattr_getrobust_np=uninstrumented\nfun:pthread_mutexattr_gettype=uninstrumented\nfun:pthread_mutexattr_init=uninstrumented\nfun:pthread_mutexattr_setkind_np=uninstrumented\nfun:pthread_mutexattr_setprioceiling=uninstrumented\nfun:pthread_mutexattr_setprotocol=uninstrumented\nfun:pthread_mutexattr_setpshared=uninst
rumented\nfun:pthread_mutexattr_setrobust=uninstrumented\nfun:pthread_mutexattr_setrobust_np=uninstrumented\nfun:pthread_mutexattr_settype=uninstrumented\nfun:pthread_once=uninstrumented\nfun:pthread_rwlock_clockrdlock=uninstrumented\nfun:pthread_rwlock_clockwrlock=uninstrumented\nfun:pthread_rwlock_destroy=uninstrumented\nfun:pthread_rwlock_init=uninstrumented\nfun:pthread_rwlock_rdlock=uninstrumented\nfun:pthread_rwlock_timedrdlock=uninstrumented\nfun:pthread_rwlock_timedwrlock=uninstrumented\nfun:pthread_rwlock_tryrdlock=uninstrumented\nfun:pthread_rwlock_trywrlock=uninstrumented\nfun:pthread_rwlock_unlock=uninstrumented\nfun:pthread_rwlock_wrlock=uninstrumented\nfun:pthread_rwlockattr_destroy=uninstrumented\nfun:pthread_rwlockattr_getkind_np=uninstrumented\nfun:pthread_rwlockattr_getpshared=uninstrumented\nfun:pthread_rwlockattr_init=uninstrumented\nfun:pthread_rwlockattr_setkind_np=uninstrumented\nfun:pthread_rwlockattr_setpshared=uninstrumented\nfun:pthread_self=uninstrumented\nfun:pthread_setaffinity_np=uninstrumented\nfun:pthread_setattr_default_np=uninstrumented\nfun:pthread_setcancelstate=uninstrumented\nfun:pthread_setcanceltype=uninstrumented\nfun:pthread_setconcurrency=uninstrumented\nfun:pthread_setname_np=uninstrumented\nfun:pthread_setschedparam=uninstrumented\nfun:pthread_setschedprio=uninstrumented\nfun:pthread_setspecific=uninstrumented\nfun:pthread_sigmask=uninstrumented\nfun:pthread_sigqueue=uninstrumented\nfun:pthread_spin_destroy=uninstrumented\nfun:pthread_spin_init=uninstrumented\nfun:pthread_spin_lock=uninstrumented\nfun:pthread_spin_trylock=uninstrumented\nfun:pthread_spin_unlock=uninstrumented\nfun:pthread_testcancel=uninstrumented\nfun:pthread_timedjoin_np=uninstrumented\nfun:pthread_tryjoin_np=uninstrumented\nfun:pthread_yield=uninstrumented\nfun:ptrace=uninstrumented\nfun:ptsname=uninstrumented\nfun:ptsname_r=uninstrumented\nfun:putc=uninstrumented\nfun:putc_unlocked=uninstrumented\nfun:putchar=uninstrumented\nfun:putchar_unlocked=unin
strumented\nfun:putenv=uninstrumented\nfun:putgrent=uninstrumented\nfun:putmsg=uninstrumented\nfun:putpmsg=uninstrumented\nfun:putpwent=uninstrumented\nfun:puts=uninstrumented\nfun:putsgent=uninstrumented\nfun:putspent=uninstrumented\nfun:pututline=uninstrumented\nfun:pututxline=uninstrumented\nfun:putw=uninstrumented\nfun:putwc=uninstrumented\nfun:putwc_unlocked=uninstrumented\nfun:putwchar=uninstrumented\nfun:putwchar_unlocked=uninstrumented\nfun:pvalloc=uninstrumented\nfun:pwrite=uninstrumented\nfun:pwrite64=uninstrumented\nfun:pwritev=uninstrumented\nfun:pwritev2=uninstrumented\nfun:pwritev64=uninstrumented\nfun:pwritev64v2=uninstrumented\nfun:qecvt=uninstrumented\nfun:qecvt_r=uninstrumented\nfun:qfcvt=uninstrumented\nfun:qfcvt_r=uninstrumented\nfun:qgcvt=uninstrumented\nfun:qsort=uninstrumented\nfun:qsort_r=uninstrumented\nfun:query_module=uninstrumented\nfun:quick_exit=uninstrumented\nfun:quotactl=uninstrumented\nfun:raise=uninstrumented\nfun:rand=uninstrumented\nfun:rand_r=uninstrumented\nfun:random=uninstrumented\nfun:random_r=uninstrumented\nfun:rawmemchr=uninstrumented\nfun:rcmd=uninstrumented\nfun:rcmd_af=uninstrumented\nfun:re_comp=uninstrumented\nfun:re_compile_fastmap=uninstrumented\nfun:re_compile_pattern=uninstrumented\nfun:re_exec=uninstrumented\nfun:re_match=uninstrumented\nfun:re_match_2=uninstrumented\nfun:re_search=uninstrumented\nfun:re_search_2=uninstrumented\nfun:re_set_registers=uninstrumented\nfun:re_set_syntax=uninstrumented\nfun:read=uninstrumented\nfun:readColdStartFile=uninstrumented\nfun:readahead=uninstrumented\nfun:readdir=uninstrumented\nfun:readdir64=uninstrumented\nfun:readdir64_r=uninstrumented\nfun:readdir_r=uninstrumented\nfun:readlink=uninstrumented\nfun:readlinkat=uninstrumented\nfun:readv=uninstrumented\nfun:realloc=uninstrumented\nfun:reallocarray=uninstrumented\nfun:realpath=uninstrumented\nfun:reboot=uninstrumented\nfun:recv=uninstrumented\nfun:recvfrom=uninstrumented\nfun:recvmmsg=uninstrumented\nfun:recvmsg=uninstrument
ed\nfun:regcomp=uninstrumented\nfun:regerror=uninstrumented\nfun:regexec=uninstrumented\nfun:regfree=uninstrumented\nfun:register_printf_function=uninstrumented\nfun:register_printf_modifier=uninstrumented\nfun:register_printf_specifier=uninstrumented\nfun:register_printf_type=uninstrumented\nfun:registerrpc=uninstrumented\nfun:remainder=uninstrumented\nfun:remainderf=uninstrumented\nfun:remainderf128=uninstrumented\nfun:remainderf32=uninstrumented\nfun:remainderf32x=uninstrumented\nfun:remainderf64=uninstrumented\nfun:remainderf64x=uninstrumented\nfun:remainderl=uninstrumented\nfun:remap_file_pages=uninstrumented\nfun:remove=uninstrumented\nfun:removexattr=uninstrumented\nfun:remque=uninstrumented\nfun:remquo=uninstrumented\nfun:remquof=uninstrumented\nfun:remquof128=uninstrumented\nfun:remquof32=uninstrumented\nfun:remquof32x=uninstrumented\nfun:remquof64=uninstrumented\nfun:remquof64x=uninstrumented\nfun:remquol=uninstrumented\nfun:rename=uninstrumented\nfun:renameat=uninstrumented\nfun:renameat2=uninstrumented\nfun:res_dnok=uninstrumented\nfun:res_gethostbyaddr=uninstrumented\nfun:res_gethostbyname=uninstrumented\nfun:res_gethostbyname2=uninstrumented\nfun:res_hnok=uninstrumented\nfun:res_mailok=uninstrumented\nfun:res_mkquery=uninstrumented\nfun:res_nmkquery=uninstrumented\nfun:res_nquery=uninstrumented\nfun:res_nquerydomain=uninstrumented\nfun:res_nsearch=uninstrumented\nfun:res_nsend=uninstrumented\nfun:res_ownok=uninstrumented\nfun:res_query=uninstrumented\nfun:res_querydomain=uninstrumented\nfun:res_search=uninstrumented\nfun:res_send=uninstrumented\nfun:res_send_setqhook=uninstrumented\nfun:res_send_setrhook=uninstrumented\nfun:revoke=uninstrumented\nfun:rewind=uninstrumented\nfun:rewinddir=uninstrumented\nfun:rexec=uninstrumented\nfun:rexec_af=uninstrumented\nfun:rindex=uninstrumented\nfun:rint=uninstrumented\nfun:rintf=uninstrumented\nfun:rintf128=uninstrumented\nfun:rintf32=uninstrumented\nfun:rintf32x=uninstrumented\nfun:rintf64=uninstrumented\nfun:rin
tf64x=uninstrumented\nfun:rintl=uninstrumented\nfun:rmdir=uninstrumented\nfun:round=uninstrumented\nfun:roundeven=uninstrumented\nfun:roundevenf=uninstrumented\nfun:roundevenf128=uninstrumented\nfun:roundevenf32=uninstrumented\nfun:roundevenf32x=uninstrumented\nfun:roundevenf64=uninstrumented\nfun:roundevenf64x=uninstrumented\nfun:roundevenl=uninstrumented\nfun:roundf=uninstrumented\nfun:roundf128=uninstrumented\nfun:roundf32=uninstrumented\nfun:roundf32x=uninstrumented\nfun:roundf64=uninstrumented\nfun:roundf64x=uninstrumented\nfun:roundl=uninstrumented\nfun:rpmatch=uninstrumented\nfun:rresvport=uninstrumented\nfun:rresvport_af=uninstrumented\nfun:rtime=uninstrumented\nfun:ruserok=uninstrumented\nfun:ruserok_af=uninstrumented\nfun:ruserpass=uninstrumented\nfun:sbrk=uninstrumented\nfun:scalb=uninstrumented\nfun:scalbf=uninstrumented\nfun:scalbl=uninstrumented\nfun:scalbln=uninstrumented\nfun:scalblnf=uninstrumented\nfun:scalblnf128=uninstrumented\nfun:scalblnf32=uninstrumented\nfun:scalblnf32x=uninstrumented\nfun:scalblnf64=uninstrumented\nfun:scalblnf64x=uninstrumented\nfun:scalblnl=uninstrumented\nfun:scalbn=uninstrumented\nfun:scalbnf=uninstrumented\nfun:scalbnf128=uninstrumented\nfun:scalbnf32=uninstrumented\nfun:scalbnf32x=uninstrumented\nfun:scalbnf64=uninstrumented\nfun:scalbnf64x=uninstrumented\nfun:scalbnl=uninstrumented\nfun:scandir=uninstrumented\nfun:scandir64=uninstrumented\nfun:scandirat=uninstrumented\nfun:scandirat64=uninstrumented\nfun:scanf=uninstrumented\nfun:sched_get_priority_max=uninstrumented\nfun:sched_get_priority_min=uninstrumented\nfun:sched_getaffinity=uninstrumented\nfun:sched_getcpu=uninstrumented\nfun:sched_getparam=uninstrumented\nfun:sched_getscheduler=uninstrumented\nfun:sched_rr_get_interval=uninstrumented\nfun:sched_setaffinity=uninstrumented\nfun:sched_setparam=uninstrumented\nfun:sched_setscheduler=uninstrumented\nfun:sched_yield=uninstrumented\nfun:secure_getenv=uninstrumented\nfun:seed48=uninstrumented\nfun:seed48_r=uninstrume
nted\nfun:seekdir=uninstrumented\nfun:select=uninstrumented\nfun:sem_clockwait=uninstrumented\nfun:sem_close=uninstrumented\nfun:sem_destroy=uninstrumented\nfun:sem_getvalue=uninstrumented\nfun:sem_init=uninstrumented\nfun:sem_open=uninstrumented\nfun:sem_post=uninstrumented\nfun:sem_timedwait=uninstrumented\nfun:sem_trywait=uninstrumented\nfun:sem_unlink=uninstrumented\nfun:sem_wait=uninstrumented\nfun:semctl=uninstrumented\nfun:semget=uninstrumented\nfun:semop=uninstrumented\nfun:semtimedop=uninstrumented\nfun:send=uninstrumented\nfun:sendfile=uninstrumented\nfun:sendfile64=uninstrumented\nfun:sendmmsg=uninstrumented\nfun:sendmsg=uninstrumented\nfun:sendto=uninstrumented\nfun:setaliasent=uninstrumented\nfun:setbuf=uninstrumented\nfun:setbuffer=uninstrumented\nfun:setcontext=uninstrumented\nfun:setdomainname=uninstrumented\nfun:setegid=uninstrumented\nfun:setenv=uninstrumented\nfun:seteuid=uninstrumented\nfun:setfsent=uninstrumented\nfun:setfsgid=uninstrumented\nfun:setfsuid=uninstrumented\nfun:setgid=uninstrumented\nfun:setgrent=uninstrumented\nfun:setgroups=uninstrumented\nfun:sethostent=uninstrumented\nfun:sethostid=uninstrumented\nfun:sethostname=uninstrumented\nfun:setipv4sourcefilter=uninstrumented\nfun:setitimer=uninstrumented\nfun:setjmp=uninstrumented\nfun:setkey=uninstrumented\nfun:setkey_r=uninstrumented\nfun:setlinebuf=uninstrumented\nfun:setlocale=uninstrumented\nfun:setlogin=uninstrumented\nfun:setlogmask=uninstrumented\nfun:setmntent=uninstrumented\nfun:setnetent=uninstrumented\nfun:setnetgrent=uninstrumented\nfun:setns=uninstrumented\nfun:setpayload=uninstrumented\nfun:setpayloadf=uninstrumented\nfun:setpayloadf128=uninstrumented\nfun:setpayloadf32=uninstrumented\nfun:setpayloadf32x=uninstrumented\nfun:setpayloadf64=uninstrumented\nfun:setpayloadf64x=uninstrumented\nfun:setpayloadl=uninstrumented\nfun:setpayloadsig=uninstrumented\nfun:setpayloadsigf=uninstrumented\nfun:setpayloadsigf128=uninstrumented\nfun:setpayloadsigf32=uninstrumented\nfun:setpay
loadsigf32x=uninstrumented\nfun:setpayloadsigf64=uninstrumented\nfun:setpayloadsigf64x=uninstrumented\nfun:setpayloadsigl=uninstrumented\nfun:setpgid=uninstrumented\nfun:setpgrp=uninstrumented\nfun:setpriority=uninstrumented\nfun:setprotoent=uninstrumented\nfun:setpwent=uninstrumented\nfun:setregid=uninstrumented\nfun:setresgid=uninstrumented\nfun:setresuid=uninstrumented\nfun:setreuid=uninstrumented\nfun:setrlimit=uninstrumented\nfun:setrlimit64=uninstrumented\nfun:setrpcent=uninstrumented\nfun:setservent=uninstrumented\nfun:setsgent=uninstrumented\nfun:setsid=uninstrumented\nfun:setsockopt=uninstrumented\nfun:setsourcefilter=uninstrumented\nfun:setspent=uninstrumented\nfun:setstate=uninstrumented\nfun:setstate_r=uninstrumented\nfun:settimeofday=uninstrumented\nfun:setttyent=uninstrumented\nfun:setuid=uninstrumented\nfun:setusershell=uninstrumented\nfun:setutent=uninstrumented\nfun:setutxent=uninstrumented\nfun:setvbuf=uninstrumented\nfun:setxattr=uninstrumented\nfun:sgetsgent=uninstrumented\nfun:sgetsgent_r=uninstrumented\nfun:sgetspent=uninstrumented\nfun:sgetspent_r=uninstrumented\nfun:shm_open=uninstrumented\nfun:shm_unlink=uninstrumented\nfun:shmat=uninstrumented\nfun:shmctl=uninstrumented\nfun:shmdt=uninstrumented\nfun:shmget=uninstrumented\nfun:shutdown=uninstrumented\nfun:sigabbrev_np=uninstrumented\nfun:sigaction=uninstrumented\nfun:sigaddset=uninstrumented\nfun:sigaltstack=uninstrumented\nfun:sigandset=uninstrumented\nfun:sigblock=uninstrumented\nfun:sigdelset=uninstrumented\nfun:sigdescr_np=uninstrumented\nfun:sigemptyset=uninstrumented\nfun:sigfillset=uninstrumented\nfun:siggetmask=uninstrumented\nfun:sighold=uninstrumented\nfun:sigignore=uninstrumented\nfun:siginterrupt=uninstrumented\nfun:sigisemptyset=uninstrumented\nfun:sigismember=uninstrumented\nfun:siglongjmp=uninstrumented\nfun:signal=uninstrumented\nfun:signalfd=uninstrumented\nfun:significand=uninstrumented\nfun:significandf=uninstrumented\nfun:significandl=uninstrumented\nfun:sigorset=uninstr
umented\nfun:sigpause=uninstrumented\nfun:sigpending=uninstrumented\nfun:sigprocmask=uninstrumented\nfun:sigqueue=uninstrumented\nfun:sigrelse=uninstrumented\nfun:sigreturn=uninstrumented\nfun:sigset=uninstrumented\nfun:sigsetmask=uninstrumented\nfun:sigstack=uninstrumented\nfun:sigsuspend=uninstrumented\nfun:sigtimedwait=uninstrumented\nfun:sigvec=uninstrumented\nfun:sigwait=uninstrumented\nfun:sigwaitinfo=uninstrumented\nfun:sin=uninstrumented\nfun:sincos=uninstrumented\nfun:sincosf=uninstrumented\nfun:sincosf128=uninstrumented\nfun:sincosf32=uninstrumented\nfun:sincosf32x=uninstrumented\nfun:sincosf64=uninstrumented\nfun:sincosf64x=uninstrumented\nfun:sincosl=uninstrumented\nfun:sinf=uninstrumented\nfun:sinf128=uninstrumented\nfun:sinf32=uninstrumented\nfun:sinf32x=uninstrumented\nfun:sinf64=uninstrumented\nfun:sinf64x=uninstrumented\nfun:sinh=uninstrumented\nfun:sinhf=uninstrumented\nfun:sinhf128=uninstrumented\nfun:sinhf32=uninstrumented\nfun:sinhf32x=uninstrumented\nfun:sinhf64=uninstrumented\nfun:sinhf64x=uninstrumented\nfun:sinhl=uninstrumented\nfun:sinl=uninstrumented\nfun:sleep=uninstrumented\nfun:snprintf=uninstrumented\nfun:sockatmark=uninstrumented\nfun:socket=uninstrumented\nfun:socketpair=uninstrumented\nfun:splice=uninstrumented\nfun:sprintf=uninstrumented\nfun:sprofil=uninstrumented\nfun:sqrt=uninstrumented\nfun:sqrtf=uninstrumented\nfun:sqrtf128=uninstrumented\nfun:sqrtf32=uninstrumented\nfun:sqrtf32x=uninstrumented\nfun:sqrtf64=uninstrumented\nfun:sqrtf64x=uninstrumented\nfun:sqrtl=uninstrumented\nfun:srand=uninstrumented\nfun:srand48=uninstrumented\nfun:srand48_r=uninstrumented\nfun:srandom=uninstrumented\nfun:srandom_r=uninstrumented\nfun:sscanf=uninstrumented\nfun:ssignal=uninstrumented\nfun:sstk=uninstrumented\nfun:stat=uninstrumented\nfun:stat64=uninstrumented\nfun:statfs=uninstrumented\nfun:statfs64=uninstrumented\nfun:statvfs=uninstrumented\nfun:statvfs64=uninstrumented\nfun:statx=uninstrumented\nfun:stdc_bit_ceil_uc=uninstrumented\nfun:std
c_bit_ceil_ui=uninstrumented\nfun:stdc_bit_ceil_ul=uninstrumented\nfun:stdc_bit_ceil_ull=uninstrumented\nfun:stdc_bit_ceil_us=uninstrumented\nfun:stdc_bit_floor_uc=uninstrumented\nfun:stdc_bit_floor_ui=uninstrumented\nfun:stdc_bit_floor_ul=uninstrumented\nfun:stdc_bit_floor_ull=uninstrumented\nfun:stdc_bit_floor_us=uninstrumented\nfun:stdc_bit_width_uc=uninstrumented\nfun:stdc_bit_width_ui=uninstrumented\nfun:stdc_bit_width_ul=uninstrumented\nfun:stdc_bit_width_ull=uninstrumented\nfun:stdc_bit_width_us=uninstrumented\nfun:stdc_count_ones_uc=uninstrumented\nfun:stdc_count_ones_ui=uninstrumented\nfun:stdc_count_ones_ul=uninstrumented\nfun:stdc_count_ones_ull=uninstrumented\nfun:stdc_count_ones_us=uninstrumented\nfun:stdc_count_zeros_uc=uninstrumented\nfun:stdc_count_zeros_ui=uninstrumented\nfun:stdc_count_zeros_ul=uninstrumented\nfun:stdc_count_zeros_ull=uninstrumented\nfun:stdc_count_zeros_us=uninstrumented\nfun:stdc_first_leading_one_uc=uninstrumented\nfun:stdc_first_leading_one_ui=uninstrumented\nfun:stdc_first_leading_one_ul=uninstrumented\nfun:stdc_first_leading_one_ull=uninstrumented\nfun:stdc_first_leading_one_us=uninstrumented\nfun:stdc_first_leading_zero_uc=uninstrumented\nfun:stdc_first_leading_zero_ui=uninstrumented\nfun:stdc_first_leading_zero_ul=uninstrumented\nfun:stdc_first_leading_zero_ull=uninstrumented\nfun:stdc_first_leading_zero_us=uninstrumented\nfun:stdc_first_trailing_one_uc=uninstrumented\nfun:stdc_first_trailing_one_ui=uninstrumented\nfun:stdc_first_trailing_one_ul=uninstrumented\nfun:stdc_first_trailing_one_ull=uninstrumented\nfun:stdc_first_trailing_one_us=uninstrumented\nfun:stdc_first_trailing_zero_uc=uninstrumented\nfun:stdc_first_trailing_zero_ui=uninstrumented\nfun:stdc_first_trailing_zero_ul=uninstrumented\nfun:stdc_first_trailing_zero_ull=uninstrumented\nfun:stdc_first_trailing_zero_us=uninstrumented\nfun:stdc_has_single_bit_uc=uninstrumented\nfun:stdc_has_single_bit_ui=uninstrumented\nfun:stdc_has_single_bit_ul=uninstrumented\nfun:st
dc_has_single_bit_ull=uninstrumented\nfun:stdc_has_single_bit_us=uninstrumented\nfun:stdc_leading_ones_uc=uninstrumented\nfun:stdc_leading_ones_ui=uninstrumented\nfun:stdc_leading_ones_ul=uninstrumented\nfun:stdc_leading_ones_ull=uninstrumented\nfun:stdc_leading_ones_us=uninstrumented\nfun:stdc_leading_zeros_uc=uninstrumented\nfun:stdc_leading_zeros_ui=uninstrumented\nfun:stdc_leading_zeros_ul=uninstrumented\nfun:stdc_leading_zeros_ull=uninstrumented\nfun:stdc_leading_zeros_us=uninstrumented\nfun:stdc_trailing_ones_uc=uninstrumented\nfun:stdc_trailing_ones_ui=uninstrumented\nfun:stdc_trailing_ones_ul=uninstrumented\nfun:stdc_trailing_ones_ull=uninstrumented\nfun:stdc_trailing_ones_us=uninstrumented\nfun:stdc_trailing_zeros_uc=uninstrumented\nfun:stdc_trailing_zeros_ui=uninstrumented\nfun:stdc_trailing_zeros_ul=uninstrumented\nfun:stdc_trailing_zeros_ull=uninstrumented\nfun:stdc_trailing_zeros_us=uninstrumented\nfun:step=uninstrumented\nfun:stime=uninstrumented\nfun:stpcpy=uninstrumented\nfun:stpncpy=uninstrumented\nfun:strcasecmp=uninstrumented\nfun:strcasecmp_l=uninstrumented\nfun:strcasestr=uninstrumented\nfun:strcat=uninstrumented\nfun:strchr=uninstrumented\nfun:strchrnul=uninstrumented\nfun:strcmp=uninstrumented\nfun:strcoll=uninstrumented\nfun:strcoll_l=uninstrumented\nfun:strcpy=uninstrumented\nfun:strcspn=uninstrumented\nfun:strdup=uninstrumented\nfun:strerror=uninstrumented\nfun:strerror_l=uninstrumented\nfun:strerror_r=uninstrumented\nfun:strerrordesc_np=uninstrumented\nfun:strerrorname_np=uninstrumented\nfun:strfmon=uninstrumented\nfun:strfmon_l=uninstrumented\nfun:strfromd=uninstrumented\nfun:strfromf=uninstrumented\nfun:strfromf128=uninstrumented\nfun:strfromf32=uninstrumented\nfun:strfromf32x=uninstrumented\nfun:strfromf64=uninstrumented\nfun:strfromf64x=uninstrumented\nfun:strfroml=uninstrumented\nfun:strfry=uninstrumented\nfun:strftime=uninstrumented\nfun:strftime_l=uninstrumented\nfun:strlcat=uninstrumented\nfun:strlcpy=uninstrumented\nfun:strlen=uni
nstrumented\nfun:strncasecmp=uninstrumented\nfun:strncasecmp_l=uninstrumented\nfun:strncat=uninstrumented\nfun:strncmp=uninstrumented\nfun:strncpy=uninstrumented\nfun:strndup=uninstrumented\nfun:strnlen=uninstrumented\nfun:strpbrk=uninstrumented\nfun:strptime=uninstrumented\nfun:strptime_l=uninstrumented\nfun:strrchr=uninstrumented\nfun:strsep=uninstrumented\nfun:strsignal=uninstrumented\nfun:strspn=uninstrumented\nfun:strstr=uninstrumented\nfun:strtod=uninstrumented\nfun:strtod_l=uninstrumented\nfun:strtof=uninstrumented\nfun:strtof128=uninstrumented\nfun:strtof128_l=uninstrumented\nfun:strtof32=uninstrumented\nfun:strtof32_l=uninstrumented\nfun:strtof32x=uninstrumented\nfun:strtof32x_l=uninstrumented\nfun:strtof64=uninstrumented\nfun:strtof64_l=uninstrumented\nfun:strtof64x=uninstrumented\nfun:strtof64x_l=uninstrumented\nfun:strtof_l=uninstrumented\nfun:strtoimax=uninstrumented\nfun:strtok=uninstrumented\nfun:strtok_r=uninstrumented\nfun:strtol=uninstrumented\nfun:strtol_l=uninstrumented\nfun:strtold=uninstrumented\nfun:strtold_l=uninstrumented\nfun:strtoll=uninstrumented\nfun:strtoll_l=uninstrumented\nfun:strtoq=uninstrumented\nfun:strtoul=uninstrumented\nfun:strtoul_l=uninstrumented\nfun:strtoull=uninstrumented\nfun:strtoull_l=uninstrumented\nfun:strtoumax=uninstrumented\nfun:strtouq=uninstrumented\nfun:strverscmp=uninstrumented\nfun:strxfrm=uninstrumented\nfun:strxfrm_l=uninstrumented\nfun:stty=uninstrumented\nfun:svc_exit=uninstrumented\nfun:svc_getreq=uninstrumented\nfun:svc_getreq_common=uninstrumented\nfun:svc_getreq_poll=uninstrumented\nfun:svc_getreqset=uninstrumented\nfun:svc_register=uninstrumented\nfun:svc_run=uninstrumented\nfun:svc_sendreply=uninstrumented\nfun:svc_unregister=uninstrumented\nfun:svcerr_auth=uninstrumented\nfun:svcerr_decode=uninstrumented\nfun:svcerr_noproc=uninstrumented\nfun:svcerr_noprog=uninstrumented\nfun:svcerr_progvers=uninstrumented\nfun:svcerr_systemerr=uninstrumented\nfun:svcerr_weakauth=uninstrumented\nfun:svcfd_create=uni
nstrumented\nfun:svcraw_create=uninstrumented\nfun:svctcp_create=uninstrumented\nfun:svcudp_bufcreate=uninstrumented\nfun:svcudp_create=uninstrumented\nfun:svcudp_enablecache=uninstrumented\nfun:svcunix_create=uninstrumented\nfun:svcunixfd_create=uninstrumented\nfun:swab=uninstrumented\nfun:swapcontext=uninstrumented\nfun:swapoff=uninstrumented\nfun:swapon=uninstrumented\nfun:swprintf=uninstrumented\nfun:swscanf=uninstrumented\nfun:symlink=uninstrumented\nfun:symlinkat=uninstrumented\nfun:sync=uninstrumented\nfun:sync_file_range=uninstrumented\nfun:syncfs=uninstrumented\nfun:syscall=uninstrumented\nfun:sysconf=uninstrumented\nfun:sysctl=uninstrumented\nfun:sysinfo=uninstrumented\nfun:syslog=uninstrumented\nfun:system=uninstrumented\nfun:sysv_signal=uninstrumented\nfun:tan=uninstrumented\nfun:tanf=uninstrumented\nfun:tanf128=uninstrumented\nfun:tanf32=uninstrumented\nfun:tanf32x=uninstrumented\nfun:tanf64=uninstrumented\nfun:tanf64x=uninstrumented\nfun:tanh=uninstrumented\nfun:tanhf=uninstrumented\nfun:tanhf128=uninstrumented\nfun:tanhf32=uninstrumented\nfun:tanhf32x=uninstrumented\nfun:tanhf64=uninstrumented\nfun:tanhf64x=uninstrumented\nfun:tanhl=uninstrumented\nfun:tanl=uninstrumented\nfun:tcdrain=uninstrumented\nfun:tcflow=uninstrumented\nfun:tcflush=uninstrumented\nfun:tcgetattr=uninstrumented\nfun:tcgetpgrp=uninstrumented\nfun:tcgetsid=uninstrumented\nfun:tcsendbreak=uninstrumented\nfun:tcsetattr=uninstrumented\nfun:tcsetpgrp=uninstrumented\nfun:td_init=uninstrumented\nfun:td_log=uninstrumented\nfun:td_symbol_list=uninstrumented\nfun:td_ta_clear_event=uninstrumented\nfun:td_ta_delete=uninstrumented\nfun:td_ta_enable_stats=uninstrumented\nfun:td_ta_event_addr=uninstrumented\nfun:td_ta_event_getmsg=uninstrumented\nfun:td_ta_get_nthreads=uninstrumented\nfun:td_ta_get_ph=uninstrumented\nfun:td_ta_get_stats=uninstrumented\nfun:td_ta_map_id2thr=uninstrumented\nfun:td_ta_map_lwp2thr=uninstrumented\nfun:td_ta_new=uninstrumented\nfun:td_ta_reset_stats=uninstrumented\nfu
n:td_ta_set_event=uninstrumented\nfun:td_ta_setconcurrency=uninstrumented\nfun:td_ta_thr_iter=uninstrumented\nfun:td_ta_tsd_iter=uninstrumented\nfun:td_thr_clear_event=uninstrumented\nfun:td_thr_dbresume=uninstrumented\nfun:td_thr_dbsuspend=uninstrumented\nfun:td_thr_event_enable=uninstrumented\nfun:td_thr_event_getmsg=uninstrumented\nfun:td_thr_get_info=uninstrumented\nfun:td_thr_getfpregs=uninstrumented\nfun:td_thr_getgregs=uninstrumented\nfun:td_thr_getxregs=uninstrumented\nfun:td_thr_getxregsize=uninstrumented\nfun:td_thr_set_event=uninstrumented\nfun:td_thr_setfpregs=uninstrumented\nfun:td_thr_setgregs=uninstrumented\nfun:td_thr_setprio=uninstrumented\nfun:td_thr_setsigpending=uninstrumented\nfun:td_thr_setxregs=uninstrumented\nfun:td_thr_sigsetmask=uninstrumented\nfun:td_thr_tls_get_addr=uninstrumented\nfun:td_thr_tlsbase=uninstrumented\nfun:td_thr_tsd=uninstrumented\nfun:td_thr_validate=uninstrumented\nfun:tdelete=uninstrumented\nfun:tdestroy=uninstrumented\nfun:tee=uninstrumented\nfun:telldir=uninstrumented\nfun:tempnam=uninstrumented\nfun:textdomain=uninstrumented\nfun:tfind=uninstrumented\nfun:tgamma=uninstrumented\nfun:tgammaf=uninstrumented\nfun:tgammaf128=uninstrumented\nfun:tgammaf32=uninstrumented\nfun:tgammaf32x=uninstrumented\nfun:tgammaf64=uninstrumented\nfun:tgammaf64x=uninstrumented\nfun:tgammal=uninstrumented\nfun:tgkill=uninstrumented\nfun:thrd_create=uninstrumented\nfun:thrd_current=uninstrumented\nfun:thrd_detach=uninstrumented\nfun:thrd_equal=uninstrumented\nfun:thrd_exit=uninstrumented\nfun:thrd_join=uninstrumented\nfun:thrd_sleep=uninstrumented\nfun:thrd_yield=uninstrumented\nfun:time=uninstrumented\nfun:timegm=uninstrumented\nfun:timelocal=uninstrumented\nfun:timer_create=uninstrumented\nfun:timer_delete=uninstrumented\nfun:timer_getoverrun=uninstrumented\nfun:timer_gettime=uninstrumented\nfun:timer_settime=uninstrumented\nfun:timerfd_create=uninstrumented\nfun:timerfd_gettime=uninstrumented\nfun:timerfd_settime=uninstrumented\nfun:times=
uninstrumented\nfun:timespec_get=uninstrumented\nfun:timespec_getres=uninstrumented\nfun:tmpfile=uninstrumented\nfun:tmpfile64=uninstrumented\nfun:tmpnam=uninstrumented\nfun:tmpnam_r=uninstrumented\nfun:toascii=uninstrumented\nfun:tolower=uninstrumented\nfun:tolower_l=uninstrumented\nfun:totalorder=uninstrumented\nfun:totalorderf=uninstrumented\nfun:totalorderf128=uninstrumented\nfun:totalorderf32=uninstrumented\nfun:totalorderf32x=uninstrumented\nfun:totalorderf64=uninstrumented\nfun:totalorderf64x=uninstrumented\nfun:totalorderl=uninstrumented\nfun:totalordermag=uninstrumented\nfun:totalordermagf=uninstrumented\nfun:totalordermagf128=uninstrumented\nfun:totalordermagf32=uninstrumented\nfun:totalordermagf32x=uninstrumented\nfun:totalordermagf64=uninstrumented\nfun:totalordermagf64x=uninstrumented\nfun:totalordermagl=uninstrumented\nfun:toupper=uninstrumented\nfun:toupper_l=uninstrumented\nfun:towctrans=uninstrumented\nfun:towctrans_l=uninstrumented\nfun:towlower=uninstrumented\nfun:towlower_l=uninstrumented\nfun:towupper=uninstrumented\nfun:towupper_l=uninstrumented\nfun:tr_break=uninstrumented\nfun:trunc=uninstrumented\nfun:truncate=uninstrumented\nfun:truncate64=uninstrumented\nfun:truncf=uninstrumented\nfun:truncf128=uninstrumented\nfun:truncf32=uninstrumented\nfun:truncf32x=uninstrumented\nfun:truncf64=uninstrumented\nfun:truncf64x=uninstrumented\nfun:truncl=uninstrumented\nfun:tsearch=uninstrumented\nfun:tss_create=uninstrumented\nfun:tss_delete=uninstrumented\nfun:tss_get=uninstrumented\nfun:tss_set=uninstrumented\nfun:ttyname=uninstrumented\nfun:ttyname_r=uninstrumented\nfun:ttyslot=uninstrumented\nfun:twalk=uninstrumented\nfun:twalk_r=uninstrumented\nfun:tzset=uninstrumented\nfun:ualarm=uninstrumented\nfun:ufromfp=uninstrumented\nfun:ufromfpf=uninstrumented\nfun:ufromfpf128=uninstrumented\nfun:ufromfpf32=uninstrumented\nfun:ufromfpf32x=uninstrumented\nfun:ufromfpf64=uninstrumented\nfun:ufromfpf64x=uninstrumented\nfun:ufromfpl=uninstrumented\nfun:ufromfpx=un
instrumented\nfun:ufromfpxf=uninstrumented\nfun:ufromfpxf128=uninstrumented\nfun:ufromfpxf32=uninstrumented\nfun:ufromfpxf32x=uninstrumented\nfun:ufromfpxf64=uninstrumented\nfun:ufromfpxf64x=uninstrumented\nfun:ufromfpxl=uninstrumented\nfun:ulckpwdf=uninstrumented\nfun:ulimit=uninstrumented\nfun:umask=uninstrumented\nfun:umount=uninstrumented\nfun:umount2=uninstrumented\nfun:uname=uninstrumented\nfun:ungetc=uninstrumented\nfun:ungetwc=uninstrumented\nfun:unlink=uninstrumented\nfun:unlinkat=uninstrumented\nfun:unlockpt=uninstrumented\nfun:unsetenv=uninstrumented\nfun:unshare=uninstrumented\nfun:updwtmp=uninstrumented\nfun:updwtmpx=uninstrumented\nfun:uselib=uninstrumented\nfun:uselocale=uninstrumented\nfun:user2netname=uninstrumented\nfun:usleep=uninstrumented\nfun:ustat=uninstrumented\nfun:utime=uninstrumented\nfun:utimensat=uninstrumented\nfun:utimes=uninstrumented\nfun:utmpname=uninstrumented\nfun:utmpxname=uninstrumented\nfun:valloc=uninstrumented\nfun:vasprintf=uninstrumented\nfun:vdprintf=uninstrumented\nfun:verr=uninstrumented\nfun:verrx=uninstrumented\nfun:versionsort=uninstrumented\nfun:versionsort64=uninstrumented\nfun:vfork=uninstrumented\nfun:vfprintf=uninstrumented\nfun:vfscanf=uninstrumented\nfun:vfwprintf=uninstrumented\nfun:vfwscanf=uninstrumented\nfun:vhangup=uninstrumented\nfun:vlimit=uninstrumented\nfun:vmsplice=uninstrumented\nfun:vprintf=uninstrumented\nfun:vscanf=uninstrumented\nfun:vsnprintf=uninstrumented\nfun:vsprintf=uninstrumented\nfun:vsscanf=uninstrumented\nfun:vswprintf=uninstrumented\nfun:vswscanf=uninstrumented\nfun:vsyslog=uninstrumented\nfun:vtimes=uninstrumented\nfun:vwarn=uninstrumented\nfun:vwarnx=uninstrumented\nfun:vwprintf=uninstrumented\nfun:vwscanf=uninstrumented\nfun:wait=uninstrumented\nfun:wait3=uninstrumented\nfun:wait4=uninstrumented\nfun:waitid=uninstrumented\nfun:waitpid=uninstrumented\nfun:warn=uninstrumented\nfun:warnx=uninstrumented\nfun:wcpcpy=uninstrumented\nfun:wcpncpy=uninstrumented\nfun:wcrtomb=uninstrumented\n
fun:wcscasecmp=uninstrumented\nfun:wcscasecmp_l=uninstrumented\nfun:wcscat=uninstrumented\nfun:wcschr=uninstrumented\nfun:wcschrnul=uninstrumented\nfun:wcscmp=uninstrumented\nfun:wcscoll=uninstrumented\nfun:wcscoll_l=uninstrumented\nfun:wcscpy=uninstrumented\nfun:wcscspn=uninstrumented\nfun:wcsdup=uninstrumented\nfun:wcsftime=uninstrumented\nfun:wcsftime_l=uninstrumented\nfun:wcslcat=uninstrumented\nfun:wcslcpy=uninstrumented\nfun:wcslen=uninstrumented\nfun:wcsncasecmp=uninstrumented\nfun:wcsncasecmp_l=uninstrumented\nfun:wcsncat=uninstrumented\nfun:wcsncmp=uninstrumented\nfun:wcsncpy=uninstrumented\nfun:wcsnlen=uninstrumented\nfun:wcsnrtombs=uninstrumented\nfun:wcspbrk=uninstrumented\nfun:wcsrchr=uninstrumented\nfun:wcsrtombs=uninstrumented\nfun:wcsspn=uninstrumented\nfun:wcsstr=uninstrumented\nfun:wcstod=uninstrumented\nfun:wcstod_l=uninstrumented\nfun:wcstof=uninstrumented\nfun:wcstof128=uninstrumented\nfun:wcstof128_l=uninstrumented\nfun:wcstof32=uninstrumented\nfun:wcstof32_l=uninstrumented\nfun:wcstof32x=uninstrumented\nfun:wcstof32x_l=uninstrumented\nfun:wcstof64=uninstrumented\nfun:wcstof64_l=uninstrumented\nfun:wcstof64x=uninstrumented\nfun:wcstof64x_l=uninstrumented\nfun:wcstof_l=uninstrumented\nfun:wcstoimax=uninstrumented\nfun:wcstok=uninstrumented\nfun:wcstol=uninstrumented\nfun:wcstol_l=uninstrumented\nfun:wcstold=uninstrumented\nfun:wcstold_l=uninstrumented\nfun:wcstoll=uninstrumented\nfun:wcstoll_l=uninstrumented\nfun:wcstombs=uninstrumented\nfun:wcstoq=uninstrumented\nfun:wcstoul=uninstrumented\nfun:wcstoul_l=uninstrumented\nfun:wcstoull=uninstrumented\nfun:wcstoull_l=uninstrumented\nfun:wcstoumax=uninstrumented\nfun:wcstouq=uninstrumented\nfun:wcswcs=uninstrumented\nfun:wcswidth=uninstrumented\nfun:wcsxfrm=uninstrumented\nfun:wcsxfrm_l=uninstrumented\nfun:wctob=uninstrumented\nfun:wctomb=uninstrumented\nfun:wctrans=uninstrumented\nfun:wctrans_l=uninstrumented\nfun:wctype=uninstrumented\nfun:wctype_l=uninstrumented\nfun:wcwidth=uninstrumented\nfun:w
memchr=uninstrumented\nfun:wmemcmp=uninstrumented\nfun:wmemcpy=uninstrumented\nfun:wmemmove=uninstrumented\nfun:wmempcpy=uninstrumented\nfun:wmemset=uninstrumented\nfun:wordexp=uninstrumented\nfun:wordfree=uninstrumented\nfun:wprintf=uninstrumented\nfun:write=uninstrumented\nfun:writeColdStartFile=uninstrumented\nfun:writev=uninstrumented\nfun:wscanf=uninstrumented\nfun:xcrypt=uninstrumented\nfun:xcrypt_gensalt=uninstrumented\nfun:xcrypt_gensalt_r=uninstrumented\nfun:xcrypt_r=uninstrumented\nfun:xdecrypt=uninstrumented\nfun:xdr_accepted_reply=uninstrumented\nfun:xdr_array=uninstrumented\nfun:xdr_authdes_cred=uninstrumented\nfun:xdr_authdes_verf=uninstrumented\nfun:xdr_authunix_parms=uninstrumented\nfun:xdr_bool=uninstrumented\nfun:xdr_bytes=uninstrumented\nfun:xdr_callhdr=uninstrumented\nfun:xdr_callmsg=uninstrumented\nfun:xdr_cback_data=uninstrumented\nfun:xdr_char=uninstrumented\nfun:xdr_cryptkeyarg=uninstrumented\nfun:xdr_cryptkeyarg2=uninstrumented\nfun:xdr_cryptkeyres=uninstrumented\nfun:xdr_des_block=uninstrumented\nfun:xdr_domainname=uninstrumented\nfun:xdr_double=uninstrumented\nfun:xdr_enum=uninstrumented\nfun:xdr_float=uninstrumented\nfun:xdr_free=uninstrumented\nfun:xdr_getcredres=uninstrumented\nfun:xdr_hyper=uninstrumented\nfun:xdr_int=uninstrumented\nfun:xdr_int16_t=uninstrumented\nfun:xdr_int32_t=uninstrumented\nfun:xdr_int64_t=uninstrumented\nfun:xdr_int8_t=uninstrumented\nfun:xdr_key_netstarg=uninstrumented\nfun:xdr_key_netstres=uninstrumented\nfun:xdr_keybuf=uninstrumented\nfun:xdr_keydat=uninstrumented\nfun:xdr_keystatus=uninstrumented\nfun:xdr_long=uninstrumented\nfun:xdr_longlong_t=uninstrumented\nfun:xdr_mapname=uninstrumented\nfun:xdr_netnamestr=uninstrumented\nfun:xdr_netobj=uninstrumented\nfun:xdr_obj_p=uninstrumented\nfun:xdr_opaque=uninstrumented\nfun:xdr_opaque_auth=uninstrumented\nfun:xdr_peername=uninstrumented\nfun:xdr_pmap=uninstrumented\nfun:xdr_pmaplist=uninstrumented\nfun:xdr_pointer=uninstrumented\nfun:xdr_quad_t=uninstrumented\nf
un:xdr_reference=uninstrumented\nfun:xdr_rejected_reply=uninstrumented\nfun:xdr_replymsg=uninstrumented\nfun:xdr_rmtcall_args=uninstrumented\nfun:xdr_rmtcallres=uninstrumented\nfun:xdr_short=uninstrumented\nfun:xdr_sizeof=uninstrumented\nfun:xdr_string=uninstrumented\nfun:xdr_u_char=uninstrumented\nfun:xdr_u_hyper=uninstrumented\nfun:xdr_u_int=uninstrumented\nfun:xdr_u_long=uninstrumented\nfun:xdr_u_longlong_t=uninstrumented\nfun:xdr_u_quad_t=uninstrumented\nfun:xdr_u_short=uninstrumented\nfun:xdr_uint16_t=uninstrumented\nfun:xdr_uint32_t=uninstrumented\nfun:xdr_uint64_t=uninstrumented\nfun:xdr_uint8_t=uninstrumented\nfun:xdr_union=uninstrumented\nfun:xdr_unixcred=uninstrumented\nfun:xdr_valdat=uninstrumented\nfun:xdr_vector=uninstrumented\nfun:xdr_void=uninstrumented\nfun:xdr_wrapstring=uninstrumented\nfun:xdr_yp_buf=uninstrumented\nfun:xdr_ypall=uninstrumented\nfun:xdr_ypbind_binding=uninstrumented\nfun:xdr_ypbind_resp=uninstrumented\nfun:xdr_ypbind_resptype=uninstrumented\nfun:xdr_ypbind_setdom=uninstrumented\nfun:xdr_ypdelete_args=uninstrumented\nfun:xdr_ypmap_parms=uninstrumented\nfun:xdr_ypmaplist=uninstrumented\nfun:xdr_yppush_status=uninstrumented\nfun:xdr_yppushresp_xfr=uninstrumented\nfun:xdr_ypreq_key=uninstrumented\nfun:xdr_ypreq_nokey=uninstrumented\nfun:xdr_ypreq_xfr=uninstrumented\nfun:xdr_ypresp_all=uninstrumented\nfun:xdr_ypresp_key_val=uninstrumented\nfun:xdr_ypresp_maplist=uninstrumented\nfun:xdr_ypresp_master=uninstrumented\nfun:xdr_ypresp_order=uninstrumented\nfun:xdr_ypresp_val=uninstrumented\nfun:xdr_ypresp_xfr=uninstrumented\nfun:xdr_ypstat=uninstrumented\nfun:xdr_ypupdate_args=uninstrumented\nfun:xdr_ypxfrstat=uninstrumented\nfun:xdrmem_create=uninstrumented\nfun:xdrrec_create=uninstrumented\nfun:xdrrec_endofrecord=uninstrumented\nfun:xdrrec_eof=uninstrumented\nfun:xdrrec_skiprecord=uninstrumented\nfun:xdrstdio_create=uninstrumented\nfun:xencrypt=uninstrumented\nfun:xprt_register=uninstrumented\nfun:xprt_unregister=uninstrumented\nfun:y0=uni
nstrumented\nfun:y0f=uninstrumented\nfun:y0f128=uninstrumented\nfun:y0f32=uninstrumented\nfun:y0f32x=uninstrumented\nfun:y0f64=uninstrumented\nfun:y0f64x=uninstrumented\nfun:y0l=uninstrumented\nfun:y1=uninstrumented\nfun:y1f=uninstrumented\nfun:y1f128=uninstrumented\nfun:y1f32=uninstrumented\nfun:y1f32x=uninstrumented\nfun:y1f64=uninstrumented\nfun:y1f64x=uninstrumented\nfun:y1l=uninstrumented\nfun:yn=uninstrumented\nfun:ynf=uninstrumented\nfun:ynf128=uninstrumented\nfun:ynf32=uninstrumented\nfun:ynf32x=uninstrumented\nfun:ynf64=uninstrumented\nfun:ynf64x=uninstrumented\nfun:ynl=uninstrumented\nfun:yp_all=uninstrumented\nfun:yp_bind=uninstrumented\nfun:yp_first=uninstrumented\nfun:yp_get_default_domain=uninstrumented\nfun:yp_maplist=uninstrumented\nfun:yp_master=uninstrumented\nfun:yp_match=uninstrumented\nfun:yp_next=uninstrumented\nfun:yp_order=uninstrumented\nfun:yp_unbind=uninstrumented\nfun:yp_update=uninstrumented\nfun:ypbinderr_string=uninstrumented\nfun:yperr_string=uninstrumented\nfun:ypprot_err=uninstrumented\n"
  },
  {
    "path": "runtime/dfsan/scripts/build-libc-list.py",
    "content": "#!/usr/bin/env python\n#===- lib/dfsan/scripts/build-libc-list.py ---------------------------------===#\n#\n# Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n# See https://llvm.org/LICENSE.txt for license information.\n# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n#\n#===------------------------------------------------------------------------===#\n# The purpose of this script is to identify every function symbol in a set of\n# libraries (in this case, libc and libgcc) so that they can be marked as\n# uninstrumented, thus allowing the instrumentation pass to treat calls to those\n# functions correctly.\n\nimport os\nimport subprocess\nimport sys\nfrom optparse import OptionParser\n\ndef defined_function_list(object):\n  functions = []\n  readelf_proc = subprocess.Popen(['readelf', '-s', '-W', object],\n                                  stdout=subprocess.PIPE,\n                                  universal_newlines=True)\n  readelf = readelf_proc.communicate()[0].split('\\n')\n  if readelf_proc.returncode != 0:\n    raise subprocess.CalledProcessError(readelf_proc.returncode, 'readelf')\n  for line in readelf:\n    if (line[31:35] == 'FUNC' or line[31:36] == 'IFUNC') and \\\n       line[39:44] != 'LOCAL' and \\\n       line[55:58] != 'UND':\n      function_name = line[59:].split('@')[0]\n      functions.append(function_name)\n  return functions\n\np = OptionParser()\n\np.add_option('--libc-dso-path', metavar='PATH',\n             help='path to libc DSO directory',\n             default='/lib/x86_64-linux-gnu')\np.add_option('--libc-archive-path', metavar='PATH',\n             help='path to libc archive directory',\n             default='/usr/lib/x86_64-linux-gnu')\n\np.add_option('--libgcc-dso-path', metavar='PATH',\n             help='path to libgcc DSO directory',\n             default='/lib/x86_64-linux-gnu')\np.add_option('--libgcc-archive-path', metavar='PATH',\n             help='path to libgcc archive 
directory',\n             default='/usr/lib/gcc/x86_64-linux-gnu/11')\n\np.add_option('--with-libstdcxx', action='store_true',\n             dest='with_libstdcxx',\n             help='include libstdc++ in the list (inadvisable)')\np.add_option('--libstdcxx-dso-path', metavar='PATH',\n             help='path to libstdc++ DSO directory',\n             default='/usr/lib/x86_64-linux-gnu')\n\np.add_option('--with-libcxx', action='store_true',\n            dest='with_libcxx',\n            help='include libc++ in the list (inadvisable)')\np.add_option('--libcxx-dso-path', metavar='PATH',\n             help='path to libc++ DSO directory',\n             default='/usr/lib/x86_64-linux-gnu')\n\n\n(options, args) = p.parse_args()\n\nlibs = [os.path.join(options.libc_dso_path, name) for name in\n        ['ld-linux-x86-64.so.2',\n         'libanl.so.1',\n         'libBrokenLocale.so.1',\n         'libcidn.so.1',\n         'libcrypt.so.1',\n         'libc.so.6',\n         'libdl.so.2',\n         'libm.so.6',\n         'libnsl.so.1',\n         'libpthread.so.0',\n         'libresolv.so.2',\n         'librt.so.1',\n         'libthread_db.so.1',\n         'libutil.so.1']]\nlibs += [os.path.join(options.libc_archive_path, name) for name in\n         ['libc_nonshared.a',\n          'libpthread_nonshared.a']]\n\nlibs.append(os.path.join(options.libgcc_dso_path, 'libgcc_s.so.1'))\nlibs.append(os.path.join(options.libgcc_archive_path, 'libgcc.a'))\n\nif options.with_libstdcxx:\n  libs.append(os.path.join(options.libstdcxx_dso_path, 'libstdc++.so.6'))\n\nif options.with_libcxx:\n  libs.append(os.path.join(options.libcxx_dso_path, 'libc++abi.so.1'))\n  libs.append(os.path.join(options.libcxx_dso_path, 'libc++.so.1'))\n\nfunctions = []\nfor l in libs:\n  if os.path.exists(l):\n    functions += defined_function_list(l)\n  else:\n    print('warning: library %s not found' % l, file=sys.stderr)\n\nfunctions = list(set(functions))\nfunctions.sort()\n\nfor f in functions:\n  
print('fun:%s=uninstrumented' % f)\n"
  },
  {
    "path": "runtime/dfsan/scripts/check_custom_wrappers.sh",
    "content": "#!/bin/sh\n\nDFSAN_DIR=$(dirname \"$0\")/../\nDFSAN_CUSTOM_TESTS=${DFSAN_DIR}/../../test/dfsan/custom.cc\nDFSAN_CUSTOM_WRAPPERS=${DFSAN_DIR}/dfsan_custom.cc\nDFSAN_ABI_LIST=${DFSAN_DIR}/done_abilist.txt\n\nDIFFOUT=$(mktemp -q /tmp/tmp.XXXXXXXXXX)\nERRORLOG=$(mktemp -q /tmp/tmp.XXXXXXXXXX)\nDIFF_A=$(mktemp -q /tmp/tmp.XXXXXXXXXX)\nDIFF_B=$(mktemp -q /tmp/tmp.XXXXXXXXXX)\n\non_exit() {\n  rm -f ${DIFFOUT} 2> /dev/null\n  rm -f ${ERRORLOG} 2> /dev/null\n  rm -f ${DIFF_A} 2> /dev/null\n  rm -f ${DIFF_B} 2> /dev/null\n}\n\n# Ignore __sanitizer_cov_trace* because they are implemented elsewhere.\ntrap on_exit EXIT\ngrep -E \"^fun:.*=custom\" ${DFSAN_ABI_LIST} \\\n  | grep -v \"dfsan_get_label\\|__sanitizer_cov_trace\" \\\n  | sed \"s/^fun:\\(.*\\)=custom.*/\\1/\" | sort > $DIFF_A\ngrep -E \"__dfsw.*\\(\" ${DFSAN_CUSTOM_WRAPPERS} \\\n  | grep -v \"__sanitizer_cov_trace\" \\\n  | sed \"s/.*__dfsw_\\(.*\\)(.*/\\1/\" | sort > $DIFF_B\ndiff -u $DIFF_A $DIFF_B > ${DIFFOUT}\nif [ $? -ne 0 ]\nthen\n  echo -n \"The following differences between the ABI list and \">> ${ERRORLOG}\n  echo \"the implemented custom wrappers have been found:\" >> ${ERRORLOG}\n  cat ${DIFFOUT} >> ${ERRORLOG}\nfi\n\ngrep -E __dfsw_ ${DFSAN_CUSTOM_WRAPPERS} \\\n  | grep -v \"__sanitizer_cov_trace\" \\\n  | sed \"s/.*__dfsw_\\([^(]*\\).*/\\1/\" | sort > $DIFF_A\ngrep -E \"^[[:space:]]*test_.*\\(\\);\" ${DFSAN_CUSTOM_TESTS} \\\n  | sed \"s/.*test_\\(.*\\)();/\\1/\" | sort > $DIFF_B\ndiff -u $DIFF_A $DIFF_B > ${DIFFOUT}\nif [ $? -ne 0 ]\nthen\n  echo -n \"The following differences between the implemented \" >> ${ERRORLOG}\n  echo \"custom wrappers and the tests have been found:\" >> ${ERRORLOG}\n  cat ${DIFFOUT} >> ${ERRORLOG}\nfi\n\nif [ -s ${ERRORLOG} ]\nthen\n  cat ${ERRORLOG}\n  exit 1\nfi\n\n"
  },
  {
    "path": "runtime/dfsan/taint.ld",
    "content": "OUTPUT_FORMAT(\"elf64-x86-64\", \"elf64-x86-64\",\n\t      \"elf64-x86-64\")\nOUTPUT_ARCH(i386:x86-64)\nENTRY(_start)\nSEARCH_DIR(\"=/usr/local/lib/x86_64-linux-gnu\"); SEARCH_DIR(\"=/lib/x86_64-linux-gnu\"); SEARCH_DIR(\"=/usr/lib/x86_64-linux-gnu\"); SEARCH_DIR(\"=/usr/local/lib64\"); SEARCH_DIR(\"=/lib64\"); SEARCH_DIR(\"=/usr/lib64\"); SEARCH_DIR(\"=/usr/local/lib\"); SEARCH_DIR(\"=/lib\"); SEARCH_DIR(\"=/usr/lib\"); SEARCH_DIR(\"=/usr/x86_64-linux-gnu/lib64\"); SEARCH_DIR(\"=/usr/x86_64-linux-gnu/lib\");\nSECTIONS\n{\n  /* Read-only sections, merged into text segment: */\n  PROVIDE (__executable_start = SEGMENT_START(\"text-segment\", 0x700000200000)); . = SEGMENT_START(\"text-segment\", 0x700000200000) + SIZEOF_HEADERS;\n  .interp         : { *(.interp) }\n  .note.gnu.build-id : { *(.note.gnu.build-id) }\n  .hash           : { *(.hash) }\n  .gnu.hash       : { *(.gnu.hash) }\n  .dynsym         : { *(.dynsym) }\n  .dynstr         : { *(.dynstr) }\n  .gnu.version    : { *(.gnu.version) }\n  .gnu.version_d  : { *(.gnu.version_d) }\n  .gnu.version_r  : { *(.gnu.version_r) }\n  .rela.dyn       :\n    {\n      *(.rela.init)\n      *(.rela.text .rela.text.* .rela.gnu.linkonce.t.*)\n      *(.rela.fini)\n      *(.rela.rodata .rela.rodata.* .rela.gnu.linkonce.r.*)\n      *(.rela.data .rela.data.* .rela.gnu.linkonce.d.*)\n      *(.rela.tdata .rela.tdata.* .rela.gnu.linkonce.td.*)\n      *(.rela.tbss .rela.tbss.* .rela.gnu.linkonce.tb.*)\n      *(.rela.ctors)\n      *(.rela.dtors)\n      *(.rela.got)\n      *(.rela.bss .rela.bss.* .rela.gnu.linkonce.b.*)\n      *(.rela.ldata .rela.ldata.* .rela.gnu.linkonce.l.*)\n      *(.rela.lbss .rela.lbss.* .rela.gnu.linkonce.lb.*)\n      *(.rela.lrodata .rela.lrodata.* .rela.gnu.linkonce.lr.*)\n      *(.rela.ifunc)\n    }\n  .rela.plt       :\n    {\n      *(.rela.plt)\n      PROVIDE_HIDDEN (__rela_iplt_start = .);\n      *(.rela.iplt)\n      PROVIDE_HIDDEN (__rela_iplt_end = .);\n    }\n  .init           :\n  {\n    
KEEP (*(SORT_NONE(.init)))\n  }\n  .plt            : { *(.plt) *(.iplt) }\n  .plt.got        : { *(.plt.got) }\n  .plt.bnd        : { *(.plt.bnd) }\n  .text           :\n  {\n    *(.text.unlikely .text.*_unlikely .text.unlikely.*)\n    *(.text.exit .text.exit.*)\n    *(.text.startup .text.startup.*)\n    *(.text.hot .text.hot.*)\n    *(.text .stub .text.* .gnu.linkonce.t.*)\n    /* .gnu.warning sections are handled specially by elf32.em.  */\n    *(.gnu.warning)\n  }\n  .fini           :\n  {\n    KEEP (*(SORT_NONE(.fini)))\n  }\n  PROVIDE (__etext = .);\n  PROVIDE (_etext = .);\n  PROVIDE (etext = .);\n  .rodata         : { *(.rodata .rodata.* .gnu.linkonce.r.*) }\n  .rodata1        : { *(.rodata1) }\n  .eh_frame_hdr : { *(.eh_frame_hdr) *(.eh_frame_entry .eh_frame_entry.*) }\n  .eh_frame       : ONLY_IF_RO { KEEP (*(.eh_frame)) *(.eh_frame.*) }\n  .gcc_except_table   : ONLY_IF_RO { *(.gcc_except_table\n  .gcc_except_table.*) }\n  .gnu_extab   : ONLY_IF_RO { *(.gnu_extab*) }\n  /* These sections are generated by the Sun/Oracle C++ compiler.  */\n  .exception_ranges   : ONLY_IF_RO { *(.exception_ranges\n  .exception_ranges*) }\n  /* Adjust the address for the data segment.  We want to adjust up to\n     the same address within the page on the next page up.  */\n  . 
= DATA_SEGMENT_ALIGN (CONSTANT (MAXPAGESIZE), CONSTANT (COMMONPAGESIZE));\n  /* Exception handling  */\n  .eh_frame       : ONLY_IF_RW { KEEP (*(.eh_frame)) *(.eh_frame.*) }\n  .gnu_extab      : ONLY_IF_RW { *(.gnu_extab) }\n  .gcc_except_table   : ONLY_IF_RW { *(.gcc_except_table .gcc_except_table.*) }\n  .exception_ranges   : ONLY_IF_RW { *(.exception_ranges .exception_ranges*) }\n  /* Thread Local Storage sections  */\n  .tdata\t  : { *(.tdata .tdata.* .gnu.linkonce.td.*) }\n  .tbss\t\t  : { *(.tbss .tbss.* .gnu.linkonce.tb.*) *(.tcommon) }\n  .preinit_array     :\n  {\n    PROVIDE_HIDDEN (__preinit_array_start = .);\n    KEEP (*(.preinit_array))\n    PROVIDE_HIDDEN (__preinit_array_end = .);\n  }\n  .init_array     :\n  {\n    PROVIDE_HIDDEN (__init_array_start = .);\n    KEEP (*(SORT_BY_INIT_PRIORITY(.init_array.*) SORT_BY_INIT_PRIORITY(.ctors.*)))\n    KEEP (*(.init_array EXCLUDE_FILE (*crtbegin.o *crtbegin?.o *crtend.o *crtend?.o ) .ctors))\n    PROVIDE_HIDDEN (__init_array_end = .);\n  }\n  .fini_array     :\n  {\n    PROVIDE_HIDDEN (__fini_array_start = .);\n    KEEP (*(SORT_BY_INIT_PRIORITY(.fini_array.*) SORT_BY_INIT_PRIORITY(.dtors.*)))\n    KEEP (*(.fini_array EXCLUDE_FILE (*crtbegin.o *crtbegin?.o *crtend.o *crtend?.o ) .dtors))\n    PROVIDE_HIDDEN (__fini_array_end = .);\n  }\n  .ctors          :\n  {\n    /* gcc uses crtbegin.o to find the start of\n       the constructors, so we make sure it is\n       first.  Because this is a wildcard, it\n       doesn't matter if the user does not\n       actually link against crtbegin.o; the\n       linker won't look for a file to match a\n       wildcard.  The wildcard also means that it\n       doesn't matter which directory crtbegin.o\n       is in.  
*/\n    KEEP (*crtbegin.o(.ctors))\n    KEEP (*crtbegin?.o(.ctors))\n    /* We don't want to include the .ctor section from\n       the crtend.o file until after the sorted ctors.\n       The .ctor section from the crtend file contains the\n       end of ctors marker and it must be last */\n    KEEP (*(EXCLUDE_FILE (*crtend.o *crtend?.o ) .ctors))\n    KEEP (*(SORT(.ctors.*)))\n    KEEP (*(.ctors))\n  }\n  .dtors          :\n  {\n    KEEP (*crtbegin.o(.dtors))\n    KEEP (*crtbegin?.o(.dtors))\n    KEEP (*(EXCLUDE_FILE (*crtend.o *crtend?.o ) .dtors))\n    KEEP (*(SORT(.dtors.*)))\n    KEEP (*(.dtors))\n  }\n  .jcr            : { KEEP (*(.jcr)) }\n  .data.rel.ro : { *(.data.rel.ro.local* .gnu.linkonce.d.rel.ro.local.*) *(.data.rel.ro .data.rel.ro.* .gnu.linkonce.d.rel.ro.*) }\n  .dynamic        : { *(.dynamic) }\n  .got            : { *(.got) *(.igot) }\n  . = DATA_SEGMENT_RELRO_END (SIZEOF (.got.plt) >= 24 ? 24 : 0, .);\n  .got.plt        : { *(.got.plt)  *(.igot.plt) }\n  .data           :\n  {\n    *(.data .data.* .gnu.linkonce.d.*)\n    SORT(CONSTRUCTORS)\n  }\n  .data1          : { *(.data1) }\n  _edata = .; PROVIDE (edata = .);\n  . = .;\n  __bss_start = .;\n  .bss            :\n  {\n   *(.dynbss)\n   *(.bss .bss.* .gnu.linkonce.b.*)\n   *(COMMON)\n   /* Align here to ensure that the .bss section occupies space up to\n      _end.  Align after .bss to ensure correct alignment even if the\n      .bss section disappears because there are no input sections.\n      FIXME: Why do we need it? When there is no .bss section, we don't\n      pad the .data section.  */\n   . = ALIGN(. != 0 ? 64 / 8 : 1);\n  }\n  .lbss   :\n  {\n    *(.dynlbss)\n    *(.lbss .lbss.* .gnu.linkonce.lb.*)\n    *(LARGE_COMMON)\n  }\n  . = ALIGN(64 / 8);\n  . = SEGMENT_START(\"ldata-segment\", .);\n  .lrodata   ALIGN(CONSTANT (MAXPAGESIZE)) + (. & (CONSTANT (MAXPAGESIZE) - 1)) :\n  {\n    *(.lrodata .lrodata.* .gnu.linkonce.lr.*)\n  }\n  .ldata   ALIGN(CONSTANT (MAXPAGESIZE)) + (. 
& (CONSTANT (MAXPAGESIZE) - 1)) :\n  {\n    *(.ldata .ldata.* .gnu.linkonce.l.*)\n    . = ALIGN(. != 0 ? 64 / 8 : 1);\n  }\n  . = ALIGN(64 / 8);\n  _end = .; PROVIDE (end = .);\n  . = DATA_SEGMENT_END (.);\n  /* Stabs debugging sections.  */\n  .stab          0 : { *(.stab) }\n  .stabstr       0 : { *(.stabstr) }\n  .stab.excl     0 : { *(.stab.excl) }\n  .stab.exclstr  0 : { *(.stab.exclstr) }\n  .stab.index    0 : { *(.stab.index) }\n  .stab.indexstr 0 : { *(.stab.indexstr) }\n  .comment       0 : { *(.comment) }\n  /* DWARF debug sections.\n     Symbols in the DWARF debugging sections are relative to the beginning\n     of the section so we begin them at 0.  */\n  /* DWARF 1 */\n  .debug          0 : { *(.debug) }\n  .line           0 : { *(.line) }\n  /* GNU DWARF 1 extensions */\n  .debug_srcinfo  0 : { *(.debug_srcinfo) }\n  .debug_sfnames  0 : { *(.debug_sfnames) }\n  /* DWARF 1.1 and DWARF 2 */\n  .debug_aranges  0 : { *(.debug_aranges) }\n  .debug_pubnames 0 : { *(.debug_pubnames) }\n  /* DWARF 2 */\n  .debug_info     0 : { *(.debug_info .gnu.linkonce.wi.*) }\n  .debug_abbrev   0 : { *(.debug_abbrev) }\n  .debug_line     0 : { *(.debug_line .debug_line.* .debug_line_end ) }\n  .debug_frame    0 : { *(.debug_frame) }\n  .debug_str      0 : { *(.debug_str) }\n  .debug_loc      0 : { *(.debug_loc) }\n  .debug_macinfo  0 : { *(.debug_macinfo) }\n  /* SGI/MIPS DWARF 2 extensions */\n  .debug_weaknames 0 : { *(.debug_weaknames) }\n  .debug_funcnames 0 : { *(.debug_funcnames) }\n  .debug_typenames 0 : { *(.debug_typenames) }\n  .debug_varnames  0 : { *(.debug_varnames) }\n  /* DWARF 3 */\n  .debug_pubtypes 0 : { *(.debug_pubtypes) }\n  .debug_ranges   0 : { *(.debug_ranges) }\n  /* DWARF Extension.  */\n  .debug_macro    0 : { *(.debug_macro) }\n  .gnu.attributes 0 : { KEEP (*(.gnu.attributes)) }\n  /DISCARD/ : { *(.note.GNU-stack) *(.gnu_debuglink) *(.gnu.lto_*) }\n}\n\n"
  },
  {
    "path": "runtime/dfsan/taint_allocator.cpp",
    "content": "#include \"../sanitizer_common/sanitizer_atomic.h\"\n#include \"../sanitizer_common/sanitizer_common.h\"\n#include \"dfsan.h\"\n#include \"taint_allocator.h\"\n\nusing namespace __sanitizer;\n\nnamespace __taint {\n\nstatic uptr begin_addr;\nstatic atomic_uint64_t next_usable_byte;\nstatic uptr end_addr;\n\n/**\n * Initialize allocator memory,\n * begin: first usable byte\n * end: first unusable byte\n */\n\nvoid allocator_init(uptr begin, uptr end) {\n  begin_addr = begin;\n  atomic_store_relaxed(&next_usable_byte, begin);\n  end_addr = end;\n}\n\nvoid *allocator_alloc(uptr size) {\n  if (begin_addr == 0) {\n    Report(\"FATAL: Allocator not initialized\\n\");\n    Die();\n  }\n  uptr retval = atomic_fetch_add(&next_usable_byte, size, memory_order_relaxed);\n  if (retval + size >= end_addr) {\n    Report(\"FATAL: Allocate size exceeded\\n\");\n    Die();\n  }\n  return reinterpret_cast<void *>(retval);\n}\n\nvoid\nallocator_dealloc(uptr addr) {\n  // do nothing for now\n}\n\n} // namespace\n"
  },
  {
    "path": "runtime/dfsan/taint_allocator.h",
    "content": "#ifndef UNION_ALLOCATOR_H\n#define UNION_ALLOCATOR_H\n\n#include \"sanitizer_common/sanitizer_internal_defs.h\"\n\nusing __sanitizer::uptr;\n\nnamespace __taint {\n\nvoid allocator_init(uptr begin, uptr end);\nvoid *allocator_alloc(uptr size);\nvoid allocator_dealloc(uptr addr);\n\n} // namespace\n\n#endif // UNION_ALLOCATOR_H\n"
  },
  {
    "path": "runtime/dfsan/union_hashtable.cpp",
    "content": "#include \"sanitizer_common/sanitizer_libc.h\"\n#include \"union_hashtable.h\"\n#include \"union_util.h\"\n\nusing namespace __taint;\n\nunion_hashtable::union_hashtable(uint64_t n) {\n  bucket_size = n;\n  bucket = reinterpret_cast<atomic_uintptr_t*>(\n      allocator_alloc(n * sizeof(atomic_uintptr_t)));\n  __sanitizer::internal_memset(bucket, 0, n * sizeof(atomic_uintptr_t));\n}\n\nuint32_t\nunion_hashtable::hash(const dfsan_label_info &key) {\n  return key.hash & (bucket_size - 1);\n}\n\nvoid\nunion_hashtable::insert(dfsan_label_info *key, dfsan_label entry) {\n  uint32_t index = hash(*key);\n  auto curr = (struct union_hashtable_entry *)\n      allocator_alloc(sizeof(struct union_hashtable_entry));\n  curr->key = key; curr->entry = entry;\n  uptr p = atomic_load(&bucket[index], memory_order_acquire);\n  while (true) {\n    curr->next = reinterpret_cast<struct union_hashtable_entry *>(p);\n    if (atomic_compare_exchange_strong(&bucket[index], &p, (uptr)curr,\n                                       memory_order_seq_cst))\n      break; // spin until succeed, when fail, p will contain the current head\n  }\n}\n\noption\nunion_hashtable::lookup(const dfsan_label_info &key) {\n  uint64_t index = hash(key);\n  uptr p = atomic_load(&bucket[index], memory_order_acquire);\n  auto curr = reinterpret_cast<struct union_hashtable_entry *>(p);\n  while (curr) {\n    if (*(curr->key) == key) {\n      return some_dfsan_label(curr->entry);\n    }\n    curr = curr->next; // no data race here\n  }\n  return none();\n}\n"
  },
  {
    "path": "runtime/dfsan/union_hashtable.h",
    "content": "#ifndef UNION_HASHTABLE_H\n#define UNION_HASHTABLE_H\n\n#include <stdint.h>\n#include \"sanitizer_common/sanitizer_atomic.h\"\n#include \"sanitizer_common/sanitizer_internal_defs.h\"\n#include \"taint_allocator.h\"\n#include \"union_util.h\"\n#include \"dfsan.h\"\n\nusing __sanitizer::atomic_uintptr_t;\nusing __sanitizer::atomic_load;\nusing __sanitizer::atomic_compare_exchange_strong;\nusing __sanitizer::memory_order_acquire;\nusing __sanitizer::memory_order_seq_cst;\n\nnamespace __taint {\n\nstruct union_hashtable_entry {\n  dfsan_label_info *key;\n  dfsan_label entry;\n  struct union_hashtable_entry *next;\n};\n\nclass union_hashtable {\n  atomic_uintptr_t *bucket;\n  uint64_t bucket_size;\n  uint32_t hash(const dfsan_label_info &key);\npublic:\n  union_hashtable(uint64_t n);\n  void insert(dfsan_label_info *key, dfsan_label value);\n  option lookup(const dfsan_label_info &key);\n};\n\n}\n\n#endif\n"
  },
  {
    "path": "runtime/dfsan/union_util.cpp",
    "content": "#include \"union_util.h\"\n\nnamespace __taint {\n\n/**\n * Initialize allocator memory,\n * begin: first usable byte\n * end: first unusable byte\n */\n\noption::option(bool isa, dfsan_label l) {\n  this->isa = isa;\n  this->content = l;\n}\n\noption some_dfsan_label(dfsan_label x) {\n  return option(true, x);\n}\n\noption none() {\n  return option(false, 0);\n}\n\nbool\noption::operator==(option rhs) {\n    if (isa == false) {\n          return rhs.isa == false;\n            }\n      return rhs.isa != false && content == rhs.content;\n}\n\nbool\noption::operator!=(option rhs) {\n  return !(*this == rhs);\n}\n\ndfsan_label\noption::operator*() {\n    return this->content;\n}\n\nbool\noperator==(const dfsan_label_info& lhs, const dfsan_label_info& rhs) {\n  return lhs.l1 == rhs.l1\n      && lhs.l2 == rhs.l2\n      && lhs.op == rhs.op\n      && lhs.size == rhs.size\n      && lhs.op1.i == rhs.op1.i\n      && lhs.op2.i == rhs.op2.i;\n}\n\n}\n"
  },
  {
    "path": "runtime/dfsan/union_util.h",
    "content": "#ifndef UNION_UTIL_H\n#define UNION_UTIL_H\n\n#include \"sanitizer_common/sanitizer_internal_defs.h\"\n#include \"dfsan.h\"\n\nusing __sanitizer::uptr;\nusing __sanitizer::u32;\n\nnamespace __taint {\n\nclass option {\n  bool isa;\n  dfsan_label content;\npublic:\n  option(bool, dfsan_label);\n  bool operator==(option rhs);\n  bool operator!=(option rhs);\n  dfsan_label operator*();\n};\n\noption some_dfsan_label(dfsan_label x);\noption none();\n\nbool operator==(const dfsan_label_info& lhs, \n    const dfsan_label_info& rhs);\n\n} // namespace\n\n#endif // UNION_UTIL_H\n"
  },
  {
    "path": "runtime/interception/.clang-format",
    "content": "BasedOnStyle: Google\nAllowShortIfStatementsOnASingleLine: false\nIndentPPDirectives: AfterHash\n"
  },
  {
    "path": "runtime/interception/CMakeLists.txt",
    "content": "# Build for the runtime interception helper library.\n\nset(INTERCEPTION_SOURCES\n  interception_linux.cpp\n  interception_mac.cpp\n  interception_win.cpp\n  interception_type_test.cpp\n  )\n\nset(INTERCEPTION_HEADERS\n  interception.h\n  interception_linux.h\n  interception_mac.h\n  interception_win.h\n  )\n\ninclude_directories(..)\n\nset(INTERCEPTION_CFLAGS ${SANITIZER_COMMON_CFLAGS})\nappend_rtti_flag(OFF INTERCEPTION_CFLAGS)\n\n# Silence warnings in system headers with MSVC.\nif(NOT CLANG_CL)\n  append_list_if(COMPILER_RT_HAS_EXTERNAL_FLAG \"/experimental:external /external:W0 /external:anglebrackets\" INTERCEPTION_CFLAGS)\nendif()\n\nadd_compiler_rt_object_libraries(RTInterception\n    OS ${SANITIZER_COMMON_SUPPORTED_OS}\n    ARCHS ${SANITIZER_COMMON_SUPPORTED_ARCH}\n    SOURCES ${INTERCEPTION_SOURCES}\n    ADDITIONAL_HEADERS ${INTERCEPTION_HEADERS}\n    CFLAGS ${INTERCEPTION_CFLAGS})\n\nif(COMPILER_RT_INCLUDE_TESTS)\n  add_subdirectory(tests)\nendif()\n"
  },
  {
    "path": "runtime/interception/interception.h",
    "content": "//===-- interception.h ------------------------------------------*- C++ -*-===//\n//\n// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n// See https://llvm.org/LICENSE.txt for license information.\n// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n//\n//===----------------------------------------------------------------------===//\n//\n// This file is a part of AddressSanitizer, an address sanity checker.\n//\n// Machinery for providing replacements/wrappers for system functions.\n//===----------------------------------------------------------------------===//\n\n#ifndef INTERCEPTION_H\n#define INTERCEPTION_H\n\n#include \"sanitizer_common/sanitizer_internal_defs.h\"\n\n#if !SANITIZER_LINUX && !SANITIZER_FREEBSD && !SANITIZER_MAC &&      \\\n    !SANITIZER_NETBSD && !SANITIZER_WINDOWS && !SANITIZER_FUCHSIA && \\\n    !SANITIZER_SOLARIS\n#  error \"Interception doesn't work on this operating system.\"\n#endif\n\n// These typedefs should be used only in the interceptor definitions to replace\n// the standard system types (e.g. SSIZE_T instead of ssize_t)\ntypedef __sanitizer::uptr    SIZE_T;\ntypedef __sanitizer::sptr    SSIZE_T;\ntypedef __sanitizer::sptr    PTRDIFF_T;\ntypedef __sanitizer::s64     INTMAX_T;\ntypedef __sanitizer::u64     UINTMAX_T;\ntypedef __sanitizer::OFF_T   OFF_T;\ntypedef __sanitizer::OFF64_T OFF64_T;\n\n// How to add an interceptor:\n// Suppose you need to wrap/replace system function (generally, from libc):\n//      int foo(const char *bar, double baz);\n// You'll need to:\n//      1) define INTERCEPTOR(int, foo, const char *bar, double baz) { ... } in\n//         your source file. See the notes below for cases when\n//         INTERCEPTOR_WITH_SUFFIX(...) 
should be used instead.\n//      2) Call \"INTERCEPT_FUNCTION(foo)\" prior to the first call of \"foo\".\n//         INTERCEPT_FUNCTION(foo) evaluates to \"true\" iff the function was\n//         intercepted successfully.\n// You can access original function by calling REAL(foo)(bar, baz).\n// By default, REAL(foo) will be visible only inside your interceptor, and if\n// you want to use it in other parts of RTL, you'll need to:\n//      3a) add DECLARE_REAL(int, foo, const char*, double) to a\n//          header file.\n// However, if the call \"INTERCEPT_FUNCTION(foo)\" and definition for\n// INTERCEPTOR(..., foo, ...) are in different files, you'll instead need to:\n//      3b) add DECLARE_REAL_AND_INTERCEPTOR(int, foo, const char*, double)\n//          to a header file.\n\n// Notes: 1. Things may not work properly if macro INTERCEPTOR(...) {...} or\n//           DECLARE_REAL(...) are located inside namespaces.\n//        2. On Mac you can also use: \"OVERRIDE_FUNCTION(foo, zoo)\" to\n//           effectively redirect calls from \"foo\" to \"zoo\". In this case\n//           you aren't required to implement\n//           INTERCEPTOR(int, foo, const char *bar, double baz) {...}\n//           but instead you'll have to add\n//           DECLARE_REAL(int, foo, const char *bar, double baz) in your\n//           source file (to define a pointer to overriden function).\n//        3. Some Mac functions have symbol variants discriminated by\n//           additional suffixes, e.g. _$UNIX2003 (see\n//           https://developer.apple.com/library/mac/#releasenotes/Darwin/SymbolVariantsRelNotes/index.html\n//           for more details). To intercept such functions you need to use the\n//           INTERCEPTOR_WITH_SUFFIX(...) macro.\n\n// How it works:\n// To replace system functions on Linux we just need to declare functions\n// with same names in our library and then obtain the real function pointers\n// using dlsym().\n// There is one complication. 
A user may also intercept some of the functions\n// we intercept. To resolve this we declare our interceptors with __interceptor_\n// prefix, and then make actual interceptors weak aliases to __interceptor_\n// functions.\n//\n// This is not so on Mac OS, where the two-level namespace makes\n// our replacement functions invisible to other libraries. This may be overcomed\n// using the DYLD_FORCE_FLAT_NAMESPACE, but some errors loading the shared\n// libraries in Chromium were noticed when doing so.\n// Instead we create a dylib containing a __DATA,__interpose section that\n// associates library functions with their wrappers. When this dylib is\n// preloaded before an executable using DYLD_INSERT_LIBRARIES, it routes all\n// the calls to interposed functions done through stubs to the wrapper\n// functions.\n// As it's decided at compile time which functions are to be intercepted on Mac,\n// INTERCEPT_FUNCTION() is effectively a no-op on this system.\n\n#if SANITIZER_MAC\n#include <sys/cdefs.h>  // For __DARWIN_ALIAS_C().\n\n// Just a pair of pointers.\nstruct interpose_substitution {\n  const __sanitizer::uptr replacement;\n  const __sanitizer::uptr original;\n};\n\n// For a function foo() create a global pair of pointers { wrap_foo, foo } in\n// the __DATA,__interpose section.\n// As a result all the calls to foo() will be routed to wrap_foo() at runtime.\n#define INTERPOSER(func_name) __attribute__((used)) \\\nconst interpose_substitution substitution_##func_name[] \\\n    __attribute__((section(\"__DATA, __interpose\"))) = { \\\n    { reinterpret_cast<const uptr>(WRAP(func_name)), \\\n      reinterpret_cast<const uptr>(func_name) } \\\n}\n\n// For a function foo() and a wrapper function bar() create a global pair\n// of pointers { bar, foo } in the __DATA,__interpose section.\n// As a result all the calls to foo() will be routed to bar() at runtime.\n#define INTERPOSER_2(func_name, wrapper_name) __attribute__((used)) \\\nconst interpose_substitution 
substitution_##func_name[] \\\n    __attribute__((section(\"__DATA, __interpose\"))) = { \\\n    { reinterpret_cast<const uptr>(wrapper_name), \\\n      reinterpret_cast<const uptr>(func_name) } \\\n}\n\n# define WRAP(x) wrap_##x\n# define WRAPPER_NAME(x) \"wrap_\"#x\n# define INTERCEPTOR_ATTRIBUTE\n# define DECLARE_WRAPPER(ret_type, func, ...)\n\n#elif SANITIZER_WINDOWS\n# define WRAP(x) __asan_wrap_##x\n# define WRAPPER_NAME(x) \"__asan_wrap_\"#x\n# define INTERCEPTOR_ATTRIBUTE __declspec(dllexport)\n# define DECLARE_WRAPPER(ret_type, func, ...) \\\n    extern \"C\" ret_type func(__VA_ARGS__);\n# define DECLARE_WRAPPER_WINAPI(ret_type, func, ...) \\\n    extern \"C\" __declspec(dllimport) ret_type __stdcall func(__VA_ARGS__);\n#elif SANITIZER_FREEBSD || SANITIZER_NETBSD\n# define WRAP(x) __interceptor_ ## x\n# define WRAPPER_NAME(x) \"__interceptor_\" #x\n# define INTERCEPTOR_ATTRIBUTE __attribute__((visibility(\"default\")))\n// FreeBSD's dynamic linker (incompliantly) gives non-weak symbols higher\n// priority than weak ones so weak aliases won't work for indirect calls\n// in position-independent (-fPIC / -fPIE) mode.\n# define DECLARE_WRAPPER(ret_type, func, ...) \\\n     extern \"C\" ret_type func(__VA_ARGS__) \\\n     __attribute__((alias(\"__interceptor_\" #func), visibility(\"default\")));\n#elif !SANITIZER_FUCHSIA\n# define WRAP(x) __interceptor_ ## x\n# define WRAPPER_NAME(x) \"__interceptor_\" #x\n# define INTERCEPTOR_ATTRIBUTE __attribute__((visibility(\"default\")))\n# define DECLARE_WRAPPER(ret_type, func, ...) 
\\\n    extern \"C\" ret_type func(__VA_ARGS__) \\\n    __attribute__((weak, alias(\"__interceptor_\" #func), visibility(\"default\")));\n#endif\n\n#if SANITIZER_FUCHSIA\n// There is no general interception at all on Fuchsia.\n// Sanitizer runtimes just define functions directly to preempt them,\n// and have bespoke ways to access the underlying libc functions.\n# include <zircon/sanitizer.h>\n# define INTERCEPTOR_ATTRIBUTE __attribute__((visibility(\"default\")))\n# define REAL(x) __unsanitized_##x\n# define DECLARE_REAL(ret_type, func, ...)\n#elif !SANITIZER_MAC\n# define PTR_TO_REAL(x) real_##x\n# define REAL(x) __interception::PTR_TO_REAL(x)\n# define FUNC_TYPE(x) x##_type\n\n# define DECLARE_REAL(ret_type, func, ...) \\\n    typedef ret_type (*FUNC_TYPE(func))(__VA_ARGS__); \\\n    namespace __interception { \\\n      extern FUNC_TYPE(func) PTR_TO_REAL(func); \\\n    }\n# define ASSIGN_REAL(dst, src) REAL(dst) = REAL(src)\n#else  // SANITIZER_MAC\n# define REAL(x) x\n# define DECLARE_REAL(ret_type, func, ...) \\\n    extern \"C\" ret_type func(__VA_ARGS__);\n# define ASSIGN_REAL(x, y)\n#endif  // SANITIZER_MAC\n\n#if !SANITIZER_FUCHSIA\n#  define DECLARE_REAL_AND_INTERCEPTOR(ret_type, func, ...) \\\n    DECLARE_REAL(ret_type, func, __VA_ARGS__)               \\\n    extern \"C\" ret_type WRAP(func)(__VA_ARGS__);\n// Declare an interceptor and its wrapper defined in a different translation\n// unit (ex. asm).\n# define DECLARE_EXTERN_INTERCEPTOR_AND_WRAPPER(ret_type, func, ...)    \\\n  extern \"C\" ret_type WRAP(func)(__VA_ARGS__); \\\n  extern \"C\" ret_type func(__VA_ARGS__);\n#else\n# define DECLARE_REAL_AND_INTERCEPTOR(ret_type, func, ...)\n# define DECLARE_EXTERN_INTERCEPTOR_AND_WRAPPER(ret_type, func, ...)\n#endif\n\n// Generally, you don't need to use DEFINE_REAL by itself, as INTERCEPTOR\n// macros does its job. In exceptional cases you may need to call REAL(foo)\n// without defining INTERCEPTOR(..., foo, ...). 
For example, if you override\n// foo with an interceptor for other function.\n#if !SANITIZER_MAC && !SANITIZER_FUCHSIA\n#  define DEFINE_REAL(ret_type, func, ...)            \\\n    typedef ret_type (*FUNC_TYPE(func))(__VA_ARGS__); \\\n    namespace __interception {                        \\\n    FUNC_TYPE(func) PTR_TO_REAL(func);                \\\n    }\n#else\n# define DEFINE_REAL(ret_type, func, ...)\n#endif\n\n#if SANITIZER_FUCHSIA\n\n// We need to define the __interceptor_func name just to get\n// sanitizer_common/scripts/gen_dynamic_list.py to export func.\n// But we don't need to export __interceptor_func to get that.\n#define INTERCEPTOR(ret_type, func, ...)                                \\\n  extern \"C\"[[ gnu::alias(#func), gnu::visibility(\"hidden\") ]] ret_type \\\n      __interceptor_##func(__VA_ARGS__);                                \\\n  extern \"C\" INTERCEPTOR_ATTRIBUTE ret_type func(__VA_ARGS__)\n\n#elif !SANITIZER_MAC\n\n#define INTERCEPTOR(ret_type, func, ...) \\\n  DEFINE_REAL(ret_type, func, __VA_ARGS__) \\\n  DECLARE_WRAPPER(ret_type, func, __VA_ARGS__) \\\n  extern \"C\" \\\n  INTERCEPTOR_ATTRIBUTE \\\n  ret_type WRAP(func)(__VA_ARGS__)\n\n// We don't need INTERCEPTOR_WITH_SUFFIX on non-Darwin for now.\n#define INTERCEPTOR_WITH_SUFFIX(ret_type, func, ...) \\\n  INTERCEPTOR(ret_type, func, __VA_ARGS__)\n\n#else  // SANITIZER_MAC\n\n#define INTERCEPTOR_ZZZ(suffix, ret_type, func, ...) \\\n  extern \"C\" ret_type func(__VA_ARGS__) suffix; \\\n  extern \"C\" ret_type WRAP(func)(__VA_ARGS__); \\\n  INTERPOSER(func); \\\n  extern \"C\" INTERCEPTOR_ATTRIBUTE ret_type WRAP(func)(__VA_ARGS__)\n\n#define INTERCEPTOR(ret_type, func, ...) \\\n  INTERCEPTOR_ZZZ(/*no symbol variants*/, ret_type, func, __VA_ARGS__)\n\n#define INTERCEPTOR_WITH_SUFFIX(ret_type, func, ...) 
\\\n  INTERCEPTOR_ZZZ(__DARWIN_ALIAS_C(func), ret_type, func, __VA_ARGS__)\n\n// Override |overridee| with |overrider|.\n#define OVERRIDE_FUNCTION(overridee, overrider) \\\n  INTERPOSER_2(overridee, WRAP(overrider))\n#endif\n\n#if SANITIZER_WINDOWS\n# define INTERCEPTOR_WINAPI(ret_type, func, ...) \\\n    typedef ret_type (__stdcall *FUNC_TYPE(func))(__VA_ARGS__); \\\n    namespace __interception { \\\n      FUNC_TYPE(func) PTR_TO_REAL(func); \\\n    } \\\n    extern \"C\" \\\n    INTERCEPTOR_ATTRIBUTE \\\n    ret_type __stdcall WRAP(func)(__VA_ARGS__)\n#endif\n\n// ISO C++ forbids casting between pointer-to-function and pointer-to-object,\n// so we use casting via an integral type __interception::uptr,\n// assuming that system is POSIX-compliant. Using other hacks seem\n// challenging, as we don't even pass function type to\n// INTERCEPT_FUNCTION macro, only its name.\nnamespace __interception {\n#if defined(_WIN64)\ntypedef unsigned long long uptr;\n#else\ntypedef unsigned long uptr;\n#endif  // _WIN64\n}  // namespace __interception\n\n#define INCLUDED_FROM_INTERCEPTION_LIB\n\n#if SANITIZER_LINUX || SANITIZER_FREEBSD || SANITIZER_NETBSD || \\\n    SANITIZER_SOLARIS\n\n# include \"interception_linux.h\"\n# define INTERCEPT_FUNCTION(func) INTERCEPT_FUNCTION_LINUX_OR_FREEBSD(func)\n# define INTERCEPT_FUNCTION_VER(func, symver) \\\n    INTERCEPT_FUNCTION_VER_LINUX_OR_FREEBSD(func, symver)\n#elif SANITIZER_MAC\n# include \"interception_mac.h\"\n# define INTERCEPT_FUNCTION(func) INTERCEPT_FUNCTION_MAC(func)\n# define INTERCEPT_FUNCTION_VER(func, symver) \\\n    INTERCEPT_FUNCTION_VER_MAC(func, symver)\n#elif SANITIZER_WINDOWS\n# include \"interception_win.h\"\n# define INTERCEPT_FUNCTION(func) INTERCEPT_FUNCTION_WIN(func)\n# define INTERCEPT_FUNCTION_VER(func, symver) \\\n    INTERCEPT_FUNCTION_VER_WIN(func, symver)\n#endif\n\n#undef INCLUDED_FROM_INTERCEPTION_LIB\n\n#endif  // INTERCEPTION_H\n"
  },
  {
    "path": "runtime/interception/interception_linux.cpp",
    "content": "//===-- interception_linux.cpp ----------------------------------*- C++ -*-===//\n//\n// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n// See https://llvm.org/LICENSE.txt for license information.\n// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n//\n//===----------------------------------------------------------------------===//\n//\n// This file is a part of AddressSanitizer, an address sanity checker.\n//\n// Linux-specific interception methods.\n//===----------------------------------------------------------------------===//\n\n#include \"interception.h\"\n\n#if SANITIZER_LINUX || SANITIZER_FREEBSD || SANITIZER_NETBSD || \\\n    SANITIZER_SOLARIS\n\n#include <dlfcn.h>   // for dlsym() and dlvsym()\n\nnamespace __interception {\n\n#if SANITIZER_NETBSD\nstatic int StrCmp(const char *s1, const char *s2) {\n  while (true) {\n    if (*s1 != *s2)\n      return false;\n    if (*s1 == 0)\n      return true;\n    s1++;\n    s2++;\n  }\n}\n#endif\n\nstatic void *GetFuncAddr(const char *name, uptr wrapper_addr) {\n#if SANITIZER_NETBSD\n  // FIXME: Find a better way to handle renames\n  if (StrCmp(name, \"sigaction\"))\n    name = \"__sigaction14\";\n#endif\n  void *addr = dlsym(RTLD_NEXT, name);\n  if (!addr) {\n    // If the lookup using RTLD_NEXT failed, the sanitizer runtime library is\n    // later in the library search order than the DSO that we are trying to\n    // intercept, which means that we cannot intercept this function. 
We still\n    // want the address of the real definition, though, so look it up using\n    // RTLD_DEFAULT.\n    addr = dlsym(RTLD_DEFAULT, name);\n\n    // In case `name' is not loaded, dlsym ends up finding the actual wrapper.\n    // We don't want to intercept the wrapper and have it point to itself.\n    if ((uptr)addr == wrapper_addr)\n      addr = nullptr;\n  }\n  return addr;\n}\n\nbool InterceptFunction(const char *name, uptr *ptr_to_real, uptr func,\n                       uptr wrapper) {\n  void *addr = GetFuncAddr(name, wrapper);\n  *ptr_to_real = (uptr)addr;\n  return addr && (func == wrapper);\n}\n\n// dlvsym is a GNU extension supported by some other platforms.\n#if SANITIZER_GLIBC || SANITIZER_FREEBSD || SANITIZER_NETBSD\nstatic void *GetFuncAddr(const char *name, const char *ver) {\n  return dlvsym(RTLD_NEXT, name, ver);\n}\n\nbool InterceptFunction(const char *name, const char *ver, uptr *ptr_to_real,\n                       uptr func, uptr wrapper) {\n  void *addr = GetFuncAddr(name, ver);\n  *ptr_to_real = (uptr)addr;\n  return addr && (func == wrapper);\n}\n#endif  // SANITIZER_GLIBC || SANITIZER_FREEBSD || SANITIZER_NETBSD\n\n}  // namespace __interception\n\n#endif  // SANITIZER_LINUX || SANITIZER_FREEBSD || SANITIZER_NETBSD ||\n        // SANITIZER_SOLARIS\n"
  },
  {
    "path": "runtime/interception/interception_linux.h",
    "content": "//===-- interception_linux.h ------------------------------------*- C++ -*-===//\n//\n// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n// See https://llvm.org/LICENSE.txt for license information.\n// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n//\n//===----------------------------------------------------------------------===//\n//\n// This file is a part of AddressSanitizer, an address sanity checker.\n//\n// Linux-specific interception methods.\n//===----------------------------------------------------------------------===//\n\n#if SANITIZER_LINUX || SANITIZER_FREEBSD || SANITIZER_NETBSD || \\\n    SANITIZER_SOLARIS\n\n#if !defined(INCLUDED_FROM_INTERCEPTION_LIB)\n# error \"interception_linux.h should be included from interception library only\"\n#endif\n\n#ifndef INTERCEPTION_LINUX_H\n#define INTERCEPTION_LINUX_H\n\nnamespace __interception {\nbool InterceptFunction(const char *name, uptr *ptr_to_real, uptr func,\n                       uptr wrapper);\nbool InterceptFunction(const char *name, const char *ver, uptr *ptr_to_real,\n                       uptr func, uptr wrapper);\n}  // namespace __interception\n\n#define INTERCEPT_FUNCTION_LINUX_OR_FREEBSD(func) \\\n  ::__interception::InterceptFunction(            \\\n      #func,                                      \\\n      (::__interception::uptr *) & REAL(func),    \\\n      (::__interception::uptr) & (func),          \\\n      (::__interception::uptr) & WRAP(func))\n\n// dlvsym is a GNU extension supported by some other platforms.\n#if SANITIZER_GLIBC || SANITIZER_FREEBSD || SANITIZER_NETBSD\n#define INTERCEPT_FUNCTION_VER_LINUX_OR_FREEBSD(func, symver) \\\n  ::__interception::InterceptFunction(                        \\\n      #func, symver,                                          \\\n      (::__interception::uptr *) & REAL(func),                \\\n      (::__interception::uptr) & (func),                      \\\n      (::__interception::uptr) 
& WRAP(func))\n#else\n#define INTERCEPT_FUNCTION_VER_LINUX_OR_FREEBSD(func, symver) \\\n  INTERCEPT_FUNCTION_LINUX_OR_FREEBSD(func)\n#endif  // SANITIZER_GLIBC || SANITIZER_FREEBSD || SANITIZER_NETBSD\n\n#endif  // INTERCEPTION_LINUX_H\n#endif  // SANITIZER_LINUX || SANITIZER_FREEBSD || SANITIZER_NETBSD ||\n        // SANITIZER_SOLARIS\n"
  },
  {
    "path": "runtime/interception/interception_mac.cpp",
    "content": "//===-- interception_mac.cpp ------------------------------------*- C++ -*-===//\n//\n// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n// See https://llvm.org/LICENSE.txt for license information.\n// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n//\n//===----------------------------------------------------------------------===//\n//\n// This file is a part of AddressSanitizer, an address sanity checker.\n//\n// Mac-specific interception methods.\n//===----------------------------------------------------------------------===//\n\n#include \"interception.h\"\n\n#if SANITIZER_MAC\n\n#endif  // SANITIZER_MAC\n"
  },
  {
    "path": "runtime/interception/interception_mac.h",
    "content": "//===-- interception_mac.h --------------------------------------*- C++ -*-===//\n//\n// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n// See https://llvm.org/LICENSE.txt for license information.\n// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n//\n//===----------------------------------------------------------------------===//\n//\n// This file is a part of AddressSanitizer, an address sanity checker.\n//\n// Mac-specific interception methods.\n//===----------------------------------------------------------------------===//\n\n#if SANITIZER_MAC\n\n#if !defined(INCLUDED_FROM_INTERCEPTION_LIB)\n# error \"interception_mac.h should be included from interception.h only\"\n#endif\n\n#ifndef INTERCEPTION_MAC_H\n#define INTERCEPTION_MAC_H\n\n#define INTERCEPT_FUNCTION_MAC(func)\n#define INTERCEPT_FUNCTION_VER_MAC(func, symver)\n\n#endif  // INTERCEPTION_MAC_H\n#endif  // SANITIZER_MAC\n"
  },
  {
    "path": "runtime/interception/interception_type_test.cpp",
    "content": "//===-- interception_type_test.cpp ------------------------------*- C++ -*-===//\n//\n// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n// See https://llvm.org/LICENSE.txt for license information.\n// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n//\n//===----------------------------------------------------------------------===//\n//\n// This file is a part of AddressSanitizer, an address sanity checker.\n//\n// Compile-time tests of the internal type definitions.\n//===----------------------------------------------------------------------===//\n\n#include \"interception.h\"\n\n#if SANITIZER_LINUX || SANITIZER_MAC\n\n#include <sys/types.h>\n#include <stddef.h>\n#include <stdint.h>\n\nCOMPILER_CHECK(sizeof(::SIZE_T) == sizeof(size_t));\nCOMPILER_CHECK(sizeof(::SSIZE_T) == sizeof(ssize_t));\nCOMPILER_CHECK(sizeof(::PTRDIFF_T) == sizeof(ptrdiff_t));\nCOMPILER_CHECK(sizeof(::INTMAX_T) == sizeof(intmax_t));\n\n#if !SANITIZER_MAC\nCOMPILER_CHECK(sizeof(::OFF64_T) == sizeof(off64_t));\n#endif\n\n// The following are the cases when pread (and friends) is used instead of\n// pread64. In those cases we need OFF_T to match off_t. We don't care about the\n// rest (they depend on _FILE_OFFSET_BITS setting when building an application).\n# if SANITIZER_ANDROID || !defined _FILE_OFFSET_BITS || \\\n  _FILE_OFFSET_BITS != 64\nCOMPILER_CHECK(sizeof(::OFF_T) == sizeof(off_t));\n# endif\n\n#endif\n"
  },
  {
    "path": "runtime/interception/interception_win.cpp",
    "content": "//===-- interception_linux.cpp ----------------------------------*- C++ -*-===//\n//\n// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n// See https://llvm.org/LICENSE.txt for license information.\n// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n//\n//===----------------------------------------------------------------------===//\n//\n// This file is a part of AddressSanitizer, an address sanity checker.\n//\n// Windows-specific interception methods.\n//\n// This file is implementing several hooking techniques to intercept calls\n// to functions. The hooks are dynamically installed by modifying the assembly\n// code.\n//\n// The hooking techniques are making assumptions on the way the code is\n// generated and are safe under these assumptions.\n//\n// On 64-bit architecture, there is no direct 64-bit jump instruction. To allow\n// arbitrary branching on the whole memory space, the notion of trampoline\n// region is used. A trampoline region is a memory space withing 2G boundary\n// where it is safe to add custom assembly code to build 64-bit jumps.\n//\n// Hooking techniques\n// ==================\n//\n// 1) Detour\n//\n//    The Detour hooking technique is assuming the presence of an header with\n//    padding and an overridable 2-bytes nop instruction (mov edi, edi). The\n//    nop instruction can safely be replaced by a 2-bytes jump without any need\n//    to save the instruction. A jump to the target is encoded in the function\n//    header and the nop instruction is replaced by a short jump to the header.\n//\n//        head:  5 x nop                 head:  jmp <hook>\n//        func:  mov edi, edi    -->     func:  jmp short <head>\n//               [...]                   
real:  [...]\n//\n//    This technique is only implemented on 32-bit architecture.\n//    Most of the time, Windows API are hookable with the detour technique.\n//\n// 2) Redirect Jump\n//\n//    The redirect jump is applicable when the first instruction is a direct\n//    jump. The instruction is replaced by jump to the hook.\n//\n//        func:  jmp <label>     -->     func:  jmp <hook>\n//\n//    On an 64-bit architecture, a trampoline is inserted.\n//\n//        func:  jmp <label>     -->     func:  jmp <tramp>\n//                                              [...]\n//\n//                                   [trampoline]\n//                                      tramp:  jmp QWORD [addr]\n//                                       addr:  .bytes <hook>\n//\n//    Note: <real> is equivalent to <label>.\n//\n// 3) HotPatch\n//\n//    The HotPatch hooking is assuming the presence of an header with padding\n//    and a first instruction with at least 2-bytes.\n//\n//    The reason to enforce the 2-bytes limitation is to provide the minimal\n//    space to encode a short jump. HotPatch technique is only rewriting one\n//    instruction to avoid breaking a sequence of instructions containing a\n//    branching target.\n//\n//    Assumptions are enforced by MSVC compiler by using the /HOTPATCH flag.\n//      see: https://msdn.microsoft.com/en-us/library/ms173507.aspx\n//    Default padding length is 5 bytes in 32-bits and 6 bytes in 64-bits.\n//\n//        head:   5 x nop                head:  jmp <hook>\n//        func:   <instr>        -->     func:  jmp short <head>\n//                [...]                  
body:  [...]\n//\n//                                   [trampoline]\n//                                       real:  <instr>\n//                                              jmp <body>\n//\n//    On an 64-bit architecture:\n//\n//        head:   6 x nop                head:  jmp QWORD [addr1]\n//        func:   <instr>        -->     func:  jmp short <head>\n//                [...]                  body:  [...]\n//\n//                                   [trampoline]\n//                                      addr1:  .bytes <hook>\n//                                       real:  <instr>\n//                                              jmp QWORD [addr2]\n//                                      addr2:  .bytes <body>\n//\n// 4) Trampoline\n//\n//    The Trampoline hooking technique is the most aggressive one. It is\n//    assuming that there is a sequence of instructions that can be safely\n//    replaced by a jump (enough room and no incoming branches).\n//\n//    Unfortunately, these assumptions can't be safely presumed and code may\n//    be broken after hooking.\n//\n//        func:   <instr>        -->     func:  jmp <hook>\n//                <instr>\n//                [...]                  body:  [...]\n//\n//                                   [trampoline]\n//                                       real:  <instr>\n//                                              <instr>\n//                                              jmp <body>\n//\n//    On an 64-bit architecture:\n//\n//        func:   <instr>        -->     func:  jmp QWORD [addr1]\n//                <instr>\n//                [...]                  
body:  [...]\n//\n//                                   [trampoline]\n//                                      addr1:  .bytes <hook>\n//                                       real:  <instr>\n//                                              <instr>\n//                                              jmp QWORD [addr2]\n//                                      addr2:  .bytes <body>\n//===----------------------------------------------------------------------===//\n\n#include \"interception.h\"\n\n#if SANITIZER_WINDOWS\n#include \"sanitizer_common/sanitizer_platform.h\"\n#define WIN32_LEAN_AND_MEAN\n#include <windows.h>\n\nnamespace __interception {\n\nstatic const int kAddressLength = FIRST_32_SECOND_64(4, 8);\nstatic const int kJumpInstructionLength = 5;\nstatic const int kShortJumpInstructionLength = 2;\nUNUSED static const int kIndirectJumpInstructionLength = 6;\nstatic const int kBranchLength =\n    FIRST_32_SECOND_64(kJumpInstructionLength, kIndirectJumpInstructionLength);\nstatic const int kDirectBranchLength = kBranchLength + kAddressLength;\n\nstatic void InterceptionFailed() {\n  // Do we have a good way to abort with an error message here?\n  __debugbreak();\n}\n\nstatic bool DistanceIsWithin2Gig(uptr from, uptr target) {\n#if SANITIZER_WINDOWS64\n  if (from < target)\n    return target - from <= (uptr)0x7FFFFFFFU;\n  else\n    return from - target <= (uptr)0x80000000U;\n#else\n  // In a 32-bit address space, the address calculation will wrap, so this check\n  // is unnecessary.\n  return true;\n#endif\n}\n\nstatic uptr GetMmapGranularity() {\n  SYSTEM_INFO si;\n  GetSystemInfo(&si);\n  return si.dwAllocationGranularity;\n}\n\nUNUSED static uptr RoundUpTo(uptr size, uptr boundary) {\n  return (size + boundary - 1) & ~(boundary - 1);\n}\n\n// FIXME: internal_str* and internal_mem* functions should be moved from the\n// ASan sources into interception/.\n\nstatic size_t _strlen(const char *str) {\n  const char* p = str;\n  while (*p != '\\0') ++p;\n  return p - 
str;\n}\n\nstatic char* _strchr(char* str, char c) {\n  while (*str) {\n    if (*str == c)\n      return str;\n    ++str;\n  }\n  return nullptr;\n}\n\nstatic void _memset(void *p, int value, size_t sz) {\n  for (size_t i = 0; i < sz; ++i)\n    ((char*)p)[i] = (char)value;\n}\n\nstatic void _memcpy(void *dst, void *src, size_t sz) {\n  char *dst_c = (char*)dst,\n       *src_c = (char*)src;\n  for (size_t i = 0; i < sz; ++i)\n    dst_c[i] = src_c[i];\n}\n\nstatic bool ChangeMemoryProtection(\n    uptr address, uptr size, DWORD *old_protection) {\n  return ::VirtualProtect((void*)address, size,\n                          PAGE_EXECUTE_READWRITE,\n                          old_protection) != FALSE;\n}\n\nstatic bool RestoreMemoryProtection(\n    uptr address, uptr size, DWORD old_protection) {\n  DWORD unused;\n  return ::VirtualProtect((void*)address, size,\n                          old_protection,\n                          &unused) != FALSE;\n}\n\nstatic bool IsMemoryPadding(uptr address, uptr size) {\n  u8* function = (u8*)address;\n  for (size_t i = 0; i < size; ++i)\n    if (function[i] != 0x90 && function[i] != 0xCC)\n      return false;\n  return true;\n}\n\nstatic const u8 kHintNop8Bytes[] = {\n  0x0F, 0x1F, 0x84, 0x00, 0x00, 0x00, 0x00, 0x00\n};\n\ntemplate<class T>\nstatic bool FunctionHasPrefix(uptr address, const T &pattern) {\n  u8* function = (u8*)address - sizeof(pattern);\n  for (size_t i = 0; i < sizeof(pattern); ++i)\n    if (function[i] != pattern[i])\n      return false;\n  return true;\n}\n\nstatic bool FunctionHasPadding(uptr address, uptr size) {\n  if (IsMemoryPadding(address - size, size))\n    return true;\n  if (size <= sizeof(kHintNop8Bytes) &&\n      FunctionHasPrefix(address, kHintNop8Bytes))\n    return true;\n  return false;\n}\n\nstatic void WritePadding(uptr from, uptr size) {\n  _memset((void*)from, 0xCC, (size_t)size);\n}\n\nstatic void WriteJumpInstruction(uptr from, uptr target) {\n  if (!DistanceIsWithin2Gig(from + 
kJumpInstructionLength, target))\n    InterceptionFailed();\n  ptrdiff_t offset = target - from - kJumpInstructionLength;\n  *(u8*)from = 0xE9;\n  *(u32*)(from + 1) = offset;\n}\n\nstatic void WriteShortJumpInstruction(uptr from, uptr target) {\n  sptr offset = target - from - kShortJumpInstructionLength;\n  if (offset < -128 || offset > 127)\n    InterceptionFailed();\n  *(u8*)from = 0xEB;\n  *(u8*)(from + 1) = (u8)offset;\n}\n\n#if SANITIZER_WINDOWS64\nstatic void WriteIndirectJumpInstruction(uptr from, uptr indirect_target) {\n  // jmp [rip + <offset>] = FF 25 <offset> where <offset> is a relative\n  // offset.\n  // The offset is the distance from then end of the jump instruction to the\n  // memory location containing the targeted address. The displacement is still\n  // 32-bit in x64, so indirect_target must be located within +/- 2GB range.\n  int offset = indirect_target - from - kIndirectJumpInstructionLength;\n  if (!DistanceIsWithin2Gig(from + kIndirectJumpInstructionLength,\n                            indirect_target)) {\n    InterceptionFailed();\n  }\n  *(u16*)from = 0x25FF;\n  *(u32*)(from + 2) = offset;\n}\n#endif\n\nstatic void WriteBranch(\n    uptr from, uptr indirect_target, uptr target) {\n#if SANITIZER_WINDOWS64\n  WriteIndirectJumpInstruction(from, indirect_target);\n  *(u64*)indirect_target = target;\n#else\n  (void)indirect_target;\n  WriteJumpInstruction(from, target);\n#endif\n}\n\nstatic void WriteDirectBranch(uptr from, uptr target) {\n#if SANITIZER_WINDOWS64\n  // Emit an indirect jump through immediately following bytes:\n  //   jmp [rip + kBranchLength]\n  //   .quad <target>\n  WriteBranch(from, from + kBranchLength, target);\n#else\n  WriteJumpInstruction(from, target);\n#endif\n}\n\nstruct TrampolineMemoryRegion {\n  uptr content;\n  uptr allocated_size;\n  uptr max_size;\n};\n\nUNUSED static const uptr kTrampolineScanLimitRange = 1 << 31;  // 2 gig\nstatic const int kMaxTrampolineRegion = 1024;\nstatic TrampolineMemoryRegion 
TrampolineRegions[kMaxTrampolineRegion];\n\nstatic void *AllocateTrampolineRegion(uptr image_address, size_t granularity) {\n#if SANITIZER_WINDOWS64\n  uptr address = image_address;\n  uptr scanned = 0;\n  while (scanned < kTrampolineScanLimitRange) {\n    MEMORY_BASIC_INFORMATION info;\n    if (!::VirtualQuery((void*)address, &info, sizeof(info)))\n      return nullptr;\n\n    // Check whether a region can be allocated at |address|.\n    if (info.State == MEM_FREE && info.RegionSize >= granularity) {\n      void *page = ::VirtualAlloc((void*)RoundUpTo(address, granularity),\n                                  granularity,\n                                  MEM_RESERVE | MEM_COMMIT,\n                                  PAGE_EXECUTE_READWRITE);\n      return page;\n    }\n\n    // Move to the next region.\n    address = (uptr)info.BaseAddress + info.RegionSize;\n    scanned += info.RegionSize;\n  }\n  return nullptr;\n#else\n  return ::VirtualAlloc(nullptr,\n                        granularity,\n                        MEM_RESERVE | MEM_COMMIT,\n                        PAGE_EXECUTE_READWRITE);\n#endif\n}\n\n// Used by unittests to release mapped memory space.\nvoid TestOnlyReleaseTrampolineRegions() {\n  for (size_t bucket = 0; bucket < kMaxTrampolineRegion; ++bucket) {\n    TrampolineMemoryRegion *current = &TrampolineRegions[bucket];\n    if (current->content == 0)\n      return;\n    ::VirtualFree((void*)current->content, 0, MEM_RELEASE);\n    current->content = 0;\n  }\n}\n\nstatic uptr AllocateMemoryForTrampoline(uptr image_address, size_t size) {\n  // Find a region within 2G with enough space to allocate |size| bytes.\n  TrampolineMemoryRegion *region = nullptr;\n  for (size_t bucket = 0; bucket < kMaxTrampolineRegion; ++bucket) {\n    TrampolineMemoryRegion* current = &TrampolineRegions[bucket];\n    if (current->content == 0) {\n      // No valid region found, allocate a new region.\n      size_t bucket_size = GetMmapGranularity();\n      void *content = 
AllocateTrampolineRegion(image_address, bucket_size);\n      if (content == nullptr)\n        return 0U;\n\n      current->content = (uptr)content;\n      current->allocated_size = 0;\n      current->max_size = bucket_size;\n      region = current;\n      break;\n    } else if (current->max_size - current->allocated_size > size) {\n#if SANITIZER_WINDOWS64\n        // In 64-bits, the memory space must be allocated within 2G boundary.\n        uptr next_address = current->content + current->allocated_size;\n        if (next_address < image_address ||\n            next_address - image_address >= 0x7FFF0000)\n          continue;\n#endif\n      // The space can be allocated in the current region.\n      region = current;\n      break;\n    }\n  }\n\n  // Failed to find a region.\n  if (region == nullptr)\n    return 0U;\n\n  // Allocate the space in the current region.\n  uptr allocated_space = region->content + region->allocated_size;\n  region->allocated_size += size;\n  WritePadding(allocated_space, size);\n\n  return allocated_space;\n}\n\n// The following prologues cannot be patched because of the short jump\n// jumping to the patching region.\n\n#if SANITIZER_WINDOWS64\n// ntdll!wcslen in Win11\n//   488bc1          mov     rax,rcx\n//   0fb710          movzx   edx,word ptr [rax]\n//   4883c002        add     rax,2\n//   6685d2          test    dx,dx\n//   75f4            jne     -12\nstatic const u8 kPrologueWithShortJump1[] = {\n    0x48, 0x8b, 0xc1, 0x0f, 0xb7, 0x10, 0x48, 0x83,\n    0xc0, 0x02, 0x66, 0x85, 0xd2, 0x75, 0xf4,\n};\n\n// ntdll!strrchr in Win11\n//   4c8bc1          mov     r8,rcx\n//   8a01            mov     al,byte ptr [rcx]\n//   48ffc1          inc     rcx\n//   84c0            test    al,al\n//   75f7            jne     -9\nstatic const u8 kPrologueWithShortJump2[] = {\n    0x4c, 0x8b, 0xc1, 0x8a, 0x01, 0x48, 0xff, 0xc1,\n    0x84, 0xc0, 0x75, 0xf7,\n};\n#endif\n\n// Returns 0 on error.\nstatic size_t GetInstructionSize(uptr address, size_t* 
rel_offset = nullptr) {\n#if SANITIZER_WINDOWS64\n  if (memcmp((u8*)address, kPrologueWithShortJump1,\n             sizeof(kPrologueWithShortJump1)) == 0 ||\n      memcmp((u8*)address, kPrologueWithShortJump2,\n             sizeof(kPrologueWithShortJump2)) == 0) {\n    return 0;\n  }\n#endif\n\n  switch (*(u64*)address) {\n    case 0x90909090909006EB:  // stub: jmp over 6 x nop.\n      return 8;\n  }\n\n  switch (*(u8*)address) {\n    case 0x90:  // 90 : nop\n      return 1;\n\n    case 0x50:  // push eax / rax\n    case 0x51:  // push ecx / rcx\n    case 0x52:  // push edx / rdx\n    case 0x53:  // push ebx / rbx\n    case 0x54:  // push esp / rsp\n    case 0x55:  // push ebp / rbp\n    case 0x56:  // push esi / rsi\n    case 0x57:  // push edi / rdi\n    case 0x5D:  // pop ebp / rbp\n      return 1;\n\n    case 0x6A:  // 6A XX = push XX\n      return 2;\n\n    case 0xb8:  // b8 XX XX XX XX : mov eax, XX XX XX XX\n    case 0xB9:  // b9 XX XX XX XX : mov ecx, XX XX XX XX\n      return 5;\n\n    // Cannot overwrite control-instruction. 
Return 0 to indicate failure.\n    case 0xE9:  // E9 XX XX XX XX : jmp <label>\n    case 0xE8:  // E8 XX XX XX XX : call <func>\n    case 0xC3:  // C3 : ret\n    case 0xEB:  // EB XX : jmp XX (short jump)\n    case 0x70:  // 7Y YY : jy XX (short conditional jump)\n    case 0x71:\n    case 0x72:\n    case 0x73:\n    case 0x74:\n    case 0x75:\n    case 0x76:\n    case 0x77:\n    case 0x78:\n    case 0x79:\n    case 0x7A:\n    case 0x7B:\n    case 0x7C:\n    case 0x7D:\n    case 0x7E:\n    case 0x7F:\n      return 0;\n  }\n\n  switch (*(u16*)(address)) {\n    case 0x018A:  // 8A 01 : mov al, byte ptr [ecx]\n    case 0xFF8B:  // 8B FF : mov edi, edi\n    case 0xEC8B:  // 8B EC : mov ebp, esp\n    case 0xc889:  // 89 C8 : mov eax, ecx\n    case 0xC18B:  // 8B C1 : mov eax, ecx\n    case 0xC033:  // 33 C0 : xor eax, eax\n    case 0xC933:  // 33 C9 : xor ecx, ecx\n    case 0xD233:  // 33 D2 : xor edx, edx\n      return 2;\n\n    // Cannot overwrite control-instruction. Return 0 to indicate failure.\n    case 0x25FF:  // FF 25 XX XX XX XX : jmp [XXXXXXXX]\n      return 0;\n  }\n\n  switch (0x00FFFFFF & *(u32*)address) {\n    case 0x24A48D:  // 8D A4 24 XX XX XX XX : lea esp, [esp + XX XX XX XX]\n      return 7;\n  }\n\n#if SANITIZER_WINDOWS64\n  switch (*(u8*)address) {\n    case 0xA1:  // A1 XX XX XX XX XX XX XX XX :\n                //   movabs eax, dword ptr ds:[XXXXXXXX]\n      return 9;\n\n    case 0x83:\n      const u8 next_byte = *(u8*)(address + 1);\n      const u8 mod = next_byte >> 6;\n      const u8 rm = next_byte & 7;\n      if (mod == 1 && rm == 4)\n        return 5;  // 83 ModR/M SIB Disp8 Imm8\n                   //   add|or|adc|sbb|and|sub|xor|cmp [r+disp8], imm8\n  }\n\n  switch (*(u16*)address) {\n    case 0x5040:  // push rax\n    case 0x5140:  // push rcx\n    case 0x5240:  // push rdx\n    case 0x5340:  // push rbx\n    case 0x5440:  // push rsp\n    case 0x5540:  // push rbp\n    case 0x5640:  // push rsi\n    case 0x5740:  // push rdi\n    case 
0x5441:  // push r12\n    case 0x5541:  // push r13\n    case 0x5641:  // push r14\n    case 0x5741:  // push r15\n    case 0x9066:  // Two-byte NOP\n    case 0xc084:  // test al, al\n    case 0x018a:  // mov al, byte ptr [rcx]\n      return 2;\n\n    case 0x058B:  // 8B 05 XX XX XX XX : mov eax, dword ptr [XX XX XX XX]\n      if (rel_offset)\n        *rel_offset = 2;\n      return 6;\n  }\n\n  switch (0x00FFFFFF & *(u32*)address) {\n    case 0xe58948:    // 48 8b c4 : mov rbp, rsp\n    case 0xc18b48:    // 48 8b c1 : mov rax, rcx\n    case 0xc48b48:    // 48 8b c4 : mov rax, rsp\n    case 0xd9f748:    // 48 f7 d9 : neg rcx\n    case 0xd12b48:    // 48 2b d1 : sub rdx, rcx\n    case 0x07c1f6:    // f6 c1 07 : test cl, 0x7\n    case 0xc98548:    // 48 85 C9 : test rcx, rcx\n    case 0xd28548:    // 48 85 d2 : test rdx, rdx\n    case 0xc0854d:    // 4d 85 c0 : test r8, r8\n    case 0xc2b60f:    // 0f b6 c2 : movzx eax, dl\n    case 0xc03345:    // 45 33 c0 : xor r8d, r8d\n    case 0xc93345:    // 45 33 c9 : xor r9d, r9d\n    case 0xdb3345:    // 45 33 DB : xor r11d, r11d\n    case 0xd98b4c:    // 4c 8b d9 : mov r11, rcx\n    case 0xd28b4c:    // 4c 8b d2 : mov r10, rdx\n    case 0xc98b4c:    // 4C 8B C9 : mov r9, rcx\n    case 0xc18b4c:    // 4C 8B C1 : mov r8, rcx\n    case 0xd2b60f:    // 0f b6 d2 : movzx edx, dl\n    case 0xca2b48:    // 48 2b ca : sub rcx, rdx\n    case 0x10b70f:    // 0f b7 10 : movzx edx, WORD PTR [rax]\n    case 0xc00b4d:    // 3d 0b c0 : or r8, r8\n    case 0xc08b41:    // 41 8b c0 : mov eax, r8d\n    case 0xd18b48:    // 48 8b d1 : mov rdx, rcx\n    case 0xdc8b4c:    // 4c 8b dc : mov r11, rsp\n    case 0xd18b4c:    // 4c 8b d1 : mov r10, rcx\n    case 0xE0E483:    // 83 E4 E0 : and esp, 0xFFFFFFE0\n      return 3;\n\n    case 0xec8348:    // 48 83 ec XX : sub rsp, XX\n    case 0xf88349:    // 49 83 f8 XX : cmp r8, XX\n    case 0x588948:    // 48 89 58 XX : mov QWORD PTR[rax + XX], rbx\n      return 4;\n\n    case 0xec8148:    // 48 81 EC XX 
XX XX XX : sub rsp, XXXXXXXX\n      return 7;\n\n    case 0x058b48:    // 48 8b 05 XX XX XX XX :\n                      //   mov rax, QWORD PTR [rip + XXXXXXXX]\n    case 0x25ff48:    // 48 ff 25 XX XX XX XX :\n                      //   rex.W jmp QWORD PTR [rip + XXXXXXXX]\n\n      // Instructions having offset relative to 'rip' need offset adjustment.\n      if (rel_offset)\n        *rel_offset = 3;\n      return 7;\n\n    case 0x2444c7:    // C7 44 24 XX YY YY YY YY\n                      //   mov dword ptr [rsp + XX], YYYYYYYY\n      return 8;\n  }\n\n  switch (*(u32*)(address)) {\n    case 0x24448b48:  // 48 8b 44 24 XX : mov rax, QWORD ptr [rsp + XX]\n    case 0x246c8948:  // 48 89 6C 24 XX : mov QWORD ptr [rsp + XX], rbp\n    case 0x245c8948:  // 48 89 5c 24 XX : mov QWORD PTR [rsp + XX], rbx\n    case 0x24748948:  // 48 89 74 24 XX : mov QWORD PTR [rsp + XX], rsi\n    case 0x247c8948:  // 48 89 7c 24 XX : mov QWORD PTR [rsp + XX], rdi\n    case 0x244C8948:  // 48 89 4C 24 XX : mov QWORD PTR [rsp + XX], rcx\n    case 0x24548948:  // 48 89 54 24 XX : mov QWORD PTR [rsp + XX], rdx\n    case 0x244c894c:  // 4c 89 4c 24 XX : mov QWORD PTR [rsp + XX], r9\n    case 0x2444894c:  // 4c 89 44 24 XX : mov QWORD PTR [rsp + XX], r8\n      return 5;\n    case 0x24648348:  // 48 83 64 24 XX : and QWORD PTR [rsp + XX], YY\n      return 6;\n  }\n\n#else\n\n  switch (*(u8*)address) {\n    case 0xA1:  // A1 XX XX XX XX :  mov eax, dword ptr ds:[XXXXXXXX]\n      return 5;\n  }\n  switch (*(u16*)address) {\n    case 0x458B:  // 8B 45 XX : mov eax, dword ptr [ebp + XX]\n    case 0x5D8B:  // 8B 5D XX : mov ebx, dword ptr [ebp + XX]\n    case 0x7D8B:  // 8B 7D XX : mov edi, dword ptr [ebp + XX]\n    case 0xEC83:  // 83 EC XX : sub esp, XX\n    case 0x75FF:  // FF 75 XX : push dword ptr [ebp + XX]\n      return 3;\n    case 0xC1F7:  // F7 C1 XX YY ZZ WW : test ecx, WWZZYYXX\n    case 0x25FF:  // FF 25 XX YY ZZ WW : jmp dword ptr ds:[WWZZYYXX]\n      return 6;\n    case 0x3D83:  // 
83 3D XX YY ZZ WW TT : cmp TT, WWZZYYXX\n      return 7;\n    case 0x7D83:  // 83 7D XX YY : cmp dword ptr [ebp + XX], YY\n      return 4;\n  }\n\n  switch (0x00FFFFFF & *(u32*)address) {\n    case 0x24448A:  // 8A 44 24 XX : mov eal, dword ptr [esp + XX]\n    case 0x24448B:  // 8B 44 24 XX : mov eax, dword ptr [esp + XX]\n    case 0x244C8B:  // 8B 4C 24 XX : mov ecx, dword ptr [esp + XX]\n    case 0x24548B:  // 8B 54 24 XX : mov edx, dword ptr [esp + XX]\n    case 0x24748B:  // 8B 74 24 XX : mov esi, dword ptr [esp + XX]\n    case 0x247C8B:  // 8B 7C 24 XX : mov edi, dword ptr [esp + XX]\n      return 4;\n  }\n\n  switch (*(u32*)address) {\n    case 0x2444B60F:  // 0F B6 44 24 XX : movzx eax, byte ptr [esp + XX]\n      return 5;\n  }\n#endif\n\n  // Unknown instruction!\n  // FIXME: Unknown instruction failures might happen when we add a new\n  // interceptor or a new compiler version. In either case, they should result\n  // in visible and readable error messages. However, merely calling abort()\n  // leads to an infinite recursion in CheckFailed.\n  InterceptionFailed();\n  return 0;\n}\n\n// Returns 0 on error.\nstatic size_t RoundUpToInstrBoundary(size_t size, uptr address) {\n  size_t cursor = 0;\n  while (cursor < size) {\n    size_t instruction_size = GetInstructionSize(address + cursor);\n    if (!instruction_size)\n      return 0;\n    cursor += instruction_size;\n  }\n  return cursor;\n}\n\nstatic bool CopyInstructions(uptr to, uptr from, size_t size) {\n  size_t cursor = 0;\n  while (cursor != size) {\n    size_t rel_offset = 0;\n    size_t instruction_size = GetInstructionSize(from + cursor, &rel_offset);\n    _memcpy((void*)(to + cursor), (void*)(from + cursor),\n            (size_t)instruction_size);\n    if (rel_offset) {\n      uptr delta = to - from;\n      uptr relocated_offset = *(u32*)(to + cursor + rel_offset) - delta;\n#if SANITIZER_WINDOWS64\n      if (relocated_offset + 0x80000000U >= 0xFFFFFFFFU)\n        return false;\n#endif\n      
*(u32*)(to + cursor + rel_offset) = relocated_offset;\n    }\n    cursor += instruction_size;\n  }\n  return true;\n}\n\n\n#if !SANITIZER_WINDOWS64\nbool OverrideFunctionWithDetour(\n    uptr old_func, uptr new_func, uptr *orig_old_func) {\n  const int kDetourHeaderLen = 5;\n  const u16 kDetourInstruction = 0xFF8B;\n\n  uptr header = (uptr)old_func - kDetourHeaderLen;\n  uptr patch_length = kDetourHeaderLen + kShortJumpInstructionLength;\n\n  // Validate that the function is hookable.\n  if (*(u16*)old_func != kDetourInstruction ||\n      !IsMemoryPadding(header, kDetourHeaderLen))\n    return false;\n\n  // Change memory protection to writable.\n  DWORD protection = 0;\n  if (!ChangeMemoryProtection(header, patch_length, &protection))\n    return false;\n\n  // Write a relative jump to the redirected function.\n  WriteJumpInstruction(header, new_func);\n\n  // Write the short jump to the function prefix.\n  WriteShortJumpInstruction(old_func, header);\n\n  // Restore previous memory protection.\n  if (!RestoreMemoryProtection(header, patch_length, protection))\n    return false;\n\n  if (orig_old_func)\n    *orig_old_func = old_func + kShortJumpInstructionLength;\n\n  return true;\n}\n#endif\n\nbool OverrideFunctionWithRedirectJump(\n    uptr old_func, uptr new_func, uptr *orig_old_func) {\n  // Check whether the first instruction is a relative jump.\n  if (*(u8*)old_func != 0xE9)\n    return false;\n\n  if (orig_old_func) {\n    uptr relative_offset = *(u32*)(old_func + 1);\n    uptr absolute_target = old_func + relative_offset + kJumpInstructionLength;\n    *orig_old_func = absolute_target;\n  }\n\n#if SANITIZER_WINDOWS64\n  // If needed, get memory space for a trampoline jump.\n  uptr trampoline = AllocateMemoryForTrampoline(old_func, kDirectBranchLength);\n  if (!trampoline)\n    return false;\n  WriteDirectBranch(trampoline, new_func);\n#endif\n\n  // Change memory protection to writable.\n  DWORD protection = 0;\n  if (!ChangeMemoryProtection(old_func, 
kJumpInstructionLength, &protection))\n    return false;\n\n  // Write a relative jump to the redirected function.\n  WriteJumpInstruction(old_func, FIRST_32_SECOND_64(new_func, trampoline));\n\n  // Restore previous memory protection.\n  if (!RestoreMemoryProtection(old_func, kJumpInstructionLength, protection))\n    return false;\n\n  return true;\n}\n\nbool OverrideFunctionWithHotPatch(\n    uptr old_func, uptr new_func, uptr *orig_old_func) {\n  const int kHotPatchHeaderLen = kBranchLength;\n\n  uptr header = (uptr)old_func - kHotPatchHeaderLen;\n  uptr patch_length = kHotPatchHeaderLen + kShortJumpInstructionLength;\n\n  // Validate that the function is hot patchable.\n  size_t instruction_size = GetInstructionSize(old_func);\n  if (instruction_size < kShortJumpInstructionLength ||\n      !FunctionHasPadding(old_func, kHotPatchHeaderLen))\n    return false;\n\n  if (orig_old_func) {\n    // Put the needed instructions into the trampoline bytes.\n    uptr trampoline_length = instruction_size + kDirectBranchLength;\n    uptr trampoline = AllocateMemoryForTrampoline(old_func, trampoline_length);\n    if (!trampoline)\n      return false;\n    if (!CopyInstructions(trampoline, old_func, instruction_size))\n      return false;\n    WriteDirectBranch(trampoline + instruction_size,\n                      old_func + instruction_size);\n    *orig_old_func = trampoline;\n  }\n\n  // If needed, get memory space for indirect address.\n  uptr indirect_address = 0;\n#if SANITIZER_WINDOWS64\n  indirect_address = AllocateMemoryForTrampoline(old_func, kAddressLength);\n  if (!indirect_address)\n    return false;\n#endif\n\n  // Change memory protection to writable.\n  DWORD protection = 0;\n  if (!ChangeMemoryProtection(header, patch_length, &protection))\n    return false;\n\n  // Write jumps to the redirected function.\n  WriteBranch(header, indirect_address, new_func);\n  WriteShortJumpInstruction(old_func, header);\n\n  // Restore previous memory protection.\n  if 
(!RestoreMemoryProtection(header, patch_length, protection))\n    return false;\n\n  return true;\n}\n\nbool OverrideFunctionWithTrampoline(\n    uptr old_func, uptr new_func, uptr *orig_old_func) {\n\n  size_t instructions_length = kBranchLength;\n  size_t padding_length = 0;\n  uptr indirect_address = 0;\n\n  if (orig_old_func) {\n    // Find out the number of bytes of the instructions we need to copy\n    // to the trampoline.\n    instructions_length = RoundUpToInstrBoundary(kBranchLength, old_func);\n    if (!instructions_length)\n      return false;\n\n    // Put the needed instructions into the trampoline bytes.\n    uptr trampoline_length = instructions_length + kDirectBranchLength;\n    uptr trampoline = AllocateMemoryForTrampoline(old_func, trampoline_length);\n    if (!trampoline)\n      return false;\n    if (!CopyInstructions(trampoline, old_func, instructions_length))\n      return false;\n    WriteDirectBranch(trampoline + instructions_length,\n                      old_func + instructions_length);\n    *orig_old_func = trampoline;\n  }\n\n#if SANITIZER_WINDOWS64\n  // Check if the targeted address can be encoded in the function padding.\n  // Otherwise, allocate it in the trampoline region.\n  if (IsMemoryPadding(old_func - kAddressLength, kAddressLength)) {\n    indirect_address = old_func - kAddressLength;\n    padding_length = kAddressLength;\n  } else {\n    indirect_address = AllocateMemoryForTrampoline(old_func, kAddressLength);\n    if (!indirect_address)\n      return false;\n  }\n#endif\n\n  // Change memory protection to writable.\n  uptr patch_address = old_func - padding_length;\n  uptr patch_length = instructions_length + padding_length;\n  DWORD protection = 0;\n  if (!ChangeMemoryProtection(patch_address, patch_length, &protection))\n    return false;\n\n  // Patch the original function.\n  WriteBranch(old_func, indirect_address, new_func);\n\n  // Restore previous memory protection.\n  if (!RestoreMemoryProtection(patch_address, 
patch_length, protection))\n    return false;\n\n  return true;\n}\n\nbool OverrideFunction(\n    uptr old_func, uptr new_func, uptr *orig_old_func) {\n#if !SANITIZER_WINDOWS64\n  if (OverrideFunctionWithDetour(old_func, new_func, orig_old_func))\n    return true;\n#endif\n  if (OverrideFunctionWithRedirectJump(old_func, new_func, orig_old_func))\n    return true;\n  if (OverrideFunctionWithHotPatch(old_func, new_func, orig_old_func))\n    return true;\n  if (OverrideFunctionWithTrampoline(old_func, new_func, orig_old_func))\n    return true;\n  return false;\n}\n\nstatic void **InterestingDLLsAvailable() {\n  static const char *InterestingDLLs[] = {\n      \"kernel32.dll\",\n      \"msvcr100.dll\",      // VS2010\n      \"msvcr110.dll\",      // VS2012\n      \"msvcr120.dll\",      // VS2013\n      \"vcruntime140.dll\",  // VS2015\n      \"ucrtbase.dll\",      // Universal CRT\n      // NTDLL should go last as it exports some functions that we should\n      // override in the CRT [presumably only used internally].\n      \"ntdll.dll\", NULL};\n  static void *result[ARRAY_SIZE(InterestingDLLs)] = { 0 };\n  if (!result[0]) {\n    for (size_t i = 0, j = 0; InterestingDLLs[i]; ++i) {\n      if (HMODULE h = GetModuleHandleA(InterestingDLLs[i]))\n        result[j++] = (void *)h;\n    }\n  }\n  return &result[0];\n}\n\nnamespace {\n// Utility for reading loaded PE images.\ntemplate <typename T> class RVAPtr {\n public:\n  RVAPtr(void *module, uptr rva)\n      : ptr_(reinterpret_cast<T *>(reinterpret_cast<char *>(module) + rva)) {}\n  operator T *() { return ptr_; }\n  T *operator->() { return ptr_; }\n  T *operator++() { return ++ptr_; }\n\n private:\n  T *ptr_;\n};\n} // namespace\n\n// Internal implementation of GetProcAddress. At least since Windows 8,\n// GetProcAddress appears to initialize DLLs before returning function pointers\n// into them. This is problematic for the sanitizers, because they typically\n// want to intercept malloc *before* MSVCRT initializes. 
Our internal\n// implementation walks the export list manually without doing initialization.\nuptr InternalGetProcAddress(void *module, const char *func_name) {\n  // Check that the module header is full and present.\n  RVAPtr<IMAGE_DOS_HEADER> dos_stub(module, 0);\n  RVAPtr<IMAGE_NT_HEADERS> headers(module, dos_stub->e_lfanew);\n  if (!module || dos_stub->e_magic != IMAGE_DOS_SIGNATURE ||  // \"MZ\"\n      headers->Signature != IMAGE_NT_SIGNATURE ||             // \"PE\\0\\0\"\n      headers->FileHeader.SizeOfOptionalHeader <\n          sizeof(IMAGE_OPTIONAL_HEADER)) {\n    return 0;\n  }\n\n  IMAGE_DATA_DIRECTORY *export_directory =\n      &headers->OptionalHeader.DataDirectory[IMAGE_DIRECTORY_ENTRY_EXPORT];\n  if (export_directory->Size == 0)\n    return 0;\n  RVAPtr<IMAGE_EXPORT_DIRECTORY> exports(module,\n                                         export_directory->VirtualAddress);\n  RVAPtr<DWORD> functions(module, exports->AddressOfFunctions);\n  RVAPtr<DWORD> names(module, exports->AddressOfNames);\n  RVAPtr<WORD> ordinals(module, exports->AddressOfNameOrdinals);\n\n  for (DWORD i = 0; i < exports->NumberOfNames; i++) {\n    RVAPtr<char> name(module, names[i]);\n    if (!strcmp(func_name, name)) {\n      DWORD index = ordinals[i];\n      RVAPtr<char> func(module, functions[index]);\n\n      // Handle forwarded functions.\n      DWORD offset = functions[index];\n      if (offset >= export_directory->VirtualAddress &&\n          offset < export_directory->VirtualAddress + export_directory->Size) {\n        // An entry for a forwarded function is a string with the following\n        // format: \"<module> . 
<function_name>\" that is stored into the\n        // exported directory.\n        char function_name[256];\n        size_t funtion_name_length = _strlen(func);\n        if (funtion_name_length >= sizeof(function_name) - 1)\n          InterceptionFailed();\n\n        _memcpy(function_name, func, funtion_name_length);\n        function_name[funtion_name_length] = '\\0';\n        char* separator = _strchr(function_name, '.');\n        if (!separator)\n          InterceptionFailed();\n        *separator = '\\0';\n\n        void* redirected_module = GetModuleHandleA(function_name);\n        if (!redirected_module)\n          InterceptionFailed();\n        return InternalGetProcAddress(redirected_module, separator + 1);\n      }\n\n      return (uptr)(char *)func;\n    }\n  }\n\n  return 0;\n}\n\nbool OverrideFunction(\n    const char *func_name, uptr new_func, uptr *orig_old_func) {\n  bool hooked = false;\n  void **DLLs = InterestingDLLsAvailable();\n  for (size_t i = 0; DLLs[i]; ++i) {\n    uptr func_addr = InternalGetProcAddress(DLLs[i], func_name);\n    if (func_addr &&\n        OverrideFunction(func_addr, new_func, orig_old_func)) {\n      hooked = true;\n    }\n  }\n  return hooked;\n}\n\nbool OverrideImportedFunction(const char *module_to_patch,\n                              const char *imported_module,\n                              const char *function_name, uptr new_function,\n                              uptr *orig_old_func) {\n  HMODULE module = GetModuleHandleA(module_to_patch);\n  if (!module)\n    return false;\n\n  // Check that the module header is full and present.\n  RVAPtr<IMAGE_DOS_HEADER> dos_stub(module, 0);\n  RVAPtr<IMAGE_NT_HEADERS> headers(module, dos_stub->e_lfanew);\n  if (!module || dos_stub->e_magic != IMAGE_DOS_SIGNATURE ||  // \"MZ\"\n      headers->Signature != IMAGE_NT_SIGNATURE ||             // \"PE\\0\\0\"\n      headers->FileHeader.SizeOfOptionalHeader <\n          sizeof(IMAGE_OPTIONAL_HEADER)) {\n    return false;\n  }\n\n  
IMAGE_DATA_DIRECTORY *import_directory =\n      &headers->OptionalHeader.DataDirectory[IMAGE_DIRECTORY_ENTRY_IMPORT];\n\n  // Iterate the list of imported DLLs. FirstThunk will be null for the last\n  // entry.\n  RVAPtr<IMAGE_IMPORT_DESCRIPTOR> imports(module,\n                                          import_directory->VirtualAddress);\n  for (; imports->FirstThunk != 0; ++imports) {\n    RVAPtr<const char> modname(module, imports->Name);\n    if (_stricmp(&*modname, imported_module) == 0)\n      break;\n  }\n  if (imports->FirstThunk == 0)\n    return false;\n\n  // We have two parallel arrays: the import address table (IAT) and the table\n  // of names. They start out containing the same data, but the loader rewrites\n  // the IAT to hold imported addresses and leaves the name table in\n  // OriginalFirstThunk alone.\n  RVAPtr<IMAGE_THUNK_DATA> name_table(module, imports->OriginalFirstThunk);\n  RVAPtr<IMAGE_THUNK_DATA> iat(module, imports->FirstThunk);\n  for (; name_table->u1.Ordinal != 0; ++name_table, ++iat) {\n    if (!IMAGE_SNAP_BY_ORDINAL(name_table->u1.Ordinal)) {\n      RVAPtr<IMAGE_IMPORT_BY_NAME> import_by_name(\n          module, name_table->u1.ForwarderString);\n      const char *funcname = &import_by_name->Name[0];\n      if (strcmp(funcname, function_name) == 0)\n        break;\n    }\n  }\n  if (name_table->u1.Ordinal == 0)\n    return false;\n\n  // Now we have the correct IAT entry. Do the swap. We have to make the page\n  // read/write first.\n  if (orig_old_func)\n    *orig_old_func = iat->u1.AddressOfData;\n  DWORD old_prot, unused_prot;\n  if (!VirtualProtect(&iat->u1.AddressOfData, 4, PAGE_EXECUTE_READWRITE,\n                      &old_prot))\n    return false;\n  iat->u1.AddressOfData = new_function;\n  if (!VirtualProtect(&iat->u1.AddressOfData, 4, old_prot, &unused_prot))\n    return false;  // Not clear if this failure bothers us.\n  return true;\n}\n\n}  // namespace __interception\n\n#endif  // SANITIZER_MAC\n"
  },
  {
    "path": "runtime/interception/interception_win.h",
    "content": "//===-- interception_linux.h ------------------------------------*- C++ -*-===//\n//\n// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n// See https://llvm.org/LICENSE.txt for license information.\n// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n//\n//===----------------------------------------------------------------------===//\n//\n// This file is a part of AddressSanitizer, an address sanity checker.\n//\n// Windows-specific interception methods.\n//===----------------------------------------------------------------------===//\n\n#if SANITIZER_WINDOWS\n\n#if !defined(INCLUDED_FROM_INTERCEPTION_LIB)\n# error \"interception_win.h should be included from interception library only\"\n#endif\n\n#ifndef INTERCEPTION_WIN_H\n#define INTERCEPTION_WIN_H\n\nnamespace __interception {\n// All the functions in the OverrideFunction() family return true on success,\n// false on failure (including \"couldn't find the function\").\n\n// Overrides a function by its address.\nbool OverrideFunction(uptr old_func, uptr new_func, uptr *orig_old_func = 0);\n\n// Overrides a function in a system DLL or DLL CRT by its exported name.\nbool OverrideFunction(const char *name, uptr new_func, uptr *orig_old_func = 0);\n\n// Windows-only replacement for GetProcAddress. Useful for some sanitizers.\nuptr InternalGetProcAddress(void *module, const char *func_name);\n\n// Overrides a function only when it is called from a specific DLL. 
For example,\n// this is used to override calls to HeapAlloc/HeapFree from ucrtbase without\n// affecting other third party libraries.\nbool OverrideImportedFunction(const char *module_to_patch,\n                              const char *imported_module,\n                              const char *function_name, uptr new_function,\n                              uptr *orig_old_func);\n\n#if !SANITIZER_WINDOWS64\n// Exposed for unittests\nbool OverrideFunctionWithDetour(\n    uptr old_func, uptr new_func, uptr *orig_old_func);\n#endif\n\n// Exposed for unittests\nbool OverrideFunctionWithRedirectJump(\n    uptr old_func, uptr new_func, uptr *orig_old_func);\nbool OverrideFunctionWithHotPatch(\n    uptr old_func, uptr new_func, uptr *orig_old_func);\nbool OverrideFunctionWithTrampoline(\n    uptr old_func, uptr new_func, uptr *orig_old_func);\n\n// Exposed for unittests\nvoid TestOnlyReleaseTrampolineRegions();\n\n}  // namespace __interception\n\n#if defined(INTERCEPTION_DYNAMIC_CRT)\n#define INTERCEPT_FUNCTION_WIN(func)                                           \\\n  ::__interception::OverrideFunction(#func,                                    \\\n                                     (::__interception::uptr)WRAP(func),       \\\n                                     (::__interception::uptr *)&REAL(func))\n#else\n#define INTERCEPT_FUNCTION_WIN(func)                                           \\\n  ::__interception::OverrideFunction((::__interception::uptr)func,             \\\n                                     (::__interception::uptr)WRAP(func),       \\\n                                     (::__interception::uptr *)&REAL(func))\n#endif\n\n#define INTERCEPT_FUNCTION_VER_WIN(func, symver) INTERCEPT_FUNCTION_WIN(func)\n\n#define INTERCEPT_FUNCTION_DLLIMPORT(user_dll, provider_dll, func)       \\\n  ::__interception::OverrideImportedFunction(                            \\\n      user_dll, provider_dll, #func, (::__interception::uptr)WRAP(func), \\\n      
(::__interception::uptr *)&REAL(func))\n\n#endif  // INTERCEPTION_WIN_H\n#endif  // SANITIZER_WINDOWS\n"
  },
  {
    "path": "runtime/interception/tests/CMakeLists.txt",
    "content": "include(CompilerRTCompile)\n\nfilter_available_targets(INTERCEPTION_UNITTEST_SUPPORTED_ARCH x86_64 i386 mips64 mips64el)\n\nset(INTERCEPTION_UNITTESTS\n  interception_linux_test.cpp\n  interception_test_main.cpp\n  interception_win_test.cpp\n  )\n\nset(INTERCEPTION_TEST_HEADERS)\n\nset(INTERCEPTION_TEST_CFLAGS_COMMON\n  ${COMPILER_RT_UNITTEST_CFLAGS}\n  ${COMPILER_RT_GTEST_CFLAGS}\n  -I${COMPILER_RT_SOURCE_DIR}/include\n  -I${COMPILER_RT_SOURCE_DIR}/lib\n  -I${COMPILER_RT_SOURCE_DIR}/lib/interception\n  -fno-rtti\n  -O2\n  -Werror=sign-compare)\n\nset(INTERCEPTION_TEST_LINK_FLAGS_COMMON\n  ${COMPILER_RT_UNITTEST_LINK_FLAGS})\n\n# -gline-tables-only must be enough for these tests, so use it if possible.\nif(COMPILER_RT_TEST_COMPILER_ID MATCHES \"Clang\")\n  list(APPEND INTERCEPTION_TEST_CFLAGS_COMMON -gline-tables-only)\nelse()\n  list(APPEND INTERCEPTION_TEST_CFLAGS_COMMON -g)\nendif()\nif(MSVC)\n  list(APPEND INTERCEPTION_TEST_CFLAGS_COMMON -gcodeview)\n  list(APPEND INTERCEPTION_TEST_LINK_FLAGS_COMMON\n    -Wl,-largeaddressaware\n    -Wl,-nodefaultlib:libcmt,-defaultlib:msvcrt,-defaultlib:oldnames\n    )\nendif()\nlist(APPEND INTERCEPTION_TEST_LINK_FLAGS_COMMON -g)\n\nif(NOT MSVC)\n  list(APPEND INTERCEPTION_TEST_LINK_FLAGS_COMMON --driver-mode=g++)\nendif()\n\nif(ANDROID)\n  list(APPEND INTERCEPTION_TEST_LINK_FLAGS_COMMON -pie)\nendif()\n\nset(INTERCEPTION_TEST_LINK_LIBS)\nappend_list_if(COMPILER_RT_HAS_LIBLOG log INTERCEPTION_TEST_LINK_LIBS)\n# NDK r10 requires -latomic almost always.\nappend_list_if(ANDROID atomic INTERCEPTION_TEST_LINK_LIBS)\n\nappend_list_if(COMPILER_RT_HAS_LIBDL -ldl INTERCEPTION_TEST_LINK_FLAGS_COMMON)\nappend_list_if(COMPILER_RT_HAS_LIBRT -lrt INTERCEPTION_TEST_LINK_FLAGS_COMMON)\nappend_list_if(COMPILER_RT_HAS_LIBPTHREAD -pthread INTERCEPTION_TEST_LINK_FLAGS_COMMON)\n# x86_64 FreeBSD 9.2 additionally requires libc++ to build the tests. 
Also,\n# 'libm' shall be specified explicitly to build i386 tests.\nif(CMAKE_SYSTEM MATCHES \"FreeBSD-9.2-RELEASE\")\n  list(APPEND INTERCEPTION_TEST_LINK_FLAGS_COMMON \"-lc++ -lm\")\nendif()\n\ninclude_directories(..)\ninclude_directories(../..)\n\n# Adds static library which contains interception object file\n# (universal binary on Mac and arch-specific object files on Linux).\nmacro(add_interceptor_lib library)\n  add_library(${library} STATIC ${ARGN})\n  set_target_properties(${library} PROPERTIES\n    ARCHIVE_OUTPUT_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}\n    FOLDER \"Compiler-RT Runtime tests\")\nendmacro()\n\nfunction(get_interception_lib_for_arch arch lib)\n  if(APPLE)\n    set(tgt_name \"RTInterception.test.osx\")\n  else()\n    set(tgt_name \"RTInterception.test.${arch}\")\n  endif()\n  set(${lib} \"${tgt_name}\" PARENT_SCOPE)\nendfunction()\n\n# Interception unit tests testsuite.\nadd_custom_target(InterceptionUnitTests)\nset_target_properties(InterceptionUnitTests PROPERTIES\n  FOLDER \"Compiler-RT Tests\")\n\n# Adds interception tests for architecture.\nmacro(add_interception_tests_for_arch arch)\n  set(INTERCEPTION_TEST_OBJECTS)\n  get_interception_lib_for_arch(${arch} INTERCEPTION_COMMON_LIB)\n  generate_compiler_rt_tests(INTERCEPTION_TEST_OBJECTS\n    InterceptionUnitTests \"Interception-${arch}-Test\" ${arch}\n    RUNTIME ${INTERCEPTION_COMMON_LIB}\n    SOURCES ${INTERCEPTION_UNITTESTS} ${COMPILER_RT_GTEST_SOURCE}\n    COMPILE_DEPS ${INTERCEPTION_TEST_HEADERS}\n    DEPS gtest\n    CFLAGS ${INTERCEPTION_TEST_CFLAGS_COMMON}\n    LINK_FLAGS ${INTERCEPTION_TEST_LINK_FLAGS_COMMON})\nendmacro()\n\nif(COMPILER_RT_CAN_EXECUTE_TESTS AND NOT ANDROID AND NOT APPLE)\n  # We use just-built clang to build interception unittests, so we must\n  # be sure that produced binaries would work.\n  if(APPLE)\n    add_interceptor_lib(\"RTInterception.test.osx\"\n                        $<TARGET_OBJECTS:RTInterception.osx>)\n  else()\n    foreach(arch 
${INTERCEPTION_UNITTEST_SUPPORTED_ARCH})\n      add_interceptor_lib(\"RTInterception.test.${arch}\"\n                          $<TARGET_OBJECTS:RTInterception.${arch}>)\n    endforeach()\n  endif()\n  foreach(arch ${INTERCEPTION_UNITTEST_SUPPORTED_ARCH})\n    add_interception_tests_for_arch(${arch})\n  endforeach()\nendif()\n"
  },
  {
    "path": "runtime/interception/tests/interception_linux_test.cpp",
    "content": "//===-- interception_linux_test.cpp ---------------------------------------===//\n//\n// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n// See https://llvm.org/LICENSE.txt for license information.\n// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n//\n//===----------------------------------------------------------------------===//\n//\n// This file is a part of ThreadSanitizer/AddressSanitizer runtime.\n// Tests for interception_linux.h.\n//\n//===----------------------------------------------------------------------===//\n\n// Do not declare isdigit in ctype.h.\n#define __NO_CTYPE\n\n#include \"interception/interception.h\"\n\n#include \"gtest/gtest.h\"\n\n// Too slow for debug build\n#if !SANITIZER_DEBUG\n#if SANITIZER_LINUX\n\nstatic int InterceptorFunctionCalled;\n\nDECLARE_REAL(int, isdigit, int);\n\nINTERCEPTOR(int, isdigit, int d) {\n  ++InterceptorFunctionCalled;\n  return d >= '0' && d <= '9';\n}\n\nnamespace __interception {\n\nTEST(Interception, InterceptFunction) {\n  uptr malloc_address = 0;\n  EXPECT_TRUE(InterceptFunction(\"malloc\", &malloc_address, 0, 0));\n  EXPECT_NE(0U, malloc_address);\n  EXPECT_FALSE(InterceptFunction(\"malloc\", &malloc_address, 0, 1));\n\n  uptr dummy_address = 0;\n  EXPECT_FALSE(InterceptFunction(\"dummy_doesnt_exist__\", &dummy_address, 0, 0));\n  EXPECT_EQ(0U, dummy_address);\n}\n\nTEST(Interception, Basic) {\n  EXPECT_TRUE(INTERCEPT_FUNCTION(isdigit));\n\n  // After interception, the counter should be incremented.\n  InterceptorFunctionCalled = 0;\n  EXPECT_NE(0, isdigit('1'));\n  EXPECT_EQ(1, InterceptorFunctionCalled);\n  EXPECT_EQ(0, isdigit('a'));\n  EXPECT_EQ(2, InterceptorFunctionCalled);\n\n  // Calling the REAL function should not affect the counter.\n  InterceptorFunctionCalled = 0;\n  EXPECT_NE(0, REAL(isdigit)('1'));\n  EXPECT_EQ(0, REAL(isdigit)('a'));\n  EXPECT_EQ(0, InterceptorFunctionCalled);\n}\n\n}  // namespace __interception\n\n#endif  // 
SANITIZER_LINUX\n#endif  // #if !SANITIZER_DEBUG\n"
  },
  {
    "path": "runtime/interception/tests/interception_test_main.cpp",
    "content": "//===-- interception_test_main.cpp ----------------------------------------===//\n//\n// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n// See https://llvm.org/LICENSE.txt for license information.\n// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n//\n//===----------------------------------------------------------------------===//\n//\n// This file is a part of AddressSanitizer, an address sanity checker.\n//\n// Testing the machinery for providing replacements/wrappers for system\n// functions.\n//===----------------------------------------------------------------------===//\n\n#include \"gtest/gtest.h\"\n\nint main(int argc, char **argv) {\n  testing::GTEST_FLAG(death_test_style) = \"threadsafe\";\n  testing::InitGoogleTest(&argc, argv);\n  return RUN_ALL_TESTS();\n}\n"
  },
  {
    "path": "runtime/interception/tests/interception_win_test.cpp",
    "content": "//===-- interception_win_test.cpp -----------------------------------------===//\n//\n// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n// See https://llvm.org/LICENSE.txt for license information.\n// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n//\n//===----------------------------------------------------------------------===//\n//\n// This file is a part of ThreadSanitizer/AddressSanitizer runtime.\n// Tests for interception_win.h.\n//\n//===----------------------------------------------------------------------===//\n#include \"interception/interception.h\"\n\n#include \"gtest/gtest.h\"\n\n// Too slow for debug build\n#if !SANITIZER_DEBUG\n#if SANITIZER_WINDOWS\n\n#define WIN32_LEAN_AND_MEAN\n#include <windows.h>\n\nnamespace __interception {\nnamespace {\n\nenum FunctionPrefixKind {\n  FunctionPrefixNone,\n  FunctionPrefixPadding,\n  FunctionPrefixHotPatch,\n  FunctionPrefixDetour,\n};\n\ntypedef bool (*TestOverrideFunction)(uptr, uptr, uptr*);\ntypedef int (*IdentityFunction)(int);\n\n#if SANITIZER_WINDOWS64\n\nconst u8 kIdentityCodeWithPrologue[] = {\n    0x55,                   // push        rbp\n    0x48, 0x89, 0xE5,       // mov         rbp,rsp\n    0x8B, 0xC1,             // mov         eax,ecx\n    0x5D,                   // pop         rbp\n    0xC3,                   // ret\n};\n\nconst u8 kIdentityCodeWithPushPop[] = {\n    0x55,                   // push        rbp\n    0x48, 0x89, 0xE5,       // mov         rbp,rsp\n    0x53,                   // push        rbx\n    0x50,                   // push        rax\n    0x58,                   // pop         rax\n    0x8B, 0xC1,             // mov         rax,rcx\n    0x5B,                   // pop         rbx\n    0x5D,                   // pop         rbp\n    0xC3,                   // ret\n};\n\nconst u8 kIdentityTwiceOffset = 16;\nconst u8 kIdentityTwice[] = {\n    0x55,                   // push        rbp\n    0x48, 0x89, 0xE5,       // 
mov         rbp,rsp\n    0x8B, 0xC1,             // mov         eax,ecx\n    0x5D,                   // pop         rbp\n    0xC3,                   // ret\n    0x90, 0x90, 0x90, 0x90,\n    0x90, 0x90, 0x90, 0x90,\n    0x55,                   // push        rbp\n    0x48, 0x89, 0xE5,       // mov         rbp,rsp\n    0x8B, 0xC1,             // mov         eax,ecx\n    0x5D,                   // pop         rbp\n    0xC3,                   // ret\n};\n\nconst u8 kIdentityCodeWithMov[] = {\n    0x89, 0xC8,             // mov         eax, ecx\n    0xC3,                   // ret\n};\n\nconst u8 kIdentityCodeWithJump[] = {\n    0xE9, 0x04, 0x00, 0x00,\n    0x00,                   // jmp + 4\n    0xCC, 0xCC, 0xCC, 0xCC,\n    0x89, 0xC8,             // mov         eax, ecx\n    0xC3,                   // ret\n};\n\n#else\n\nconst u8 kIdentityCodeWithPrologue[] = {\n    0x55,                   // push        ebp\n    0x8B, 0xEC,             // mov         ebp,esp\n    0x8B, 0x45, 0x08,       // mov         eax,dword ptr [ebp + 8]\n    0x5D,                   // pop         ebp\n    0xC3,                   // ret\n};\n\nconst u8 kIdentityCodeWithPushPop[] = {\n    0x55,                   // push        ebp\n    0x8B, 0xEC,             // mov         ebp,esp\n    0x53,                   // push        ebx\n    0x50,                   // push        eax\n    0x58,                   // pop         eax\n    0x8B, 0x45, 0x08,       // mov         eax,dword ptr [ebp + 8]\n    0x5B,                   // pop         ebx\n    0x5D,                   // pop         ebp\n    0xC3,                   // ret\n};\n\nconst u8 kIdentityTwiceOffset = 8;\nconst u8 kIdentityTwice[] = {\n    0x55,                   // push        ebp\n    0x8B, 0xEC,             // mov         ebp,esp\n    0x8B, 0x45, 0x08,       // mov         eax,dword ptr [ebp + 8]\n    0x5D,                   // pop         ebp\n    0xC3,                   // ret\n    0x55,                   // push        ebp\n    0x8B, 
0xEC,             // mov         ebp,esp\n    0x8B, 0x45, 0x08,       // mov         eax,dword ptr [ebp + 8]\n    0x5D,                   // pop         ebp\n    0xC3,                   // ret\n};\n\nconst u8 kIdentityCodeWithMov[] = {\n    0x8B, 0x44, 0x24, 0x04, // mov         eax,dword ptr [esp + 4]\n    0xC3,                   // ret\n};\n\nconst u8 kIdentityCodeWithJump[] = {\n    0xE9, 0x04, 0x00, 0x00,\n    0x00,                   // jmp + 4\n    0xCC, 0xCC, 0xCC, 0xCC,\n    0x8B, 0x44, 0x24, 0x04, // mov         eax,dword ptr [esp + 4]\n    0xC3,                   // ret\n};\n\n#endif\n\nconst u8 kPatchableCode1[] = {\n    0xB8, 0x4B, 0x00, 0x00, 0x00,   // mov eax,4B\n    0x33, 0xC9,                     // xor ecx,ecx\n    0xC3,                           // ret\n};\n\nconst u8 kPatchableCode2[] = {\n    0x55,                           // push ebp\n    0x8B, 0xEC,                     // mov ebp,esp\n    0x33, 0xC0,                     // xor eax,eax\n    0x5D,                           // pop ebp\n    0xC3,                           // ret\n};\n\nconst u8 kPatchableCode3[] = {\n    0x55,                           // push ebp\n    0x8B, 0xEC,                     // mov ebp,esp\n    0x6A, 0x00,                     // push 0\n    0xE8, 0x3D, 0xFF, 0xFF, 0xFF,   // call <func>\n};\n\nconst u8 kPatchableCode4[] = {\n    0xE9, 0xCC, 0xCC, 0xCC, 0xCC,   // jmp <label>\n    0x90, 0x90, 0x90, 0x90,\n};\n\nconst u8 kPatchableCode5[] = {\n    0x55,                                      // push    ebp\n    0x8b, 0xec,                                // mov     ebp,esp\n    0x8d, 0xa4, 0x24, 0x30, 0xfd, 0xff, 0xff,  // lea     esp,[esp-2D0h]\n    0x54,                                      // push    esp\n};\n\n#if SANITIZER_WINDOWS64\nu8 kLoadGlobalCode[] = {\n  0x8B, 0x05, 0x00, 0x00, 0x00, 0x00, // mov    eax [rip + global]\n  0xC3,                               // ret\n};\n#endif\n\nconst u8 kUnpatchableCode1[] = {\n    0xC3,                           // 
ret\n};\n\nconst u8 kUnpatchableCode2[] = {\n    0x33, 0xC9,                     // xor ecx,ecx\n    0xC3,                           // ret\n};\n\nconst u8 kUnpatchableCode3[] = {\n    0x75, 0xCC,                     // jne <label>\n    0x33, 0xC9,                     // xor ecx,ecx\n    0xC3,                           // ret\n};\n\nconst u8 kUnpatchableCode4[] = {\n    0x74, 0xCC,                     // jne <label>\n    0x33, 0xC9,                     // xor ecx,ecx\n    0xC3,                           // ret\n};\n\nconst u8 kUnpatchableCode5[] = {\n    0xEB, 0x02,                     // jmp <label>\n    0x33, 0xC9,                     // xor ecx,ecx\n    0xC3,                           // ret\n};\n\nconst u8 kUnpatchableCode6[] = {\n    0xE8, 0xCC, 0xCC, 0xCC, 0xCC,   // call <func>\n    0x90, 0x90, 0x90, 0x90,\n};\n\nconst u8 kUnpatchableCode7[] = {\n    0x33, 0xc0,                     // xor     eax,eax\n    0x48, 0x85, 0xd2,               // test    rdx,rdx\n    0x74, 0x10,                     // je      +16  (unpatchable)\n};\n\nconst u8 kUnpatchableCode8[] = {\n    0x48, 0x8b, 0xc1,               // mov     rax,rcx\n    0x0f, 0xb7, 0x10,               // movzx   edx,word ptr [rax]\n    0x48, 0x83, 0xc0, 0x02,         // add     rax,2\n    0x66, 0x85, 0xd2,               // test    dx,dx\n    0x75, 0xf4,                     // jne     -12  (unpatchable)\n};\n\nconst u8 kUnpatchableCode9[] = {\n    0x4c, 0x8b, 0xc1,               // mov     r8,rcx\n    0x8a, 0x01,                     // mov     al,byte ptr [rcx]\n    0x48, 0xff, 0xc1,               // inc     rcx\n    0x84, 0xc0,                     // test    al,al\n    0x75, 0xf7,                     // jne     -9  (unpatchable)\n};\n\nconst u8 kPatchableCode6[] = {\n    0x48, 0x89, 0x54, 0x24, 0xBB, // mov QWORD PTR [rsp + 0xBB], rdx\n    0x33, 0xC9,                   // xor ecx,ecx\n    0xC3,                         // ret\n};\n\nconst u8 kPatchableCode7[] = {\n    0x4c, 0x89, 0x4c, 0x24, 0xBB,  // mov 
QWORD PTR [rsp + 0xBB], r9\n    0x33, 0xC9,                   // xor ecx,ecx\n    0xC3,                         // ret\n};\n\nconst u8 kPatchableCode8[] = {\n    0x4c, 0x89, 0x44, 0x24, 0xBB, // mov QWORD PTR [rsp + 0xBB], r8\n    0x33, 0xC9,                   // xor ecx,ecx\n    0xC3,                         // ret\n};\n\nconst u8 kPatchableCode9[] = {\n    0x8a, 0x01,                     // al,byte ptr [rcx]\n    0x45, 0x33, 0xc0,               // xor     r8d,r8d\n    0x84, 0xc0,                     // test    al,al\n};\n\nconst u8 kPatchableCode10[] = {\n    0x45, 0x33, 0xc0,               // xor     r8d,r8d\n    0x41, 0x8b, 0xc0,               // mov     eax,r8d\n    0x48, 0x85, 0xd2,               // test    rdx,rdx\n};\n\nconst u8 kPatchableCode11[] = {\n    0x48, 0x83, 0xec, 0x38,         // sub     rsp,38h\n    0x83, 0x64, 0x24, 0x28, 0x00,   // and     dword ptr [rsp+28h],0\n};\n\n// A buffer holding the dynamically generated code under test.\nu8* ActiveCode;\nconst size_t ActiveCodeLength = 4096;\n\nint InterceptorFunction(int x);\n\n/// Allocate code memory more than 2GB away from Base.\nu8 *AllocateCode2GBAway(u8 *Base) {\n  // Find a 64K aligned location after Base plus 2GB.\n  size_t TwoGB = 0x80000000;\n  size_t AllocGranularity = 0x10000;\n  Base = (u8 *)((((uptr)Base + TwoGB + AllocGranularity)) & ~(AllocGranularity - 1));\n\n  // Check if that location is free, and if not, loop over regions until we find\n  // one that is.\n  MEMORY_BASIC_INFORMATION mbi = {};\n  while (sizeof(mbi) == VirtualQuery(Base, &mbi, sizeof(mbi))) {\n    if (mbi.State & MEM_FREE) break;\n    Base += mbi.RegionSize;\n  }\n\n  // Allocate one RWX page at the free location.\n  return (u8 *)::VirtualAlloc(Base, ActiveCodeLength, MEM_COMMIT | MEM_RESERVE,\n                              PAGE_EXECUTE_READWRITE);\n}\n\ntemplate<class T>\nstatic void LoadActiveCode(\n    const T &code,\n    uptr *entry_point,\n    FunctionPrefixKind prefix_kind = FunctionPrefixNone) {\n  if 
(ActiveCode == nullptr) {\n    ActiveCode = AllocateCode2GBAway((u8*)&InterceptorFunction);\n    ASSERT_NE(ActiveCode, nullptr) << \"failed to allocate RWX memory 2GB away\";\n  }\n\n  size_t position = 0;\n\n  // Add padding to avoid memory violation when scanning the prefix.\n  for (int i = 0; i < 16; ++i)\n    ActiveCode[position++] = 0xC3;  // Instruction 'ret'.\n\n  // Add function padding.\n  size_t padding = 0;\n  if (prefix_kind == FunctionPrefixPadding)\n    padding = 16;\n  else if (prefix_kind == FunctionPrefixDetour ||\n           prefix_kind == FunctionPrefixHotPatch)\n    padding = FIRST_32_SECOND_64(5, 6);\n  // Insert |padding| instructions 'nop'.\n  for (size_t i = 0; i < padding; ++i)\n    ActiveCode[position++] = 0x90;\n\n  // Keep track of the entry point.\n  *entry_point = (uptr)&ActiveCode[position];\n\n  // Add the detour instruction (i.e. mov edi, edi)\n  if (prefix_kind == FunctionPrefixDetour) {\n#if SANITIZER_WINDOWS64\n    // Note that \"mov edi,edi\" is NOP in 32-bit only, in 64-bit it clears\n    // higher bits of RDI.\n    // Use 66,90H as NOP for Windows64.\n    ActiveCode[position++] = 0x66;\n    ActiveCode[position++] = 0x90;\n#else\n    // mov edi,edi.\n    ActiveCode[position++] = 0x8B;\n    ActiveCode[position++] = 0xFF;\n#endif\n\n  }\n\n  // Copy the function body.\n  for (size_t i = 0; i < sizeof(T); ++i)\n    ActiveCode[position++] = code[i];\n}\n\nint InterceptorFunctionCalled;\nIdentityFunction InterceptedRealFunction;\n\nint InterceptorFunction(int x) {\n  ++InterceptorFunctionCalled;\n  return InterceptedRealFunction(x);\n}\n\n}  // namespace\n\n// Tests for interception_win.h\nTEST(Interception, InternalGetProcAddress) {\n  HMODULE ntdll_handle = ::GetModuleHandle(\"ntdll\");\n  ASSERT_NE(nullptr, ntdll_handle);\n  uptr DbgPrint_expected = (uptr)::GetProcAddress(ntdll_handle, \"DbgPrint\");\n  uptr isdigit_expected = (uptr)::GetProcAddress(ntdll_handle, \"isdigit\");\n  uptr DbgPrint_adddress = 
InternalGetProcAddress(ntdll_handle, \"DbgPrint\");\n  uptr isdigit_address = InternalGetProcAddress(ntdll_handle, \"isdigit\");\n\n  EXPECT_EQ(DbgPrint_expected, DbgPrint_adddress);\n  EXPECT_EQ(isdigit_expected, isdigit_address);\n  EXPECT_NE(DbgPrint_adddress, isdigit_address);\n}\n\ntemplate<class T>\nstatic void TestIdentityFunctionPatching(\n    const T &code,\n    TestOverrideFunction override,\n    FunctionPrefixKind prefix_kind = FunctionPrefixNone) {\n  uptr identity_address;\n  LoadActiveCode(code, &identity_address, prefix_kind);\n  IdentityFunction identity = (IdentityFunction)identity_address;\n\n  // Validate behavior before dynamic patching.\n  InterceptorFunctionCalled = 0;\n  EXPECT_EQ(0, identity(0));\n  EXPECT_EQ(42, identity(42));\n  EXPECT_EQ(0, InterceptorFunctionCalled);\n\n  // Patch the function.\n  uptr real_identity_address = 0;\n  bool success = override(identity_address,\n                         (uptr)&InterceptorFunction,\n                         &real_identity_address);\n  EXPECT_TRUE(success);\n  EXPECT_NE(0U, real_identity_address);\n  IdentityFunction real_identity = (IdentityFunction)real_identity_address;\n  InterceptedRealFunction = real_identity;\n\n  // Don't run tests if hooking failed or the real function is not valid.\n  if (!success || !real_identity_address)\n    return;\n\n  // Calling the redirected function.\n  InterceptorFunctionCalled = 0;\n  EXPECT_EQ(0, identity(0));\n  EXPECT_EQ(42, identity(42));\n  EXPECT_EQ(2, InterceptorFunctionCalled);\n\n  // Calling the real function.\n  InterceptorFunctionCalled = 0;\n  EXPECT_EQ(0, real_identity(0));\n  EXPECT_EQ(42, real_identity(42));\n  EXPECT_EQ(0, InterceptorFunctionCalled);\n\n  TestOnlyReleaseTrampolineRegions();\n}\n\n#if !SANITIZER_WINDOWS64\nTEST(Interception, OverrideFunctionWithDetour) {\n  TestOverrideFunction override = OverrideFunctionWithDetour;\n  FunctionPrefixKind prefix = FunctionPrefixDetour;\n  
TestIdentityFunctionPatching(kIdentityCodeWithPrologue, override, prefix);\n  TestIdentityFunctionPatching(kIdentityCodeWithPushPop, override, prefix);\n  TestIdentityFunctionPatching(kIdentityCodeWithMov, override, prefix);\n  TestIdentityFunctionPatching(kIdentityCodeWithJump, override, prefix);\n}\n#endif  // !SANITIZER_WINDOWS64\n\nTEST(Interception, OverrideFunctionWithRedirectJump) {\n  TestOverrideFunction override = OverrideFunctionWithRedirectJump;\n  TestIdentityFunctionPatching(kIdentityCodeWithJump, override);\n}\n\nTEST(Interception, OverrideFunctionWithHotPatch) {\n  TestOverrideFunction override = OverrideFunctionWithHotPatch;\n  FunctionPrefixKind prefix = FunctionPrefixHotPatch;\n  TestIdentityFunctionPatching(kIdentityCodeWithMov, override, prefix);\n}\n\nTEST(Interception, OverrideFunctionWithTrampoline) {\n  TestOverrideFunction override = OverrideFunctionWithTrampoline;\n  FunctionPrefixKind prefix = FunctionPrefixNone;\n  TestIdentityFunctionPatching(kIdentityCodeWithPrologue, override, prefix);\n  TestIdentityFunctionPatching(kIdentityCodeWithPushPop, override, prefix);\n\n  prefix = FunctionPrefixPadding;\n  TestIdentityFunctionPatching(kIdentityCodeWithPrologue, override, prefix);\n  TestIdentityFunctionPatching(kIdentityCodeWithPushPop, override, prefix);\n}\n\nTEST(Interception, OverrideFunction) {\n  TestOverrideFunction override = OverrideFunction;\n  FunctionPrefixKind prefix = FunctionPrefixNone;\n  TestIdentityFunctionPatching(kIdentityCodeWithPrologue, override, prefix);\n  TestIdentityFunctionPatching(kIdentityCodeWithPushPop, override, prefix);\n  TestIdentityFunctionPatching(kIdentityCodeWithJump, override, prefix);\n\n  prefix = FunctionPrefixPadding;\n  TestIdentityFunctionPatching(kIdentityCodeWithPrologue, override, prefix);\n  TestIdentityFunctionPatching(kIdentityCodeWithPushPop, override, prefix);\n  TestIdentityFunctionPatching(kIdentityCodeWithMov, override, prefix);\n  TestIdentityFunctionPatching(kIdentityCodeWithJump, 
override, prefix);\n\n  prefix = FunctionPrefixHotPatch;\n  TestIdentityFunctionPatching(kIdentityCodeWithPrologue, override, prefix);\n  TestIdentityFunctionPatching(kIdentityCodeWithPushPop, override, prefix);\n  TestIdentityFunctionPatching(kIdentityCodeWithMov, override, prefix);\n  TestIdentityFunctionPatching(kIdentityCodeWithJump, override, prefix);\n\n  prefix = FunctionPrefixDetour;\n  TestIdentityFunctionPatching(kIdentityCodeWithPrologue, override, prefix);\n  TestIdentityFunctionPatching(kIdentityCodeWithPushPop, override, prefix);\n  TestIdentityFunctionPatching(kIdentityCodeWithMov, override, prefix);\n  TestIdentityFunctionPatching(kIdentityCodeWithJump, override, prefix);\n}\n\ntemplate<class T>\nstatic void TestIdentityFunctionMultiplePatching(\n    const T &code,\n    TestOverrideFunction override,\n    FunctionPrefixKind prefix_kind = FunctionPrefixNone) {\n  uptr identity_address;\n  LoadActiveCode(code, &identity_address, prefix_kind);\n\n  // Patch the function.\n  uptr real_identity_address = 0;\n  bool success = override(identity_address,\n                          (uptr)&InterceptorFunction,\n                          &real_identity_address);\n  EXPECT_TRUE(success);\n  EXPECT_NE(0U, real_identity_address);\n\n  // Re-patching the function should not work.\n  success = override(identity_address,\n                     (uptr)&InterceptorFunction,\n                     &real_identity_address);\n  EXPECT_FALSE(success);\n\n  TestOnlyReleaseTrampolineRegions();\n}\n\nTEST(Interception, OverrideFunctionMultiplePatchingIsFailing) {\n#if !SANITIZER_WINDOWS64\n  TestIdentityFunctionMultiplePatching(kIdentityCodeWithPrologue,\n                                       OverrideFunctionWithDetour,\n                                       FunctionPrefixDetour);\n#endif\n\n  TestIdentityFunctionMultiplePatching(kIdentityCodeWithMov,\n                                       OverrideFunctionWithHotPatch,\n                                       
FunctionPrefixHotPatch);\n\n  TestIdentityFunctionMultiplePatching(kIdentityCodeWithPushPop,\n                                       OverrideFunctionWithTrampoline,\n                                       FunctionPrefixPadding);\n}\n\nTEST(Interception, OverrideFunctionTwice) {\n  uptr identity_address1;\n  LoadActiveCode(kIdentityTwice, &identity_address1);\n  uptr identity_address2 = identity_address1 + kIdentityTwiceOffset;\n  IdentityFunction identity1 = (IdentityFunction)identity_address1;\n  IdentityFunction identity2 = (IdentityFunction)identity_address2;\n\n  // Patch the two functions.\n  uptr real_identity_address = 0;\n  EXPECT_TRUE(OverrideFunction(identity_address1,\n                               (uptr)&InterceptorFunction,\n                               &real_identity_address));\n  EXPECT_TRUE(OverrideFunction(identity_address2,\n                               (uptr)&InterceptorFunction,\n                               &real_identity_address));\n  IdentityFunction real_identity = (IdentityFunction)real_identity_address;\n  InterceptedRealFunction = real_identity;\n\n  // Calling the redirected function.\n  InterceptorFunctionCalled = 0;\n  EXPECT_EQ(42, identity1(42));\n  EXPECT_EQ(42, identity2(42));\n  EXPECT_EQ(2, InterceptorFunctionCalled);\n\n  TestOnlyReleaseTrampolineRegions();\n}\n\ntemplate<class T>\nstatic bool TestFunctionPatching(\n    const T &code,\n    TestOverrideFunction override,\n    FunctionPrefixKind prefix_kind = FunctionPrefixNone) {\n  uptr address;\n  LoadActiveCode(code, &address, prefix_kind);\n  uptr unused_real_address = 0;\n  bool result = override(\n      address, (uptr)&InterceptorFunction, &unused_real_address);\n\n  TestOnlyReleaseTrampolineRegions();\n  return result;\n}\n\nTEST(Interception, PatchableFunction) {\n  TestOverrideFunction override = OverrideFunction;\n  // Test without function padding.\n  EXPECT_TRUE(TestFunctionPatching(kPatchableCode1, override));\n  
EXPECT_TRUE(TestFunctionPatching(kPatchableCode2, override));\n#if SANITIZER_WINDOWS64\n  EXPECT_FALSE(TestFunctionPatching(kPatchableCode3, override));\n#else\n  EXPECT_TRUE(TestFunctionPatching(kPatchableCode3, override));\n#endif\n  EXPECT_TRUE(TestFunctionPatching(kPatchableCode4, override));\n  EXPECT_TRUE(TestFunctionPatching(kPatchableCode5, override));\n#if SANITIZER_WINDOWS64\n  EXPECT_TRUE(TestFunctionPatching(kLoadGlobalCode, override));\n#endif\n\n  EXPECT_FALSE(TestFunctionPatching(kUnpatchableCode1, override));\n  EXPECT_FALSE(TestFunctionPatching(kUnpatchableCode2, override));\n  EXPECT_FALSE(TestFunctionPatching(kUnpatchableCode3, override));\n  EXPECT_FALSE(TestFunctionPatching(kUnpatchableCode4, override));\n  EXPECT_FALSE(TestFunctionPatching(kUnpatchableCode5, override));\n  EXPECT_FALSE(TestFunctionPatching(kUnpatchableCode6, override));\n}\n\n#if !SANITIZER_WINDOWS64\nTEST(Interception, PatchableFunctionWithDetour) {\n  TestOverrideFunction override = OverrideFunctionWithDetour;\n  // Without the prefix, no function can be detoured.\n  EXPECT_FALSE(TestFunctionPatching(kPatchableCode1, override));\n  EXPECT_FALSE(TestFunctionPatching(kPatchableCode2, override));\n  EXPECT_FALSE(TestFunctionPatching(kPatchableCode3, override));\n  EXPECT_FALSE(TestFunctionPatching(kPatchableCode4, override));\n  EXPECT_FALSE(TestFunctionPatching(kUnpatchableCode1, override));\n  EXPECT_FALSE(TestFunctionPatching(kUnpatchableCode2, override));\n  EXPECT_FALSE(TestFunctionPatching(kUnpatchableCode3, override));\n  EXPECT_FALSE(TestFunctionPatching(kUnpatchableCode4, override));\n  EXPECT_FALSE(TestFunctionPatching(kUnpatchableCode5, override));\n  EXPECT_FALSE(TestFunctionPatching(kUnpatchableCode6, override));\n\n  // With the prefix, all functions can be detoured.\n  FunctionPrefixKind prefix = FunctionPrefixDetour;\n  EXPECT_TRUE(TestFunctionPatching(kPatchableCode1, override, prefix));\n  EXPECT_TRUE(TestFunctionPatching(kPatchableCode2, override, prefix));\n 
 EXPECT_TRUE(TestFunctionPatching(kPatchableCode3, override, prefix));\n  EXPECT_TRUE(TestFunctionPatching(kPatchableCode4, override, prefix));\n  EXPECT_TRUE(TestFunctionPatching(kUnpatchableCode1, override, prefix));\n  EXPECT_TRUE(TestFunctionPatching(kUnpatchableCode2, override, prefix));\n  EXPECT_TRUE(TestFunctionPatching(kUnpatchableCode3, override, prefix));\n  EXPECT_TRUE(TestFunctionPatching(kUnpatchableCode4, override, prefix));\n  EXPECT_TRUE(TestFunctionPatching(kUnpatchableCode5, override, prefix));\n  EXPECT_TRUE(TestFunctionPatching(kUnpatchableCode6, override, prefix));\n}\n#endif  // !SANITIZER_WINDOWS64\n\nTEST(Interception, PatchableFunctionWithRedirectJump) {\n  TestOverrideFunction override = OverrideFunctionWithRedirectJump;\n  EXPECT_FALSE(TestFunctionPatching(kPatchableCode1, override));\n  EXPECT_FALSE(TestFunctionPatching(kPatchableCode2, override));\n  EXPECT_FALSE(TestFunctionPatching(kPatchableCode3, override));\n  EXPECT_TRUE(TestFunctionPatching(kPatchableCode4, override));\n  EXPECT_FALSE(TestFunctionPatching(kUnpatchableCode1, override));\n  EXPECT_FALSE(TestFunctionPatching(kUnpatchableCode2, override));\n  EXPECT_FALSE(TestFunctionPatching(kUnpatchableCode3, override));\n  EXPECT_FALSE(TestFunctionPatching(kUnpatchableCode4, override));\n  EXPECT_FALSE(TestFunctionPatching(kUnpatchableCode5, override));\n  EXPECT_FALSE(TestFunctionPatching(kUnpatchableCode6, override));\n}\n\nTEST(Interception, PatchableFunctionWithHotPatch) {\n  TestOverrideFunction override = OverrideFunctionWithHotPatch;\n  FunctionPrefixKind prefix = FunctionPrefixHotPatch;\n\n  EXPECT_TRUE(TestFunctionPatching(kPatchableCode1, override, prefix));\n  EXPECT_FALSE(TestFunctionPatching(kPatchableCode2, override, prefix));\n  EXPECT_FALSE(TestFunctionPatching(kPatchableCode3, override, prefix));\n  EXPECT_FALSE(TestFunctionPatching(kPatchableCode4, override, prefix));\n#if SANITIZER_WINDOWS64\n  EXPECT_TRUE(TestFunctionPatching(kPatchableCode6, override, 
prefix));\n  EXPECT_TRUE(TestFunctionPatching(kPatchableCode7, override, prefix));\n  EXPECT_TRUE(TestFunctionPatching(kPatchableCode8, override, prefix));\n#endif\n  EXPECT_FALSE(TestFunctionPatching(kUnpatchableCode1, override, prefix));\n  EXPECT_TRUE(TestFunctionPatching(kUnpatchableCode2, override, prefix));\n  EXPECT_FALSE(TestFunctionPatching(kUnpatchableCode3, override, prefix));\n  EXPECT_FALSE(TestFunctionPatching(kUnpatchableCode4, override, prefix));\n  EXPECT_FALSE(TestFunctionPatching(kUnpatchableCode5, override, prefix));\n  EXPECT_FALSE(TestFunctionPatching(kUnpatchableCode6, override, prefix));\n}\n\nTEST(Interception, PatchableFunctionWithTrampoline) {\n  TestOverrideFunction override = OverrideFunctionWithTrampoline;\n  FunctionPrefixKind prefix = FunctionPrefixPadding;\n\n  EXPECT_TRUE(TestFunctionPatching(kPatchableCode1, override, prefix));\n  EXPECT_TRUE(TestFunctionPatching(kPatchableCode2, override, prefix));\n#if SANITIZER_WINDOWS64\n  EXPECT_FALSE(TestFunctionPatching(kPatchableCode3, override, prefix));\n  EXPECT_TRUE(TestFunctionPatching(kPatchableCode9, override, prefix));\n  EXPECT_TRUE(TestFunctionPatching(kPatchableCode10, override, prefix));\n  EXPECT_TRUE(TestFunctionPatching(kPatchableCode11, override, prefix));\n  EXPECT_FALSE(TestFunctionPatching(kUnpatchableCode7, override, prefix));\n  EXPECT_FALSE(TestFunctionPatching(kUnpatchableCode8, override, prefix));\n  EXPECT_FALSE(TestFunctionPatching(kUnpatchableCode9, override, prefix));\n#else\n  EXPECT_TRUE(TestFunctionPatching(kPatchableCode3, override, prefix));\n#endif\n  EXPECT_FALSE(TestFunctionPatching(kPatchableCode4, override, prefix));\n\n  EXPECT_FALSE(TestFunctionPatching(kUnpatchableCode1, override, prefix));\n  EXPECT_FALSE(TestFunctionPatching(kUnpatchableCode2, override, prefix));\n  EXPECT_FALSE(TestFunctionPatching(kUnpatchableCode3, override, prefix));\n  EXPECT_FALSE(TestFunctionPatching(kUnpatchableCode4, override, prefix));\n  
EXPECT_FALSE(TestFunctionPatching(kUnpatchableCode5, override, prefix));\n  EXPECT_FALSE(TestFunctionPatching(kUnpatchableCode6, override, prefix));\n}\n\nTEST(Interception, PatchableFunctionPadding) {\n  TestOverrideFunction override = OverrideFunction;\n  FunctionPrefixKind prefix = FunctionPrefixPadding;\n\n  EXPECT_TRUE(TestFunctionPatching(kPatchableCode1, override, prefix));\n  EXPECT_TRUE(TestFunctionPatching(kPatchableCode2, override, prefix));\n#if SANITIZER_WINDOWS64\n  EXPECT_FALSE(TestFunctionPatching(kPatchableCode3, override, prefix));\n#else\n  EXPECT_TRUE(TestFunctionPatching(kPatchableCode3, override, prefix));\n#endif\n  EXPECT_TRUE(TestFunctionPatching(kPatchableCode4, override, prefix));\n\n  EXPECT_FALSE(TestFunctionPatching(kUnpatchableCode1, override, prefix));\n  EXPECT_TRUE(TestFunctionPatching(kUnpatchableCode2, override, prefix));\n  EXPECT_FALSE(TestFunctionPatching(kUnpatchableCode3, override, prefix));\n  EXPECT_FALSE(TestFunctionPatching(kUnpatchableCode4, override, prefix));\n  EXPECT_FALSE(TestFunctionPatching(kUnpatchableCode5, override, prefix));\n  EXPECT_FALSE(TestFunctionPatching(kUnpatchableCode6, override, prefix));\n}\n\nTEST(Interception, EmptyExportTable) {\n  // We try to get a pointer to a function from an executable that doesn't\n  // export any symbol (empty export table).\n  uptr FunPtr = InternalGetProcAddress((void *)GetModuleHandleA(0), \"example\");\n  EXPECT_EQ(0U, FunPtr);\n}\n\n}  // namespace __interception\n\n#endif  // SANITIZER_WINDOWS\n#endif  // #if !SANITIZER_DEBUG\n"
  },
  {
    "path": "runtime/libclang_rt.dfsan-x86_64.a.syms",
    "content": "{\n  __dfsan_*;\n  __dfsw_*;\n  __interceptor_mmap;\n  __interceptor_mmap64;\n  __sanitizer_cov;\n  __sanitizer_cov_dump;\n  __sanitizer_cov_indir_call16;\n  __sanitizer_cov_init;\n  __sanitizer_cov_module_init;\n  __sanitizer_cov_trace_basic_block;\n  __sanitizer_cov_trace_cmp;\n  __sanitizer_cov_trace_cmp1;\n  __sanitizer_cov_trace_cmp2;\n  __sanitizer_cov_trace_cmp4;\n  __sanitizer_cov_trace_cmp8;\n  __sanitizer_cov_trace_div4;\n  __sanitizer_cov_trace_div8;\n  __sanitizer_cov_trace_func_enter;\n  __sanitizer_cov_trace_gep;\n  __sanitizer_cov_trace_pc_guard;\n  __sanitizer_cov_trace_pc_guard_init;\n  __sanitizer_cov_trace_pc_indir;\n  __sanitizer_cov_trace_switch;\n  __sanitizer_cov_with_check;\n  __sanitizer_get_coverage_guards;\n  __sanitizer_get_number_of_counters;\n  __sanitizer_get_total_unique_caller_callee_pairs;\n  __sanitizer_get_total_unique_coverage;\n  __sanitizer_install_malloc_and_free_hooks;\n  __sanitizer_maybe_open_cov_file;\n  __sanitizer_report_error_summary;\n  __sanitizer_reset_coverage;\n  __sanitizer_sandbox_on_notify;\n  __sanitizer_set_death_callback;\n  __sanitizer_set_report_fd;\n  __sanitizer_set_report_path;\n  __sanitizer_symbolize_global;\n  __sanitizer_symbolize_pc;\n  __sanitizer_update_counter_bitset_and_clear_counters;\n  __taint_*;\n  dfsan_*;\n  mmap;\n  mmap64;\n};\n"
  },
  {
    "path": "runtime/sanitizer_common/.clang-format",
    "content": "BasedOnStyle: Google\nAllowShortIfStatementsOnASingleLine: false\nIndentPPDirectives: AfterHash\n"
  },
  {
    "path": "runtime/sanitizer_common/CMakeLists.txt",
    "content": "# Build system for the common Sanitizer runtime support library components.\n# These components are shared between AddressSanitizer and ThreadSanitizer.\n\nset(SANITIZER_SOURCES_NOTERMINATION\n  sanitizer_allocator.cpp\n  sanitizer_common.cpp\n  sanitizer_deadlock_detector1.cpp\n  sanitizer_deadlock_detector2.cpp\n  sanitizer_errno.cpp\n  sanitizer_file.cpp\n  sanitizer_flags.cpp\n  sanitizer_flag_parser.cpp\n  sanitizer_fuchsia.cpp\n  sanitizer_libc.cpp\n  sanitizer_libignore.cpp\n  sanitizer_linux.cpp\n  sanitizer_linux_s390.cpp\n  sanitizer_mac.cpp\n  sanitizer_mutex.cpp\n  sanitizer_netbsd.cpp\n  sanitizer_platform_limits_freebsd.cpp\n  sanitizer_platform_limits_linux.cpp\n  sanitizer_platform_limits_netbsd.cpp\n  sanitizer_platform_limits_posix.cpp\n  sanitizer_platform_limits_solaris.cpp\n  sanitizer_posix.cpp\n  sanitizer_printf.cpp\n  sanitizer_procmaps_common.cpp\n  sanitizer_procmaps_bsd.cpp\n  sanitizer_procmaps_fuchsia.cpp\n  sanitizer_procmaps_linux.cpp\n  sanitizer_procmaps_mac.cpp\n  sanitizer_procmaps_solaris.cpp\n  sanitizer_solaris.cpp\n  sanitizer_stoptheworld_fuchsia.cpp\n  sanitizer_stoptheworld_mac.cpp\n  sanitizer_stoptheworld_win.cpp\n  sanitizer_suppressions.cpp\n  sanitizer_tls_get_addr.cpp\n  sanitizer_thread_registry.cpp\n  sanitizer_type_traits.cpp\n  sanitizer_win.cpp\n  )\n\nset(SANITIZER_SOURCES\n  ${SANITIZER_SOURCES_NOTERMINATION}\n  sanitizer_termination.cpp\n  )\n\n# Libc functions stubs. 
These sources should be linked instead of\n# SANITIZER_LIBCDEP_SOURCES when sanitizer_common library must not depend on\n# libc.\nset(SANITIZER_NOLIBC_SOURCES\n  sanitizer_common_nolibc.cpp\n  )\n\nset(SANITIZER_LIBCDEP_SOURCES\n  sanitizer_common_libcdep.cpp\n  sanitizer_allocator_checks.cpp\n  sanitizer_linux_libcdep.cpp\n  sanitizer_mac_libcdep.cpp\n  sanitizer_posix_libcdep.cpp\n  sanitizer_stoptheworld_linux_libcdep.cpp\n  sanitizer_stoptheworld_netbsd_libcdep.cpp\n  )\n\nset(SANITIZER_COVERAGE_SOURCES\n  sancov_flags.cpp\n  sanitizer_coverage_fuchsia.cpp\n  sanitizer_coverage_libcdep_new.cpp\n  sanitizer_coverage_win_sections.cpp\n  )\n\nset(SANITIZER_SYMBOLIZER_SOURCES\n  sanitizer_allocator_report.cpp\n  sanitizer_chained_origin_depot.cpp\n  sanitizer_stack_store.cpp\n  sanitizer_stackdepot.cpp\n  sanitizer_stacktrace.cpp\n  sanitizer_stacktrace_libcdep.cpp\n  sanitizer_stacktrace_printer.cpp\n  sanitizer_stacktrace_sparc.cpp\n  sanitizer_symbolizer.cpp\n  sanitizer_symbolizer_libbacktrace.cpp\n  sanitizer_symbolizer_libcdep.cpp\n  sanitizer_symbolizer_mac.cpp\n  sanitizer_symbolizer_markup.cpp\n  sanitizer_symbolizer_posix_libcdep.cpp\n  sanitizer_symbolizer_report.cpp\n  sanitizer_symbolizer_win.cpp\n  sanitizer_unwind_linux_libcdep.cpp\n  sanitizer_unwind_win.cpp\n  )\n\n# Explicitly list all sanitizer_common headers. 
Not all of these are\n# included in sanitizer_common source files, but we need to depend on\n# headers when building our custom unit tests.\nset(SANITIZER_IMPL_HEADERS\n  sancov_flags.h\n  sancov_flags.inc\n  sanitizer_addrhashmap.h\n  sanitizer_allocator.h\n  sanitizer_allocator_checks.h\n  sanitizer_allocator_combined.h\n  sanitizer_allocator_dlsym.h\n  sanitizer_allocator_interface.h\n  sanitizer_allocator_internal.h\n  sanitizer_allocator_local_cache.h\n  sanitizer_allocator_primary32.h\n  sanitizer_allocator_primary64.h\n  sanitizer_allocator_report.h\n  sanitizer_allocator_secondary.h\n  sanitizer_allocator_size_class_map.h\n  sanitizer_allocator_stats.h\n  sanitizer_asm.h\n  sanitizer_atomic.h\n  sanitizer_atomic_clang.h\n  sanitizer_atomic_clang_mips.h\n  sanitizer_atomic_clang_other.h\n  sanitizer_atomic_clang_x86.h\n  sanitizer_atomic_msvc.h\n  sanitizer_bitvector.h\n  sanitizer_bvgraph.h\n  sanitizer_chained_origin_depot.h\n  sanitizer_common.h\n  sanitizer_common_interceptors.inc\n  sanitizer_common_interceptors_format.inc\n  sanitizer_common_interceptors_ioctl.inc\n  sanitizer_common_interface.inc\n  sanitizer_common_interface_posix.inc\n  sanitizer_common_syscalls.inc\n  sanitizer_coverage_interface.inc\n  sanitizer_dbghelp.h\n  sanitizer_deadlock_detector.h\n  sanitizer_deadlock_detector_interface.h\n  sanitizer_dense_map.h\n  sanitizer_dense_map_info.h\n  sanitizer_errno.h\n  sanitizer_errno_codes.h\n  sanitizer_file.h\n  sanitizer_flag_parser.h\n  sanitizer_flags.h\n  sanitizer_flags.inc\n  sanitizer_flat_map.h\n  sanitizer_freebsd.h\n  sanitizer_fuchsia.h\n  sanitizer_getauxval.h\n  sanitizer_hash.h\n  sanitizer_interceptors_ioctl_netbsd.inc\n  sanitizer_interface_internal.h\n  sanitizer_internal_defs.h\n  sanitizer_leb128.h\n  sanitizer_lfstack.h\n  sanitizer_libc.h\n  sanitizer_libignore.h\n  sanitizer_linux.h\n  sanitizer_list.h\n  sanitizer_local_address_space_view.h\n  sanitizer_lzw.h\n  sanitizer_mac.h\n  sanitizer_malloc_mac.inc\n  
sanitizer_mutex.h\n  sanitizer_placement_new.h\n  sanitizer_platform.h\n  sanitizer_platform_interceptors.h\n  sanitizer_platform_limits_netbsd.h\n  sanitizer_platform_limits_posix.h\n  sanitizer_platform_limits_solaris.h\n  sanitizer_posix.h\n  sanitizer_procmaps.h\n  sanitizer_ptrauth.h\n  sanitizer_quarantine.h\n  sanitizer_report_decorator.h\n  sanitizer_ring_buffer.h\n  sanitizer_signal_interceptors.inc\n  sanitizer_stack_store.h\n  sanitizer_stackdepot.h\n  sanitizer_stackdepotbase.h\n  sanitizer_stacktrace.h\n  sanitizer_stacktrace_printer.h\n  sanitizer_stoptheworld.h\n  sanitizer_suppressions.h\n  sanitizer_symbolizer.h\n  sanitizer_symbolizer_fuchsia.h\n  sanitizer_symbolizer_internal.h\n  sanitizer_symbolizer_libbacktrace.h\n  sanitizer_symbolizer_mac.h\n  sanitizer_syscall_generic.inc\n  sanitizer_syscall_linux_aarch64.inc\n  sanitizer_syscall_linux_arm.inc\n  sanitizer_syscall_linux_x86_64.inc\n  sanitizer_syscall_linux_riscv64.inc\n  sanitizer_syscalls_netbsd.inc\n  sanitizer_thread_registry.h\n  sanitizer_thread_safety.h\n  sanitizer_tls_get_addr.h\n  sanitizer_vector.h\n  sanitizer_win.h\n  sanitizer_win_defs.h\n  sanitizer_win_dll_thunk.h\n  sanitizer_win_weak_interception.h\n  )\n\ninclude_directories(..)\n\nset(SANITIZER_COMMON_DEFINITIONS\n  HAVE_RPC_XDR_H=${HAVE_RPC_XDR_H})\n\nset(SANITIZER_CFLAGS ${SANITIZER_COMMON_CFLAGS})\n\n# Too many existing bugs, needs cleanup.\nappend_list_if(COMPILER_RT_HAS_WNO_FORMAT -Wno-format SANITIZER_CFLAGS)\n\nappend_rtti_flag(OFF SANITIZER_CFLAGS)\n\nappend_list_if(SANITIZER_LIMIT_FRAME_SIZE -Wframe-larger-than=570\n               SANITIZER_CFLAGS)\nappend_list_if(COMPILER_RT_HAS_WGLOBAL_CONSTRUCTORS_FLAG -Wglobal-constructors\n               SANITIZER_CFLAGS)\n\nif(APPLE)\n  set(OS_OPTION OS ${SANITIZER_COMMON_SUPPORTED_OS})\nendif()\n\nadd_compiler_rt_object_libraries(RTSanitizerCommon\n  ${OS_OPTION}\n  ARCHS ${SANITIZER_COMMON_SUPPORTED_ARCH}\n  SOURCES ${SANITIZER_SOURCES}\n  ADDITIONAL_HEADERS 
${SANITIZER_IMPL_HEADERS}\n  CFLAGS ${SANITIZER_CFLAGS}\n  DEFS ${SANITIZER_COMMON_DEFINITIONS})\nadd_compiler_rt_object_libraries(RTSanitizerCommonNoTermination\n  ${OS_OPTION}\n  ARCHS ${SANITIZER_COMMON_SUPPORTED_ARCH}\n  SOURCES ${SANITIZER_SOURCES_NOTERMINATION}\n  ADDITIONAL_HEADERS ${SANITIZER_IMPL_HEADERS}\n  CFLAGS ${SANITIZER_CFLAGS}\n  DEFS ${SANITIZER_COMMON_DEFINITIONS})\nadd_compiler_rt_object_libraries(RTSanitizerCommonNoLibc\n  ${OS_OPTION}\n  ARCHS ${SANITIZER_COMMON_SUPPORTED_ARCH}\n  SOURCES ${SANITIZER_NOLIBC_SOURCES}\n  ADDITIONAL_HEADERS ${SANITIZER_IMPL_HEADERS}\n  CFLAGS ${SANITIZER_CFLAGS}\n  DEFS ${SANITIZER_COMMON_DEFINITIONS})\nadd_compiler_rt_object_libraries(RTSanitizerCommonLibc\n  ${OS_OPTION}\n  ARCHS ${SANITIZER_COMMON_SUPPORTED_ARCH}\n  SOURCES ${SANITIZER_LIBCDEP_SOURCES}\n  ADDITIONAL_HEADERS ${SANITIZER_IMPL_HEADERS}\n  CFLAGS ${SANITIZER_CFLAGS}\n  DEFS ${SANITIZER_COMMON_DEFINITIONS})\nadd_compiler_rt_object_libraries(RTSanitizerCommonCoverage\n  ${OS_OPTION}\n  ARCHS ${SANITIZER_COMMON_SUPPORTED_ARCH}\n  SOURCES ${SANITIZER_COVERAGE_SOURCES}\n  ADDITIONAL_HEADERS ${SANITIZER_IMPL_HEADERS}\n  CFLAGS ${SANITIZER_CFLAGS}\n  DEFS ${SANITIZER_COMMON_DEFINITIONS})\nadd_compiler_rt_object_libraries(RTSanitizerCommonSymbolizer\n  ${OS_OPTION}\n  ARCHS ${SANITIZER_COMMON_SUPPORTED_ARCH}\n  SOURCES ${SANITIZER_SYMBOLIZER_SOURCES}\n  ADDITIONAL_HEADERS ${SANITIZER_IMPL_HEADERS}\n  CFLAGS ${SANITIZER_CFLAGS}\n  DEFS ${SANITIZER_COMMON_DEFINITIONS})\n\nset(SANITIZER_NO_WEAK_HOOKS_CFLAGS ${SANITIZER_CFLAGS})\nlist(APPEND SANITIZER_NO_WEAK_HOOKS_CFLAGS \"-DSANITIZER_SUPPORTS_WEAK_HOOKS=0\")\nadd_compiler_rt_object_libraries(RTSanitizerCommonNoHooks\n  ${OS_OPTION}\n  ARCHS ${SANITIZER_COMMON_SUPPORTED_ARCH}\n  SOURCES ${SANITIZER_SOURCES}\n  ADDITIONAL_HEADERS ${SANITIZER_IMPL_HEADERS}\n  CFLAGS ${SANITIZER_NO_WEAK_HOOKS_CFLAGS}\n  DEFS ${SANITIZER_COMMON_DEFINITIONS})\nadd_compiler_rt_object_libraries(RTSanitizerCommonLibcNoHooks\n  
${OS_OPTION}\n  ARCHS ${SANITIZER_COMMON_SUPPORTED_ARCH}\n  SOURCES ${SANITIZER_LIBCDEP_SOURCES}\n  ADDITIONAL_HEADERS ${SANITIZER_IMPL_HEADERS}\n  CFLAGS ${SANITIZER_NO_WEAK_HOOKS_CFLAGS}\n  DEFS ${SANITIZER_COMMON_DEFINITIONS})\nadd_compiler_rt_object_libraries(RTSanitizerCommonSymbolizerNoHooks\n  ${OS_OPTION}\n  ARCHS ${SANITIZER_COMMON_SUPPORTED_ARCH}\n  SOURCES ${SANITIZER_SYMBOLIZER_SOURCES}\n  ADDITIONAL_HEADERS ${SANITIZER_IMPL_HEADERS}\n  CFLAGS ${SANITIZER_NO_WEAK_HOOKS_CFLAGS}\n  DEFS ${SANITIZER_COMMON_DEFINITIONS})\n\nif(WIN32)\n  add_compiler_rt_object_libraries(SanitizerCommonWeakInterception\n    ${SANITIZER_COMMON_SUPPORTED_OS}\n    ARCHS ${SANITIZER_COMMON_SUPPORTED_ARCH}\n    SOURCES\n      sanitizer_win_weak_interception.cpp\n    CFLAGS ${SANITIZER_CFLAGS} -DSANITIZER_DYNAMIC\n    DEFS ${SANITIZER_COMMON_DEFINITIONS})\n  add_compiler_rt_object_libraries(SancovWeakInterception\n    ${SANITIZER_COMMON_SUPPORTED_OS}\n    ARCHS ${SANITIZER_COMMON_SUPPORTED_ARCH}\n    SOURCES\n      sanitizer_coverage_win_weak_interception.cpp\n    CFLAGS ${SANITIZER_CFLAGS} -DSANITIZER_DYNAMIC\n    DEFS ${SANITIZER_COMMON_DEFINITIONS})\n\n  add_compiler_rt_object_libraries(SanitizerCommonDllThunk\n    ${SANITIZER_COMMON_SUPPORTED_OS}\n    ARCHS ${SANITIZER_COMMON_SUPPORTED_ARCH}\n    SOURCES\n      sanitizer_win_dll_thunk.cpp\n    CFLAGS ${SANITIZER_CFLAGS} -DSANITIZER_DLL_THUNK\n    DEFS ${SANITIZER_COMMON_DEFINITIONS})\n  add_compiler_rt_object_libraries(SancovDllThunk\n    ${SANITIZER_COMMON_SUPPORTED_OS}\n    ARCHS ${SANITIZER_COMMON_SUPPORTED_ARCH}\n    SOURCES\n      sanitizer_coverage_win_dll_thunk.cpp\n      sanitizer_coverage_win_sections.cpp\n    CFLAGS ${SANITIZER_CFLAGS} -DSANITIZER_DLL_THUNK\n    DEFS ${SANITIZER_COMMON_DEFINITIONS})\n\n  set(DYNAMIC_RUNTIME_THUNK_CFLAGS \"-DSANITIZER_DYNAMIC_RUNTIME_THUNK\")\n  if(MSVC)\n    list(APPEND DYNAMIC_RUNTIME_THUNK_CFLAGS \"-Zl\")\n  elseif(CMAKE_C_COMPILER_ID MATCHES Clang)\n    list(APPEND 
DYNAMIC_RUNTIME_THUNK_CFLAGS \"-nodefaultlibs\")\n  endif()\n  add_compiler_rt_object_libraries(SanitizerCommonDynamicRuntimeThunk\n    ${SANITIZER_COMMON_SUPPORTED_OS}\n    ARCHS ${SANITIZER_COMMON_SUPPORTED_ARCH}\n    SOURCES\n      sanitizer_win_dynamic_runtime_thunk.cpp\n    CFLAGS ${SANITIZER_CFLAGS} ${DYNAMIC_RUNTIME_THUNK_CFLAGS}\n    DEFS ${SANITIZER_COMMON_DEFINITIONS})\n  add_compiler_rt_object_libraries(SancovDynamicRuntimeThunk\n    ${SANITIZER_COMMON_SUPPORTED_OS}\n    ARCHS ${SANITIZER_COMMON_SUPPORTED_ARCH}\n    SOURCES\n      sanitizer_coverage_win_dynamic_runtime_thunk.cpp\n      sanitizer_coverage_win_sections.cpp\n    CFLAGS ${SANITIZER_CFLAGS} ${DYNAMIC_RUNTIME_THUNK_CFLAGS}\n    DEFS ${SANITIZER_COMMON_DEFINITIONS})\nendif()\n\n# Unit tests for common sanitizer runtime.\nif(COMPILER_RT_INCLUDE_TESTS)\n  add_subdirectory(tests)\nendif()\n"
  },
  {
    "path": "runtime/sanitizer_common/sancov_flags.cpp",
    "content": "//===-- sancov_flags.cpp ----------------------------------------*- C++ -*-===//\n//\n// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n// See https://llvm.org/LICENSE.txt for license information.\n// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n//\n//===----------------------------------------------------------------------===//\n//\n// Sanitizer Coverage runtime flags.\n//\n//===----------------------------------------------------------------------===//\n\n#include \"sancov_flags.h\"\n#include \"sanitizer_flag_parser.h\"\n#include \"sanitizer_platform.h\"\n\nSANITIZER_INTERFACE_WEAK_DEF(const char*, __sancov_default_options, void) {\n  return \"\";\n}\n\nusing namespace __sanitizer;\n\nnamespace __sancov {\n\nSancovFlags sancov_flags_dont_use_directly;  // use via flags();\n\nvoid SancovFlags::SetDefaults() {\n#define SANCOV_FLAG(Type, Name, DefaultValue, Description) Name = DefaultValue;\n#include \"sancov_flags.inc\"\n#undef SANCOV_FLAG\n}\n\nstatic void RegisterSancovFlags(FlagParser *parser, SancovFlags *f) {\n#define SANCOV_FLAG(Type, Name, DefaultValue, Description) \\\n  RegisterFlag(parser, #Name, Description, &f->Name);\n#include \"sancov_flags.inc\"\n#undef SANCOV_FLAG\n}\n\nstatic const char *MaybeCallSancovDefaultOptions() {\n  return (&__sancov_default_options) ? __sancov_default_options() : \"\";\n}\n\nvoid InitializeSancovFlags() {\n  SancovFlags *f = sancov_flags();\n  f->SetDefaults();\n\n  FlagParser parser;\n  RegisterSancovFlags(&parser, f);\n\n  parser.ParseString(MaybeCallSancovDefaultOptions());\n  parser.ParseStringFromEnv(\"SANCOV_OPTIONS\");\n\n  ReportUnrecognizedFlags();\n  if (f->help) parser.PrintFlagDescriptions();\n}\n\n}  // namespace __sancov\n"
  },
  {
    "path": "runtime/sanitizer_common/sancov_flags.h",
    "content": "//===-- sancov_flags.h ------------------------------------------*- C++ -*-===//\n//\n// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n// See https://llvm.org/LICENSE.txt for license information.\n// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n//\n//===----------------------------------------------------------------------===//\n//\n// Sanitizer Coverage runtime flags.\n//\n//===----------------------------------------------------------------------===//\n#ifndef SANCOV_FLAGS_H\n#define SANCOV_FLAGS_H\n\n#include \"sanitizer_flag_parser.h\"\n#include \"sanitizer_internal_defs.h\"\n\nnamespace __sancov {\n\nstruct SancovFlags {\n#define SANCOV_FLAG(Type, Name, DefaultValue, Description) Type Name;\n#include \"sancov_flags.inc\"\n#undef SANCOV_FLAG\n\n  void SetDefaults();\n};\n\nextern SancovFlags sancov_flags_dont_use_directly;\n\ninline SancovFlags* sancov_flags() { return &sancov_flags_dont_use_directly; }\n\nvoid InitializeSancovFlags();\n\n}  // namespace __sancov\n\nextern \"C\" SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE const char*\n__sancov_default_options();\n\n#endif\n"
  },
  {
    "path": "runtime/sanitizer_common/sancov_flags.inc",
    "content": "//===-- sancov_flags.inc ----------------------------------------*- C++ -*-===//\n//\n// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n// See https://llvm.org/LICENSE.txt for license information.\n// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n//\n//===----------------------------------------------------------------------===//\n//\n// Sanitizer Coverage runtime flags.\n//\n//===----------------------------------------------------------------------===//\n#ifndef SANCOV_FLAG\n#error \"Define SANCOV_FLAG prior to including this file!\"\n#endif\n\nSANCOV_FLAG(bool, symbolize, true,\n            \"If set, coverage information will be symbolized by sancov tool \"\n            \"after dumping.\")\n\nSANCOV_FLAG(bool, help, false, \"Print flags help.\")\n"
  },
  {
    "path": "runtime/sanitizer_common/sanitizer_addrhashmap.h",
    "content": "//===-- sanitizer_addrhashmap.h ---------------------------------*- C++ -*-===//\n//\n// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n// See https://llvm.org/LICENSE.txt for license information.\n// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n//\n//===----------------------------------------------------------------------===//\n//\n// Concurrent uptr->T hashmap.\n//\n//===----------------------------------------------------------------------===//\n\n#ifndef SANITIZER_ADDRHASHMAP_H\n#define SANITIZER_ADDRHASHMAP_H\n\n#include \"sanitizer_common.h\"\n#include \"sanitizer_mutex.h\"\n#include \"sanitizer_atomic.h\"\n#include \"sanitizer_allocator_internal.h\"\n\nnamespace __sanitizer {\n\n// Concurrent uptr->T hashmap.\n// T must be a POD type, kSize is preferably a prime but can be any number.\n// Usage example:\n//\n// typedef AddrHashMap<uptr, 11> Map;\n// Map m;\n// {\n//   Map::Handle h(&m, addr);\n//   use h.operator->() to access the data\n//   if h.created() then the element was just created, and the current thread\n//     has exclusive access to it\n//   otherwise the current thread has only read access to the data\n// }\n// {\n//   Map::Handle h(&m, addr, true);\n//   this will remove the data from the map in Handle dtor\n//   the current thread has exclusive access to the data\n//   if !h.exists() then the element never existed\n// }\n// {\n//   Map::Handle h(&m, addr, false, true);\n//   this will create a new element or return a handle to an existing element\n//   if !h.created() this thread does *not* have exclusive access to the data\n// }\ntemplate<typename T, uptr kSize>\nclass AddrHashMap {\n private:\n  struct Cell {\n    atomic_uintptr_t addr;\n    T                val;\n  };\n\n  struct AddBucket {\n    uptr cap;\n    uptr size;\n    Cell cells[1];  // variable len\n  };\n\n  static const uptr kBucketSize = 3;\n\n  struct Bucket {\n    Mutex mtx;\n    atomic_uintptr_t add;\n    Cell      
       cells[kBucketSize];\n  };\n\n public:\n  AddrHashMap();\n\n  class Handle {\n   public:\n    Handle(AddrHashMap<T, kSize> *map, uptr addr);\n    Handle(AddrHashMap<T, kSize> *map, uptr addr, bool remove);\n    Handle(AddrHashMap<T, kSize> *map, uptr addr, bool remove, bool create);\n\n    ~Handle();\n    T *operator->();\n    T &operator*();\n    const T &operator*() const;\n    bool created() const;\n    bool exists() const;\n\n   private:\n    friend AddrHashMap<T, kSize>;\n    AddrHashMap<T, kSize> *map_;\n    Bucket                *bucket_;\n    Cell                  *cell_;\n    uptr                   addr_;\n    uptr                   addidx_;\n    bool                   created_;\n    bool                   remove_;\n    bool                   create_;\n  };\n\n  typedef void (*ForEachCallback)(const uptr key, const T &val, void *arg);\n  // ForEach acquires a lock on each bucket while iterating over\n  // elements. Note that this only ensures that the structure of the hashmap is\n  // unchanged, there may be a data race to the element itself.\n  void ForEach(ForEachCallback cb, void *arg);\n\n private:\n  friend class Handle;\n  Bucket *table_;\n\n  void acquire(Handle *h);\n  void release(Handle *h);\n  uptr calcHash(uptr addr);\n};\n\ntemplate <typename T, uptr kSize>\nvoid AddrHashMap<T, kSize>::ForEach(ForEachCallback cb, void *arg) {\n  for (uptr n = 0; n < kSize; n++) {\n    Bucket *bucket = &table_[n];\n\n    ReadLock lock(&bucket->mtx);\n\n    for (uptr i = 0; i < kBucketSize; i++) {\n      Cell *c = &bucket->cells[i];\n      uptr addr1 = atomic_load(&c->addr, memory_order_acquire);\n      if (addr1 != 0)\n        cb(addr1, c->val, arg);\n    }\n\n    // Iterate over any additional cells.\n    if (AddBucket *add =\n            (AddBucket *)atomic_load(&bucket->add, memory_order_acquire)) {\n      for (uptr i = 0; i < add->size; i++) {\n        Cell *c = &add->cells[i];\n        uptr addr1 = atomic_load(&c->addr, memory_order_acquire);\n       
 if (addr1 != 0)\n          cb(addr1, c->val, arg);\n      }\n    }\n  }\n}\n\ntemplate<typename T, uptr kSize>\nAddrHashMap<T, kSize>::Handle::Handle(AddrHashMap<T, kSize> *map, uptr addr) {\n  map_ = map;\n  addr_ = addr;\n  remove_ = false;\n  create_ = true;\n  map_->acquire(this);\n}\n\ntemplate<typename T, uptr kSize>\nAddrHashMap<T, kSize>::Handle::Handle(AddrHashMap<T, kSize> *map, uptr addr,\n    bool remove) {\n  map_ = map;\n  addr_ = addr;\n  remove_ = remove;\n  create_ = true;\n  map_->acquire(this);\n}\n\ntemplate<typename T, uptr kSize>\nAddrHashMap<T, kSize>::Handle::Handle(AddrHashMap<T, kSize> *map, uptr addr,\n    bool remove, bool create) {\n  map_ = map;\n  addr_ = addr;\n  remove_ = remove;\n  create_ = create;\n  map_->acquire(this);\n}\n\ntemplate<typename T, uptr kSize>\nAddrHashMap<T, kSize>::Handle::~Handle() {\n  map_->release(this);\n}\n\ntemplate <typename T, uptr kSize>\nT *AddrHashMap<T, kSize>::Handle::operator->() {\n  return &cell_->val;\n}\n\ntemplate <typename T, uptr kSize>\nconst T &AddrHashMap<T, kSize>::Handle::operator*() const {\n  return cell_->val;\n}\n\ntemplate <typename T, uptr kSize>\nT &AddrHashMap<T, kSize>::Handle::operator*() {\n  return cell_->val;\n}\n\ntemplate<typename T, uptr kSize>\nbool AddrHashMap<T, kSize>::Handle::created() const {\n  return created_;\n}\n\ntemplate<typename T, uptr kSize>\nbool AddrHashMap<T, kSize>::Handle::exists() const {\n  return cell_ != nullptr;\n}\n\ntemplate<typename T, uptr kSize>\nAddrHashMap<T, kSize>::AddrHashMap() {\n  table_ = (Bucket*)MmapOrDie(kSize * sizeof(table_[0]), \"AddrHashMap\");\n}\n\ntemplate <typename T, uptr kSize>\nvoid AddrHashMap<T, kSize>::acquire(Handle *h)\n    SANITIZER_NO_THREAD_SAFETY_ANALYSIS {\n  uptr addr = h->addr_;\n  uptr hash = calcHash(addr);\n  Bucket *b = &table_[hash];\n\n  h->created_ = false;\n  h->addidx_ = -1U;\n  h->bucket_ = b;\n  h->cell_ = nullptr;\n\n  // If we want to remove the element, we need exclusive access to the 
bucket,\n  // so skip the lock-free phase.\n  if (h->remove_)\n    goto locked;\n\n retry:\n  // First try to find an existing element w/o read mutex.\n  CHECK(!h->remove_);\n  // Check the embed cells.\n  for (uptr i = 0; i < kBucketSize; i++) {\n    Cell *c = &b->cells[i];\n    uptr addr1 = atomic_load(&c->addr, memory_order_acquire);\n    if (addr1 == addr) {\n      h->cell_ = c;\n      return;\n    }\n  }\n\n  // Check the add cells with read lock.\n  if (atomic_load(&b->add, memory_order_relaxed)) {\n    b->mtx.ReadLock();\n    AddBucket *add = (AddBucket*)atomic_load(&b->add, memory_order_relaxed);\n    for (uptr i = 0; i < add->size; i++) {\n      Cell *c = &add->cells[i];\n      uptr addr1 = atomic_load(&c->addr, memory_order_relaxed);\n      if (addr1 == addr) {\n        h->addidx_ = i;\n        h->cell_ = c;\n        return;\n      }\n    }\n    b->mtx.ReadUnlock();\n  }\n\n locked:\n  // Re-check existence under write lock.\n  // Embed cells.\n  b->mtx.Lock();\n  for (uptr i = 0; i < kBucketSize; i++) {\n    Cell *c = &b->cells[i];\n    uptr addr1 = atomic_load(&c->addr, memory_order_relaxed);\n    if (addr1 == addr) {\n      if (h->remove_) {\n        h->cell_ = c;\n        return;\n      }\n      b->mtx.Unlock();\n      goto retry;\n    }\n  }\n\n  // Add cells.\n  AddBucket *add = (AddBucket*)atomic_load(&b->add, memory_order_relaxed);\n  if (add) {\n    for (uptr i = 0; i < add->size; i++) {\n      Cell *c = &add->cells[i];\n      uptr addr1 = atomic_load(&c->addr, memory_order_relaxed);\n      if (addr1 == addr) {\n        if (h->remove_) {\n          h->addidx_ = i;\n          h->cell_ = c;\n          return;\n        }\n        b->mtx.Unlock();\n        goto retry;\n      }\n    }\n  }\n\n  // The element does not exist, no need to create it if we want to remove.\n  if (h->remove_ || !h->create_) {\n    b->mtx.Unlock();\n    return;\n  }\n\n  // Now try to create it under the mutex.\n  h->created_ = true;\n  // See if we have a free embed cell.\n  
for (uptr i = 0; i < kBucketSize; i++) {\n    Cell *c = &b->cells[i];\n    uptr addr1 = atomic_load(&c->addr, memory_order_relaxed);\n    if (addr1 == 0) {\n      h->cell_ = c;\n      return;\n    }\n  }\n\n  // Store in the add cells.\n  if (!add) {\n    // Allocate a new add array.\n    const uptr kInitSize = 64;\n    add = (AddBucket*)InternalAlloc(kInitSize);\n    internal_memset(add, 0, kInitSize);\n    add->cap = (kInitSize - sizeof(*add)) / sizeof(add->cells[0]) + 1;\n    add->size = 0;\n    atomic_store(&b->add, (uptr)add, memory_order_relaxed);\n  }\n  if (add->size == add->cap) {\n    // Grow existing add array.\n    uptr oldsize = sizeof(*add) + (add->cap - 1) * sizeof(add->cells[0]);\n    uptr newsize = oldsize * 2;\n    AddBucket *add1 = (AddBucket*)InternalAlloc(newsize);\n    internal_memset(add1, 0, newsize);\n    add1->cap = (newsize - sizeof(*add)) / sizeof(add->cells[0]) + 1;\n    add1->size = add->size;\n    internal_memcpy(add1->cells, add->cells, add->size * sizeof(add->cells[0]));\n    InternalFree(add);\n    atomic_store(&b->add, (uptr)add1, memory_order_relaxed);\n    add = add1;\n  }\n  // Store.\n  uptr i = add->size++;\n  Cell *c = &add->cells[i];\n  CHECK_EQ(atomic_load(&c->addr, memory_order_relaxed), 0);\n  h->addidx_ = i;\n  h->cell_ = c;\n }\n\n template <typename T, uptr kSize>\n void AddrHashMap<T, kSize>::release(Handle *h)\n     SANITIZER_NO_THREAD_SAFETY_ANALYSIS {\n   if (!h->cell_)\n     return;\n   Bucket *b = h->bucket_;\n   Cell *c = h->cell_;\n   uptr addr1 = atomic_load(&c->addr, memory_order_relaxed);\n   if (h->created_) {\n     // Denote completion of insertion.\n     CHECK_EQ(addr1, 0);\n     // After the following store, the element becomes available\n     // for lock-free reads.\n     atomic_store(&c->addr, h->addr_, memory_order_release);\n     b->mtx.Unlock();\n   } else if (h->remove_) {\n     // Denote that the cell is empty now.\n     CHECK_EQ(addr1, h->addr_);\n     atomic_store(&c->addr, 0, 
memory_order_release);\n     // See if we need to compact the bucket.\n     AddBucket *add = (AddBucket *)atomic_load(&b->add, memory_order_relaxed);\n     if (h->addidx_ == -1U) {\n       // Removed from embed array, move an add element into the freed cell.\n       if (add && add->size != 0) {\n         uptr last = --add->size;\n         Cell *c1 = &add->cells[last];\n         c->val = c1->val;\n         uptr addr1 = atomic_load(&c1->addr, memory_order_relaxed);\n         atomic_store(&c->addr, addr1, memory_order_release);\n         atomic_store(&c1->addr, 0, memory_order_release);\n       }\n     } else {\n       // Removed from add array, compact it.\n       uptr last = --add->size;\n       Cell *c1 = &add->cells[last];\n       if (c != c1) {\n         *c = *c1;\n         atomic_store(&c1->addr, 0, memory_order_relaxed);\n       }\n     }\n     if (add && add->size == 0) {\n       // FIXME(dvyukov): free add?\n     }\n     b->mtx.Unlock();\n   } else {\n     CHECK_EQ(addr1, h->addr_);\n     if (h->addidx_ != -1U)\n       b->mtx.ReadUnlock();\n   }\n }\n\ntemplate<typename T, uptr kSize>\nuptr AddrHashMap<T, kSize>::calcHash(uptr addr) {\n  addr += addr << 10;\n  addr ^= addr >> 6;\n  return addr % kSize;\n}\n\n} // namespace __sanitizer\n\n#endif // SANITIZER_ADDRHASHMAP_H\n"
  },
  {
    "path": "runtime/sanitizer_common/sanitizer_allocator.cpp",
    "content": "//===-- sanitizer_allocator.cpp -------------------------------------------===//\n//\n// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n// See https://llvm.org/LICENSE.txt for license information.\n// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n//\n//===----------------------------------------------------------------------===//\n//\n// This file is shared between AddressSanitizer and ThreadSanitizer\n// run-time libraries.\n// This allocator is used inside run-times.\n//===----------------------------------------------------------------------===//\n\n#include \"sanitizer_allocator.h\"\n\n#include \"sanitizer_allocator_checks.h\"\n#include \"sanitizer_allocator_internal.h\"\n#include \"sanitizer_atomic.h\"\n#include \"sanitizer_common.h\"\n#include \"sanitizer_platform.h\"\n\nnamespace __sanitizer {\n\n// Default allocator names.\nconst char *PrimaryAllocatorName = \"SizeClassAllocator\";\nconst char *SecondaryAllocatorName = \"LargeMmapAllocator\";\n\nstatic ALIGNED(64) char internal_alloc_placeholder[sizeof(InternalAllocator)];\nstatic atomic_uint8_t internal_allocator_initialized;\nstatic StaticSpinMutex internal_alloc_init_mu;\n\nstatic InternalAllocatorCache internal_allocator_cache;\nstatic StaticSpinMutex internal_allocator_cache_mu;\n\nInternalAllocator *internal_allocator() {\n  InternalAllocator *internal_allocator_instance =\n      reinterpret_cast<InternalAllocator *>(&internal_alloc_placeholder);\n  if (atomic_load(&internal_allocator_initialized, memory_order_acquire) == 0) {\n    SpinMutexLock l(&internal_alloc_init_mu);\n    if (atomic_load(&internal_allocator_initialized, memory_order_relaxed) ==\n        0) {\n      internal_allocator_instance->Init(kReleaseToOSIntervalNever);\n      atomic_store(&internal_allocator_initialized, 1, memory_order_release);\n    }\n  }\n  return internal_allocator_instance;\n}\n\nstatic void *RawInternalAlloc(uptr size, InternalAllocatorCache *cache,\n       
                       uptr alignment) {\n  if (alignment == 0) alignment = 8;\n  if (cache == 0) {\n    SpinMutexLock l(&internal_allocator_cache_mu);\n    return internal_allocator()->Allocate(&internal_allocator_cache, size,\n                                          alignment);\n  }\n  return internal_allocator()->Allocate(cache, size, alignment);\n}\n\nstatic void *RawInternalRealloc(void *ptr, uptr size,\n                                InternalAllocatorCache *cache) {\n  uptr alignment = 8;\n  if (cache == 0) {\n    SpinMutexLock l(&internal_allocator_cache_mu);\n    return internal_allocator()->Reallocate(&internal_allocator_cache, ptr,\n                                            size, alignment);\n  }\n  return internal_allocator()->Reallocate(cache, ptr, size, alignment);\n}\n\nstatic void RawInternalFree(void *ptr, InternalAllocatorCache *cache) {\n  if (!cache) {\n    SpinMutexLock l(&internal_allocator_cache_mu);\n    return internal_allocator()->Deallocate(&internal_allocator_cache, ptr);\n  }\n  internal_allocator()->Deallocate(cache, ptr);\n}\n\nstatic void NORETURN ReportInternalAllocatorOutOfMemory(uptr requested_size) {\n  SetAllocatorOutOfMemory();\n  Report(\"FATAL: %s: internal allocator is out of memory trying to allocate \"\n         \"0x%zx bytes\\n\", SanitizerToolName, requested_size);\n  Die();\n}\n\nvoid *InternalAlloc(uptr size, InternalAllocatorCache *cache, uptr alignment) {\n  void *p = RawInternalAlloc(size, cache, alignment);\n  if (UNLIKELY(!p))\n    ReportInternalAllocatorOutOfMemory(size);\n  return p;\n}\n\nvoid *InternalRealloc(void *addr, uptr size, InternalAllocatorCache *cache) {\n  void *p = RawInternalRealloc(addr, size, cache);\n  if (UNLIKELY(!p))\n    ReportInternalAllocatorOutOfMemory(size);\n  return p;\n}\n\nvoid *InternalReallocArray(void *addr, uptr count, uptr size,\n                           InternalAllocatorCache *cache) {\n  if (UNLIKELY(CheckForCallocOverflow(count, size))) {\n    Report(\n        \"FATAL: 
%s: reallocarray parameters overflow: count * size (%zd * %zd) \"\n        \"cannot be represented in type size_t\\n\",\n        SanitizerToolName, count, size);\n    Die();\n  }\n  return InternalRealloc(addr, count * size, cache);\n}\n\nvoid *InternalCalloc(uptr count, uptr size, InternalAllocatorCache *cache) {\n  if (UNLIKELY(CheckForCallocOverflow(count, size))) {\n    Report(\"FATAL: %s: calloc parameters overflow: count * size (%zd * %zd) \"\n           \"cannot be represented in type size_t\\n\", SanitizerToolName, count,\n           size);\n    Die();\n  }\n  void *p = InternalAlloc(count * size, cache);\n  if (LIKELY(p))\n    internal_memset(p, 0, count * size);\n  return p;\n}\n\nvoid InternalFree(void *addr, InternalAllocatorCache *cache) {\n  RawInternalFree(addr, cache);\n}\n\nvoid InternalAllocatorLock() SANITIZER_NO_THREAD_SAFETY_ANALYSIS {\n  internal_allocator_cache_mu.Lock();\n  internal_allocator()->ForceLock();\n}\n\nvoid InternalAllocatorUnlock() SANITIZER_NO_THREAD_SAFETY_ANALYSIS {\n  internal_allocator()->ForceUnlock();\n  internal_allocator_cache_mu.Unlock();\n}\n\n// LowLevelAllocator\nconstexpr uptr kLowLevelAllocatorDefaultAlignment = 8;\nstatic uptr low_level_alloc_min_alignment = kLowLevelAllocatorDefaultAlignment;\nstatic LowLevelAllocateCallback low_level_alloc_callback;\n\nvoid *LowLevelAllocator::Allocate(uptr size) {\n  // Align allocation size.\n  size = RoundUpTo(size, low_level_alloc_min_alignment);\n  if (allocated_end_ - allocated_current_ < (sptr)size) {\n    uptr size_to_allocate = RoundUpTo(size, GetPageSizeCached());\n    allocated_current_ =\n        (char*)MmapOrDie(size_to_allocate, __func__);\n    allocated_end_ = allocated_current_ + size_to_allocate;\n    if (low_level_alloc_callback) {\n      low_level_alloc_callback((uptr)allocated_current_,\n                               size_to_allocate);\n    }\n  }\n  CHECK(allocated_end_ - allocated_current_ >= (sptr)size);\n  void *res = allocated_current_;\n  
allocated_current_ += size;\n  return res;\n}\n\nvoid SetLowLevelAllocateMinAlignment(uptr alignment) {\n  CHECK(IsPowerOfTwo(alignment));\n  low_level_alloc_min_alignment = Max(alignment, low_level_alloc_min_alignment);\n}\n\nvoid SetLowLevelAllocateCallback(LowLevelAllocateCallback callback) {\n  low_level_alloc_callback = callback;\n}\n\n// Allocator's OOM and other errors handling support.\n\nstatic atomic_uint8_t allocator_out_of_memory = {0};\nstatic atomic_uint8_t allocator_may_return_null = {0};\n\nbool IsAllocatorOutOfMemory() {\n  return atomic_load_relaxed(&allocator_out_of_memory);\n}\n\nvoid SetAllocatorOutOfMemory() {\n  atomic_store_relaxed(&allocator_out_of_memory, 1);\n}\n\nbool AllocatorMayReturnNull() {\n  return atomic_load(&allocator_may_return_null, memory_order_relaxed);\n}\n\nvoid SetAllocatorMayReturnNull(bool may_return_null) {\n  atomic_store(&allocator_may_return_null, may_return_null,\n               memory_order_relaxed);\n}\n\nvoid PrintHintAllocatorCannotReturnNull() {\n  Report(\"HINT: if you don't care about these errors you may set \"\n         \"allocator_may_return_null=1\\n\");\n}\n\nstatic atomic_uint8_t rss_limit_exceeded;\n\nbool IsRssLimitExceeded() {\n  return atomic_load(&rss_limit_exceeded, memory_order_relaxed);\n}\n\nvoid SetRssLimitExceeded(bool limit_exceeded) {\n  atomic_store(&rss_limit_exceeded, limit_exceeded, memory_order_relaxed);\n}\n\n} // namespace __sanitizer\n"
  },
  {
    "path": "runtime/sanitizer_common/sanitizer_allocator.h",
    "content": "//===-- sanitizer_allocator.h -----------------------------------*- C++ -*-===//\n//\n// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n// See https://llvm.org/LICENSE.txt for license information.\n// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n//\n//===----------------------------------------------------------------------===//\n//\n// Specialized memory allocator for ThreadSanitizer, MemorySanitizer, etc.\n//\n//===----------------------------------------------------------------------===//\n\n#ifndef SANITIZER_ALLOCATOR_H\n#define SANITIZER_ALLOCATOR_H\n\n#include \"sanitizer_common.h\"\n#include \"sanitizer_flat_map.h\"\n#include \"sanitizer_internal_defs.h\"\n#include \"sanitizer_lfstack.h\"\n#include \"sanitizer_libc.h\"\n#include \"sanitizer_list.h\"\n#include \"sanitizer_local_address_space_view.h\"\n#include \"sanitizer_mutex.h\"\n#include \"sanitizer_procmaps.h\"\n#include \"sanitizer_type_traits.h\"\n\nnamespace __sanitizer {\n\n// Allows the tools to name their allocations appropriately.\nextern const char *PrimaryAllocatorName;\nextern const char *SecondaryAllocatorName;\n\n// Since flags are immutable and allocator behavior can be changed at runtime\n// (unit tests or ASan on Android are some examples), allocator_may_return_null\n// flag value is cached here and can be altered later.\nbool AllocatorMayReturnNull();\nvoid SetAllocatorMayReturnNull(bool may_return_null);\n\n// Returns true if allocator detected OOM condition. 
Can be used to avoid memory\n// hungry operations.\nbool IsAllocatorOutOfMemory();\n// Should be called by a particular allocator when OOM is detected.\nvoid SetAllocatorOutOfMemory();\n\nvoid PrintHintAllocatorCannotReturnNull();\n\n// Callback type for iterating over chunks.\ntypedef void (*ForEachChunkCallback)(uptr chunk, void *arg);\n\ninline u32 Rand(u32 *state) {  // ANSI C linear congruential PRNG.\n  return (*state = *state * 1103515245 + 12345) >> 16;\n}\n\ninline u32 RandN(u32 *state, u32 n) { return Rand(state) % n; }  // [0, n)\n\ntemplate<typename T>\ninline void RandomShuffle(T *a, u32 n, u32 *rand_state) {\n  if (n <= 1) return;\n  u32 state = *rand_state;\n  for (u32 i = n - 1; i > 0; i--)\n    Swap(a[i], a[RandN(&state, i + 1)]);\n  *rand_state = state;\n}\n\n#include \"sanitizer_allocator_size_class_map.h\"\n#include \"sanitizer_allocator_stats.h\"\n#include \"sanitizer_allocator_primary64.h\"\n#include \"sanitizer_allocator_primary32.h\"\n#include \"sanitizer_allocator_local_cache.h\"\n#include \"sanitizer_allocator_secondary.h\"\n#include \"sanitizer_allocator_combined.h\"\n\nbool IsRssLimitExceeded();\nvoid SetRssLimitExceeded(bool limit_exceeded);\n\n} // namespace __sanitizer\n\n#endif // SANITIZER_ALLOCATOR_H\n"
  },
  {
    "path": "runtime/sanitizer_common/sanitizer_allocator_bytemap.h",
    "content": "//===-- sanitizer_allocator_bytemap.h ---------------------------*- C++ -*-===//\n//\n// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n// See https://llvm.org/LICENSE.txt for license information.\n// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n//\n//===----------------------------------------------------------------------===//\n//\n// Part of the Sanitizer Allocator.\n//\n//===----------------------------------------------------------------------===//\n#ifndef SANITIZER_ALLOCATOR_H\n#error This file must be included inside sanitizer_allocator.h\n#endif\n\n// Maps integers in range [0, kSize) to u8 values.\ntemplate <u64 kSize, typename AddressSpaceViewTy = LocalAddressSpaceView>\nclass FlatByteMap {\n public:\n  using AddressSpaceView = AddressSpaceViewTy;\n  void Init() {\n    internal_memset(map_, 0, sizeof(map_));\n  }\n\n  void set(uptr idx, u8 val) {\n    CHECK_LT(idx, kSize);\n    CHECK_EQ(0U, map_[idx]);\n    map_[idx] = val;\n  }\n  u8 operator[] (uptr idx) {\n    CHECK_LT(idx, kSize);\n    // FIXME: CHECK may be too expensive here.\n    return map_[idx];\n  }\n private:\n  u8 map_[kSize];\n};\n\n// TwoLevelByteMap maps integers in range [0, kSize1*kSize2) to u8 values.\n// It is implemented as a two-dimensional array: array of kSize1 pointers\n// to kSize2-byte arrays. 
The secondary arrays are mmaped on demand.\n// Each value is initially zero and can be set to something else only once.\n// Setting and getting values from multiple threads is safe w/o extra locking.\ntemplate <u64 kSize1, u64 kSize2,\n          typename AddressSpaceViewTy = LocalAddressSpaceView,\n          class MapUnmapCallback = NoOpMapUnmapCallback>\nclass TwoLevelByteMap {\n public:\n  using AddressSpaceView = AddressSpaceViewTy;\n  void Init() {\n    internal_memset(map1_, 0, sizeof(map1_));\n    mu_.Init();\n  }\n\n  void TestOnlyUnmap() {\n    for (uptr i = 0; i < kSize1; i++) {\n      u8 *p = Get(i);\n      if (!p) continue;\n      MapUnmapCallback().OnUnmap(reinterpret_cast<uptr>(p), kSize2);\n      UnmapOrDie(p, kSize2);\n    }\n  }\n\n  uptr size() const { return kSize1 * kSize2; }\n  uptr size1() const { return kSize1; }\n  uptr size2() const { return kSize2; }\n\n  void set(uptr idx, u8 val) {\n    CHECK_LT(idx, kSize1 * kSize2);\n    u8 *map2 = GetOrCreate(idx / kSize2);\n    CHECK_EQ(0U, map2[idx % kSize2]);\n    map2[idx % kSize2] = val;\n  }\n\n  u8 operator[] (uptr idx) const {\n    CHECK_LT(idx, kSize1 * kSize2);\n    u8 *map2 = Get(idx / kSize2);\n    if (!map2) return 0;\n    auto value_ptr = AddressSpaceView::Load(&map2[idx % kSize2]);\n    return *value_ptr;\n  }\n\n private:\n  u8 *Get(uptr idx) const {\n    CHECK_LT(idx, kSize1);\n    return reinterpret_cast<u8 *>(\n        atomic_load(&map1_[idx], memory_order_acquire));\n  }\n\n  u8 *GetOrCreate(uptr idx) {\n    u8 *res = Get(idx);\n    if (!res) {\n      SpinMutexLock l(&mu_);\n      if (!(res = Get(idx))) {\n        res = (u8*)MmapOrDie(kSize2, \"TwoLevelByteMap\");\n        MapUnmapCallback().OnMap(reinterpret_cast<uptr>(res), kSize2);\n        atomic_store(&map1_[idx], reinterpret_cast<uptr>(res),\n                     memory_order_release);\n      }\n    }\n    return res;\n  }\n\n  atomic_uintptr_t map1_[kSize1];\n  StaticSpinMutex mu_;\n};\n\n"
  },
  {
    "path": "runtime/sanitizer_common/sanitizer_allocator_checks.cpp",
    "content": "//===-- sanitizer_allocator_checks.cpp --------------------------*- C++ -*-===//\n//\n// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n// See https://llvm.org/LICENSE.txt for license information.\n// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n//\n//===----------------------------------------------------------------------===//\n//\n// Various checks shared between ThreadSanitizer, MemorySanitizer, etc. memory\n// allocators.\n//\n//===----------------------------------------------------------------------===//\n\n#include \"sanitizer_errno.h\"\n\nnamespace __sanitizer {\n\nvoid SetErrnoToENOMEM() {\n  errno = errno_ENOMEM;\n}\n\n} // namespace __sanitizer\n"
  },
  {
    "path": "runtime/sanitizer_common/sanitizer_allocator_checks.h",
    "content": "//===-- sanitizer_allocator_checks.h ----------------------------*- C++ -*-===//\n//\n// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n// See https://llvm.org/LICENSE.txt for license information.\n// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n//\n//===----------------------------------------------------------------------===//\n//\n// Various checks shared between ThreadSanitizer, MemorySanitizer, etc. memory\n// allocators.\n//\n//===----------------------------------------------------------------------===//\n\n#ifndef SANITIZER_ALLOCATOR_CHECKS_H\n#define SANITIZER_ALLOCATOR_CHECKS_H\n\n#include \"sanitizer_internal_defs.h\"\n#include \"sanitizer_common.h\"\n#include \"sanitizer_platform.h\"\n\nnamespace __sanitizer {\n\n// The following is defined in a separate compilation unit to avoid pulling in\n// sanitizer_errno.h in this header, which leads to conflicts when other system\n// headers include errno.h. This is usually the result of an unlikely event,\n// and as such we do not care as much about having it inlined.\nvoid SetErrnoToENOMEM();\n\n// A common errno setting logic shared by almost all sanitizer allocator APIs.\ninline void *SetErrnoOnNull(void *ptr) {\n  if (UNLIKELY(!ptr))\n    SetErrnoToENOMEM();\n  return ptr;\n}\n\n// In case of the check failure, the caller of the following Check... 
functions\n// should \"return POLICY::OnBadRequest();\" where POLICY is the current allocator\n// failure handling policy.\n\n// Checks aligned_alloc() parameters, verifies that the alignment is a power of\n// two and that the size is a multiple of alignment for POSIX implementation,\n// and a bit relaxed requirement for non-POSIX ones, that the size is a multiple\n// of alignment.\ninline bool CheckAlignedAllocAlignmentAndSize(uptr alignment, uptr size) {\n#if SANITIZER_POSIX\n  return alignment != 0 && IsPowerOfTwo(alignment) &&\n         (size & (alignment - 1)) == 0;\n#else\n  return alignment != 0 && size % alignment == 0;\n#endif\n}\n\n// Checks posix_memalign() parameters, verifies that alignment is a power of two\n// and a multiple of sizeof(void *).\ninline bool CheckPosixMemalignAlignment(uptr alignment) {\n  return alignment != 0 && IsPowerOfTwo(alignment) &&\n         (alignment % sizeof(void *)) == 0;\n}\n\n// Returns true if calloc(size, n) call overflows on size*n calculation.\ninline bool CheckForCallocOverflow(uptr size, uptr n) {\n  if (!size)\n    return false;\n  uptr max = (uptr)-1L;\n  return (max / size) < n;\n}\n\n// Returns true if the size passed to pvalloc overflows when rounded to the next\n// multiple of page_size.\ninline bool CheckForPvallocOverflow(uptr size, uptr page_size) {\n  return RoundUpTo(size, page_size) < size;\n}\n\n} // namespace __sanitizer\n\n#endif  // SANITIZER_ALLOCATOR_CHECKS_H\n"
  },
  {
    "path": "runtime/sanitizer_common/sanitizer_allocator_combined.h",
    "content": "//===-- sanitizer_allocator_combined.h --------------------------*- C++ -*-===//\n//\n// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n// See https://llvm.org/LICENSE.txt for license information.\n// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n//\n//===----------------------------------------------------------------------===//\n//\n// Part of the Sanitizer Allocator.\n//\n//===----------------------------------------------------------------------===//\n#ifndef SANITIZER_ALLOCATOR_H\n#error This file must be included inside sanitizer_allocator.h\n#endif\n\n// This class implements a complete memory allocator by using two\n// internal allocators:\n// PrimaryAllocator is efficient, but may not allocate some sizes (alignments).\n//  When allocating 2^x bytes it should return 2^x aligned chunk.\n// PrimaryAllocator is used via a local AllocatorCache.\n// SecondaryAllocator can allocate anything, but is not efficient.\ntemplate <class PrimaryAllocator,\n          class LargeMmapAllocatorPtrArray = DefaultLargeMmapAllocatorPtrArray>\nclass CombinedAllocator {\n public:\n  using AllocatorCache = typename PrimaryAllocator::AllocatorCache;\n  using SecondaryAllocator =\n      LargeMmapAllocator<typename PrimaryAllocator::MapUnmapCallback,\n                         LargeMmapAllocatorPtrArray,\n                         typename PrimaryAllocator::AddressSpaceView>;\n\n  void InitLinkerInitialized(s32 release_to_os_interval_ms) {\n    stats_.InitLinkerInitialized();\n    primary_.Init(release_to_os_interval_ms);\n    secondary_.InitLinkerInitialized();\n  }\n\n  void Init(s32 release_to_os_interval_ms, uptr heap_start = 0) {\n    stats_.Init();\n    primary_.Init(release_to_os_interval_ms, heap_start);\n    secondary_.Init();\n  }\n\n  void *Allocate(AllocatorCache *cache, uptr size, uptr alignment) {\n    // Returning 0 on malloc(0) may break a lot of code.\n    if (size == 0)\n      size = 1;\n    if (size + 
alignment < size) {\n      Report(\"WARNING: %s: CombinedAllocator allocation overflow: \"\n             \"0x%zx bytes with 0x%zx alignment requested\\n\",\n             SanitizerToolName, size, alignment);\n      return nullptr;\n    }\n    uptr original_size = size;\n    // If alignment requirements are to be fulfilled by the frontend allocator\n    // rather than by the primary or secondary, passing an alignment lower than\n    // or equal to 8 will prevent any further rounding up, as well as the later\n    // alignment check.\n    if (alignment > 8)\n      size = RoundUpTo(size, alignment);\n    // The primary allocator should return a 2^x aligned allocation when\n    // requested 2^x bytes, hence using the rounded up 'size' when being\n    // serviced by the primary (this is no longer true when the primary is\n    // using a non-fixed base address). The secondary takes care of the\n    // alignment without such requirement, and allocating 'size' would use\n    // extraneous memory, so we employ 'original_size'.\n    void *res;\n    if (primary_.CanAllocate(size, alignment))\n      res = cache->Allocate(&primary_, primary_.ClassID(size));\n    else\n      res = secondary_.Allocate(&stats_, original_size, alignment);\n    if (alignment > 8)\n      CHECK_EQ(reinterpret_cast<uptr>(res) & (alignment - 1), 0);\n    return res;\n  }\n\n  s32 ReleaseToOSIntervalMs() const {\n    return primary_.ReleaseToOSIntervalMs();\n  }\n\n  void SetReleaseToOSIntervalMs(s32 release_to_os_interval_ms) {\n    primary_.SetReleaseToOSIntervalMs(release_to_os_interval_ms);\n  }\n\n  void ForceReleaseToOS() {\n    primary_.ForceReleaseToOS();\n  }\n\n  void Deallocate(AllocatorCache *cache, void *p) {\n    if (!p) return;\n    if (primary_.PointerIsMine(p))\n      cache->Deallocate(&primary_, primary_.GetSizeClass(p), p);\n    else\n      secondary_.Deallocate(&stats_, p);\n  }\n\n  void *Reallocate(AllocatorCache *cache, void *p, uptr new_size,\n                   uptr alignment) {\n  
  if (!p)\n      return Allocate(cache, new_size, alignment);\n    if (!new_size) {\n      Deallocate(cache, p);\n      return nullptr;\n    }\n    CHECK(PointerIsMine(p));\n    uptr old_size = GetActuallyAllocatedSize(p);\n    uptr memcpy_size = Min(new_size, old_size);\n    void *new_p = Allocate(cache, new_size, alignment);\n    if (new_p)\n      internal_memcpy(new_p, p, memcpy_size);\n    Deallocate(cache, p);\n    return new_p;\n  }\n\n  bool PointerIsMine(const void *p) const {\n    if (primary_.PointerIsMine(p))\n      return true;\n    return secondary_.PointerIsMine(p);\n  }\n\n  bool FromPrimary(const void *p) const { return primary_.PointerIsMine(p); }\n\n  void *GetMetaData(const void *p) {\n    if (primary_.PointerIsMine(p))\n      return primary_.GetMetaData(p);\n    return secondary_.GetMetaData(p);\n  }\n\n  void *GetBlockBegin(const void *p) {\n    if (primary_.PointerIsMine(p))\n      return primary_.GetBlockBegin(p);\n    return secondary_.GetBlockBegin(p);\n  }\n\n  // This function does the same as GetBlockBegin, but is much faster.\n  // Must be called with the allocator locked.\n  void *GetBlockBeginFastLocked(void *p) {\n    if (primary_.PointerIsMine(p))\n      return primary_.GetBlockBegin(p);\n    return secondary_.GetBlockBeginFastLocked(p);\n  }\n\n  uptr GetActuallyAllocatedSize(void *p) {\n    if (primary_.PointerIsMine(p))\n      return primary_.GetActuallyAllocatedSize(p);\n    return secondary_.GetActuallyAllocatedSize(p);\n  }\n\n  uptr TotalMemoryUsed() {\n    return primary_.TotalMemoryUsed() + secondary_.TotalMemoryUsed();\n  }\n\n  void TestOnlyUnmap() { primary_.TestOnlyUnmap(); }\n\n  void InitCache(AllocatorCache *cache) {\n    cache->Init(&stats_);\n  }\n\n  void DestroyCache(AllocatorCache *cache) {\n    cache->Destroy(&primary_, &stats_);\n  }\n\n  void SwallowCache(AllocatorCache *cache) {\n    cache->Drain(&primary_);\n  }\n\n  void GetStats(AllocatorStatCounters s) const {\n    stats_.Get(s);\n  }\n\n  void 
PrintStats() {\n    primary_.PrintStats();\n    secondary_.PrintStats();\n  }\n\n  // ForceLock() and ForceUnlock() are needed to implement Darwin malloc zone\n  // introspection API.\n  void ForceLock() SANITIZER_NO_THREAD_SAFETY_ANALYSIS {\n    primary_.ForceLock();\n    secondary_.ForceLock();\n  }\n\n  void ForceUnlock() SANITIZER_NO_THREAD_SAFETY_ANALYSIS {\n    secondary_.ForceUnlock();\n    primary_.ForceUnlock();\n  }\n\n  // Iterate over all existing chunks.\n  // The allocator must be locked when calling this function.\n  void ForEachChunk(ForEachChunkCallback callback, void *arg) {\n    primary_.ForEachChunk(callback, arg);\n    secondary_.ForEachChunk(callback, arg);\n  }\n\n private:\n  PrimaryAllocator primary_;\n  SecondaryAllocator secondary_;\n  AllocatorGlobalStats stats_;\n};\n"
  },
  {
    "path": "runtime/sanitizer_common/sanitizer_allocator_dlsym.h",
    "content": "//===-- sanitizer_allocator_dlsym.h -----------------------------*- C++ -*-===//\n//\n// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n// See https://llvm.org/LICENSE.txt for license information.\n// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n//\n//===----------------------------------------------------------------------===//\n//\n// Hack: Sanitizer initializer calls dlsym which may need to allocate and call\n// back into uninitialized sanitizer.\n//\n//===----------------------------------------------------------------------===//\n\n#ifndef SANITIZER_ALLOCATOR_DLSYM_H\n#define SANITIZER_ALLOCATOR_DLSYM_H\n\n#include \"sanitizer_allocator_internal.h\"\n\nnamespace __sanitizer {\n\ntemplate <typename Details>\nstruct DlSymAllocator {\n  static bool Use() {\n    // Fuchsia doesn't use dlsym-based interceptors.\n    return !SANITIZER_FUCHSIA && UNLIKELY(Details::UseImpl());\n  }\n\n  static bool PointerIsMine(const void *ptr) {\n    // Fuchsia doesn't use dlsym-based interceptors.\n    return !SANITIZER_FUCHSIA &&\n           UNLIKELY(internal_allocator()->FromPrimary(ptr));\n  }\n\n  static void *Allocate(uptr size_in_bytes) {\n    void *ptr = InternalAlloc(size_in_bytes, nullptr, kWordSize);\n    CHECK(internal_allocator()->FromPrimary(ptr));\n    Details::OnAllocate(ptr,\n                        internal_allocator()->GetActuallyAllocatedSize(ptr));\n    return ptr;\n  }\n\n  static void *Callocate(SIZE_T nmemb, SIZE_T size) {\n    void *ptr = InternalCalloc(nmemb, size);\n    CHECK(internal_allocator()->FromPrimary(ptr));\n    Details::OnAllocate(ptr,\n                        internal_allocator()->GetActuallyAllocatedSize(ptr));\n    return ptr;\n  }\n\n  static void Free(void *ptr) {\n    uptr size = internal_allocator()->GetActuallyAllocatedSize(ptr);\n    Details::OnFree(ptr, size);\n    InternalFree(ptr);\n  }\n\n  static void *Realloc(void *ptr, uptr new_size) {\n    if (!ptr)\n      return 
Allocate(new_size);\n    CHECK(internal_allocator()->FromPrimary(ptr));\n    if (!new_size) {\n      Free(ptr);\n      return nullptr;\n    }\n    uptr size = internal_allocator()->GetActuallyAllocatedSize(ptr);\n    uptr memcpy_size = Min(new_size, size);\n    void *new_ptr = Allocate(new_size);\n    if (new_ptr)\n      internal_memcpy(new_ptr, ptr, memcpy_size);\n    Free(ptr);\n    return new_ptr;\n  }\n\n  static void OnAllocate(const void *ptr, uptr size) {}\n  static void OnFree(const void *ptr, uptr size) {}\n};\n\n}  // namespace __sanitizer\n\n#endif  // SANITIZER_ALLOCATOR_DLSYM_H\n"
  },
  {
    "path": "runtime/sanitizer_common/sanitizer_allocator_interface.h",
    "content": "//===-- sanitizer_allocator_interface.h ------------------------- C++ -----===//\n//\n// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n// See https://llvm.org/LICENSE.txt for license information.\n// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n//\n//===----------------------------------------------------------------------===//\n//\n// Re-declaration of functions from public sanitizer allocator interface.\n//\n//===----------------------------------------------------------------------===//\n\n#ifndef SANITIZER_ALLOCATOR_INTERFACE_H\n#define SANITIZER_ALLOCATOR_INTERFACE_H\n\n#include \"sanitizer_internal_defs.h\"\n\nusing __sanitizer::uptr;\n\nextern \"C\" {\nSANITIZER_INTERFACE_ATTRIBUTE\nuptr __sanitizer_get_estimated_allocated_size(uptr size);\nSANITIZER_INTERFACE_ATTRIBUTE int __sanitizer_get_ownership(const void *p);\nSANITIZER_INTERFACE_ATTRIBUTE uptr\n__sanitizer_get_allocated_size(const void *p);\nSANITIZER_INTERFACE_ATTRIBUTE uptr __sanitizer_get_current_allocated_bytes();\nSANITIZER_INTERFACE_ATTRIBUTE uptr __sanitizer_get_heap_size();\nSANITIZER_INTERFACE_ATTRIBUTE uptr __sanitizer_get_free_bytes();\nSANITIZER_INTERFACE_ATTRIBUTE uptr __sanitizer_get_unmapped_bytes();\n\nSANITIZER_INTERFACE_ATTRIBUTE int __sanitizer_install_malloc_and_free_hooks(\n    void (*malloc_hook)(const void *, uptr),\n    void (*free_hook)(const void *));\n\nSANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE\n    void __sanitizer_malloc_hook(void *ptr, uptr size);\nSANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE\n    void __sanitizer_free_hook(void *ptr);\n\nSANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void\n__sanitizer_purge_allocator();\n\nSANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void\n__sanitizer_print_memory_profile(uptr top_percent, uptr max_number_of_contexts);\n}  // extern \"C\"\n\n#endif  // SANITIZER_ALLOCATOR_INTERFACE_H\n"
  },
  {
    "path": "runtime/sanitizer_common/sanitizer_allocator_internal.h",
    "content": "//===-- sanitizer_allocator_internal.h --------------------------*- C++ -*-===//\n//\n// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n// See https://llvm.org/LICENSE.txt for license information.\n// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n//\n//===----------------------------------------------------------------------===//\n//\n// This allocator is used inside run-times.\n//\n//===----------------------------------------------------------------------===//\n\n#ifndef SANITIZER_ALLOCATOR_INTERNAL_H\n#define SANITIZER_ALLOCATOR_INTERNAL_H\n\n#include \"sanitizer_allocator.h\"\n#include \"sanitizer_internal_defs.h\"\n\nnamespace __sanitizer {\n\n// FIXME: Check if we may use even more compact size class map for internal\n// purposes.\ntypedef CompactSizeClassMap InternalSizeClassMap;\n\nstruct AP32 {\n  static const uptr kSpaceBeg = 0;\n  static const u64 kSpaceSize = SANITIZER_MMAP_RANGE_SIZE;\n  static const uptr kMetadataSize = 0;\n  typedef InternalSizeClassMap SizeClassMap;\n  static const uptr kRegionSizeLog = 20;\n  using AddressSpaceView = LocalAddressSpaceView;\n  typedef NoOpMapUnmapCallback MapUnmapCallback;\n  static const uptr kFlags = 0;\n};\ntypedef SizeClassAllocator32<AP32> PrimaryInternalAllocator;\n\ntypedef CombinedAllocator<PrimaryInternalAllocator,\n                          LargeMmapAllocatorPtrArrayStatic>\n    InternalAllocator;\ntypedef InternalAllocator::AllocatorCache InternalAllocatorCache;\n\nvoid *InternalAlloc(uptr size, InternalAllocatorCache *cache = nullptr,\n                    uptr alignment = 0);\nvoid *InternalRealloc(void *p, uptr size,\n                      InternalAllocatorCache *cache = nullptr);\nvoid *InternalReallocArray(void *p, uptr count, uptr size,\n                           InternalAllocatorCache *cache = nullptr);\nvoid *InternalCalloc(uptr count, uptr size,\n                     InternalAllocatorCache *cache = nullptr);\nvoid InternalFree(void *p, 
InternalAllocatorCache *cache = nullptr);\nvoid InternalAllocatorLock();\nvoid InternalAllocatorUnlock();\nInternalAllocator *internal_allocator();\n\n} // namespace __sanitizer\n\n#endif // SANITIZER_ALLOCATOR_INTERNAL_H\n"
  },
  {
    "path": "runtime/sanitizer_common/sanitizer_allocator_local_cache.h",
    "content": "//===-- sanitizer_allocator_local_cache.h -----------------------*- C++ -*-===//\n//\n// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n// See https://llvm.org/LICENSE.txt for license information.\n// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n//\n//===----------------------------------------------------------------------===//\n//\n// Part of the Sanitizer Allocator.\n//\n//===----------------------------------------------------------------------===//\n#ifndef SANITIZER_ALLOCATOR_H\n#error This file must be included inside sanitizer_allocator.h\n#endif\n\n// Cache used by SizeClassAllocator64.\ntemplate <class SizeClassAllocator>\nstruct SizeClassAllocator64LocalCache {\n  typedef SizeClassAllocator Allocator;\n  typedef MemoryMapper<Allocator> MemoryMapperT;\n\n  void Init(AllocatorGlobalStats *s) {\n    stats_.Init();\n    if (s)\n      s->Register(&stats_);\n  }\n\n  void Destroy(SizeClassAllocator *allocator, AllocatorGlobalStats *s) {\n    Drain(allocator);\n    if (s)\n      s->Unregister(&stats_);\n  }\n\n  void *Allocate(SizeClassAllocator *allocator, uptr class_id) {\n    CHECK_NE(class_id, 0UL);\n    CHECK_LT(class_id, kNumClasses);\n    PerClass *c = &per_class_[class_id];\n    if (UNLIKELY(c->count == 0)) {\n      if (UNLIKELY(!Refill(c, allocator, class_id)))\n        return nullptr;\n      DCHECK_GT(c->count, 0);\n    }\n    CompactPtrT chunk = c->chunks[--c->count];\n    stats_.Add(AllocatorStatAllocated, c->class_size);\n    return reinterpret_cast<void *>(allocator->CompactPtrToPointer(\n        allocator->GetRegionBeginBySizeClass(class_id), chunk));\n  }\n\n  void Deallocate(SizeClassAllocator *allocator, uptr class_id, void *p) {\n    CHECK_NE(class_id, 0UL);\n    CHECK_LT(class_id, kNumClasses);\n    // If the first allocator call on a new thread is a deallocation, then\n    // max_count will be zero, leading to check failure.\n    PerClass *c = &per_class_[class_id];\n    
InitCache(c);\n    if (UNLIKELY(c->count == c->max_count))\n      DrainHalfMax(c, allocator, class_id);\n    CompactPtrT chunk = allocator->PointerToCompactPtr(\n        allocator->GetRegionBeginBySizeClass(class_id),\n        reinterpret_cast<uptr>(p));\n    c->chunks[c->count++] = chunk;\n    stats_.Sub(AllocatorStatAllocated, c->class_size);\n  }\n\n  void Drain(SizeClassAllocator *allocator) {\n    MemoryMapperT memory_mapper(*allocator);\n    for (uptr i = 1; i < kNumClasses; i++) {\n      PerClass *c = &per_class_[i];\n      while (c->count > 0) Drain(&memory_mapper, c, allocator, i, c->count);\n    }\n  }\n\n private:\n  typedef typename Allocator::SizeClassMapT SizeClassMap;\n  static const uptr kNumClasses = SizeClassMap::kNumClasses;\n  typedef typename Allocator::CompactPtrT CompactPtrT;\n\n  struct PerClass {\n    u32 count;\n    u32 max_count;\n    uptr class_size;\n    CompactPtrT chunks[2 * SizeClassMap::kMaxNumCachedHint];\n  };\n  PerClass per_class_[kNumClasses];\n  AllocatorStats stats_;\n\n  void InitCache(PerClass *c) {\n    if (LIKELY(c->max_count))\n      return;\n    for (uptr i = 1; i < kNumClasses; i++) {\n      PerClass *c = &per_class_[i];\n      const uptr size = Allocator::ClassIdToSize(i);\n      c->max_count = 2 * SizeClassMap::MaxCachedHint(size);\n      c->class_size = size;\n    }\n    DCHECK_NE(c->max_count, 0UL);\n  }\n\n  NOINLINE bool Refill(PerClass *c, SizeClassAllocator *allocator,\n                       uptr class_id) {\n    InitCache(c);\n    const uptr num_requested_chunks = c->max_count / 2;\n    if (UNLIKELY(!allocator->GetFromAllocator(&stats_, class_id, c->chunks,\n                                              num_requested_chunks)))\n      return false;\n    c->count = num_requested_chunks;\n    return true;\n  }\n\n  NOINLINE void DrainHalfMax(PerClass *c, SizeClassAllocator *allocator,\n                             uptr class_id) {\n    MemoryMapperT memory_mapper(*allocator);\n    Drain(&memory_mapper, c, 
allocator, class_id, c->max_count / 2);\n  }\n\n  void Drain(MemoryMapperT *memory_mapper, PerClass *c,\n             SizeClassAllocator *allocator, uptr class_id, uptr count) {\n    CHECK_GE(c->count, count);\n    const uptr first_idx_to_drain = c->count - count;\n    c->count -= count;\n    allocator->ReturnToAllocator(memory_mapper, &stats_, class_id,\n                                 &c->chunks[first_idx_to_drain], count);\n  }\n};\n\n// Cache used by SizeClassAllocator32.\ntemplate <class SizeClassAllocator>\nstruct SizeClassAllocator32LocalCache {\n  typedef SizeClassAllocator Allocator;\n  typedef typename Allocator::TransferBatch TransferBatch;\n\n  void Init(AllocatorGlobalStats *s) {\n    stats_.Init();\n    if (s)\n      s->Register(&stats_);\n  }\n\n  // Returns a TransferBatch suitable for class_id.\n  TransferBatch *CreateBatch(uptr class_id, SizeClassAllocator *allocator,\n                             TransferBatch *b) {\n    if (uptr batch_class_id = per_class_[class_id].batch_class_id)\n      return (TransferBatch*)Allocate(allocator, batch_class_id);\n    return b;\n  }\n\n  // Destroys TransferBatch b.\n  void DestroyBatch(uptr class_id, SizeClassAllocator *allocator,\n                    TransferBatch *b) {\n    if (uptr batch_class_id = per_class_[class_id].batch_class_id)\n      Deallocate(allocator, batch_class_id, b);\n  }\n\n  void Destroy(SizeClassAllocator *allocator, AllocatorGlobalStats *s) {\n    Drain(allocator);\n    if (s)\n      s->Unregister(&stats_);\n  }\n\n  void *Allocate(SizeClassAllocator *allocator, uptr class_id) {\n    CHECK_NE(class_id, 0UL);\n    CHECK_LT(class_id, kNumClasses);\n    PerClass *c = &per_class_[class_id];\n    if (UNLIKELY(c->count == 0)) {\n      if (UNLIKELY(!Refill(c, allocator, class_id)))\n        return nullptr;\n      DCHECK_GT(c->count, 0);\n    }\n    void *res = c->batch[--c->count];\n    PREFETCH(c->batch[c->count - 1]);\n    stats_.Add(AllocatorStatAllocated, c->class_size);\n    return res;\n 
 }\n\n  void Deallocate(SizeClassAllocator *allocator, uptr class_id, void *p) {\n    CHECK_NE(class_id, 0UL);\n    CHECK_LT(class_id, kNumClasses);\n    // If the first allocator call on a new thread is a deallocation, then\n    // max_count will be zero, leading to check failure.\n    PerClass *c = &per_class_[class_id];\n    InitCache(c);\n    if (UNLIKELY(c->count == c->max_count))\n      Drain(c, allocator, class_id);\n    c->batch[c->count++] = p;\n    stats_.Sub(AllocatorStatAllocated, c->class_size);\n  }\n\n  void Drain(SizeClassAllocator *allocator) {\n    for (uptr i = 1; i < kNumClasses; i++) {\n      PerClass *c = &per_class_[i];\n      while (c->count > 0)\n        Drain(c, allocator, i);\n    }\n  }\n\n private:\n  typedef typename Allocator::SizeClassMapT SizeClassMap;\n  static const uptr kBatchClassID = SizeClassMap::kBatchClassID;\n  static const uptr kNumClasses = SizeClassMap::kNumClasses;\n  // If kUseSeparateSizeClassForBatch is true, all TransferBatch objects are\n  // allocated from kBatchClassID size class (except for those that are needed\n  // for kBatchClassID itself). 
The goal is to have TransferBatches in a totally\n  // different region of RAM to improve security.\n  static const bool kUseSeparateSizeClassForBatch =\n      Allocator::kUseSeparateSizeClassForBatch;\n\n  struct PerClass {\n    uptr count;\n    uptr max_count;\n    uptr class_size;\n    uptr batch_class_id;\n    void *batch[2 * TransferBatch::kMaxNumCached];\n  };\n  PerClass per_class_[kNumClasses];\n  AllocatorStats stats_;\n\n  void InitCache(PerClass *c) {\n    if (LIKELY(c->max_count))\n      return;\n    const uptr batch_class_id = SizeClassMap::ClassID(sizeof(TransferBatch));\n    for (uptr i = 1; i < kNumClasses; i++) {\n      PerClass *c = &per_class_[i];\n      const uptr size = Allocator::ClassIdToSize(i);\n      const uptr max_cached = TransferBatch::MaxCached(size);\n      c->max_count = 2 * max_cached;\n      c->class_size = size;\n      // Precompute the class id to use to store batches for the current class\n      // id. 0 means the class size is large enough to store a batch within one\n      // of the chunks. If using a separate size class, it will always be\n      // kBatchClassID, except for kBatchClassID itself.\n      if (kUseSeparateSizeClassForBatch) {\n        c->batch_class_id = (i == kBatchClassID) ? 
0 : kBatchClassID;\n      } else {\n        c->batch_class_id = (size <\n          TransferBatch::AllocationSizeRequiredForNElements(max_cached)) ?\n              batch_class_id : 0;\n      }\n    }\n    DCHECK_NE(c->max_count, 0UL);\n  }\n\n  NOINLINE bool Refill(PerClass *c, SizeClassAllocator *allocator,\n                       uptr class_id) {\n    InitCache(c);\n    TransferBatch *b = allocator->AllocateBatch(&stats_, this, class_id);\n    if (UNLIKELY(!b))\n      return false;\n    CHECK_GT(b->Count(), 0);\n    b->CopyToArray(c->batch);\n    c->count = b->Count();\n    DestroyBatch(class_id, allocator, b);\n    return true;\n  }\n\n  NOINLINE void Drain(PerClass *c, SizeClassAllocator *allocator,\n                      uptr class_id) {\n    const uptr count = Min(c->max_count / 2, c->count);\n    const uptr first_idx_to_drain = c->count - count;\n    TransferBatch *b = CreateBatch(\n        class_id, allocator, (TransferBatch *)c->batch[first_idx_to_drain]);\n    // Failure to allocate a batch while releasing memory is non recoverable.\n    // TODO(alekseys): Figure out how to do it without allocating a new batch.\n    if (UNLIKELY(!b)) {\n      Report(\"FATAL: Internal error: %s's allocator failed to allocate a \"\n             \"transfer batch.\\n\", SanitizerToolName);\n      Die();\n    }\n    b->SetFromArray(&c->batch[first_idx_to_drain], count);\n    c->count -= count;\n    allocator->DeallocateBatch(&stats_, class_id, b);\n  }\n};\n"
  },
  {
    "path": "runtime/sanitizer_common/sanitizer_allocator_primary32.h",
    "content": "//===-- sanitizer_allocator_primary32.h -------------------------*- C++ -*-===//\n//\n// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n// See https://llvm.org/LICENSE.txt for license information.\n// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n//\n//===----------------------------------------------------------------------===//\n//\n// Part of the Sanitizer Allocator.\n//\n//===----------------------------------------------------------------------===//\n#ifndef SANITIZER_ALLOCATOR_H\n#error This file must be included inside sanitizer_allocator.h\n#endif\n\ntemplate<class SizeClassAllocator> struct SizeClassAllocator32LocalCache;\n\n// SizeClassAllocator32 -- allocator for 32-bit address space.\n// This allocator can theoretically be used on 64-bit arch, but there it is less\n// efficient than SizeClassAllocator64.\n//\n// [kSpaceBeg, kSpaceBeg + kSpaceSize) is the range of addresses which can\n// be returned by MmapOrDie().\n//\n// Region:\n//   a result of a single call to MmapAlignedOrDieOnFatalError(kRegionSize,\n//                                                             kRegionSize).\n// Since the regions are aligned by kRegionSize, there are exactly\n// kNumPossibleRegions possible regions in the address space and so we keep\n// a ByteMap possible_regions to store the size classes of each Region.\n// 0 size class means the region is not used by the allocator.\n//\n// One Region is used to allocate chunks of a single size class.\n// A Region looks like this:\n// UserChunk1 .. UserChunkN <gap> MetaChunkN .. 
MetaChunk1\n//\n// In order to avoid false sharing the objects of this class should be\n// chache-line aligned.\n\nstruct SizeClassAllocator32FlagMasks {  //  Bit masks.\n  enum {\n    kRandomShuffleChunks = 1,\n    kUseSeparateSizeClassForBatch = 2,\n  };\n};\n\ntemplate <class Params>\nclass SizeClassAllocator32 {\n private:\n  static const u64 kTwoLevelByteMapSize1 =\n      (Params::kSpaceSize >> Params::kRegionSizeLog) >> 12;\n  static const u64 kMinFirstMapSizeTwoLevelByteMap = 4;\n\n public:\n  using AddressSpaceView = typename Params::AddressSpaceView;\n  static const uptr kSpaceBeg = Params::kSpaceBeg;\n  static const u64 kSpaceSize = Params::kSpaceSize;\n  static const uptr kMetadataSize = Params::kMetadataSize;\n  typedef typename Params::SizeClassMap SizeClassMap;\n  static const uptr kRegionSizeLog = Params::kRegionSizeLog;\n  typedef typename Params::MapUnmapCallback MapUnmapCallback;\n  using ByteMap = typename conditional<\n      (kTwoLevelByteMapSize1 < kMinFirstMapSizeTwoLevelByteMap),\n      FlatByteMap<(Params::kSpaceSize >> Params::kRegionSizeLog),\n                  AddressSpaceView>,\n      TwoLevelByteMap<kTwoLevelByteMapSize1, 1 << 12, AddressSpaceView>>::type;\n\n  COMPILER_CHECK(!SANITIZER_SIGN_EXTENDED_ADDRESSES ||\n                 (kSpaceSize & (kSpaceSize - 1)) == 0);\n\n  static const bool kRandomShuffleChunks = Params::kFlags &\n      SizeClassAllocator32FlagMasks::kRandomShuffleChunks;\n  static const bool kUseSeparateSizeClassForBatch = Params::kFlags &\n      SizeClassAllocator32FlagMasks::kUseSeparateSizeClassForBatch;\n\n  struct TransferBatch {\n    static const uptr kMaxNumCached = SizeClassMap::kMaxNumCachedHint - 2;\n    void SetFromArray(void *batch[], uptr count) {\n      DCHECK_LE(count, kMaxNumCached);\n      count_ = count;\n      for (uptr i = 0; i < count; i++)\n        batch_[i] = batch[i];\n    }\n    uptr Count() const { return count_; }\n    void Clear() { count_ = 0; }\n    void Add(void *ptr) {\n      
batch_[count_++] = ptr;\n      DCHECK_LE(count_, kMaxNumCached);\n    }\n    void CopyToArray(void *to_batch[]) const {\n      for (uptr i = 0, n = Count(); i < n; i++)\n        to_batch[i] = batch_[i];\n    }\n\n    // How much memory do we need for a batch containing n elements.\n    static uptr AllocationSizeRequiredForNElements(uptr n) {\n      return sizeof(uptr) * 2 + sizeof(void *) * n;\n    }\n    static uptr MaxCached(uptr size) {\n      return Min(kMaxNumCached, SizeClassMap::MaxCachedHint(size));\n    }\n\n    TransferBatch *next;\n\n   private:\n    uptr count_;\n    void *batch_[kMaxNumCached];\n  };\n\n  static const uptr kBatchSize = sizeof(TransferBatch);\n  COMPILER_CHECK((kBatchSize & (kBatchSize - 1)) == 0);\n  COMPILER_CHECK(kBatchSize == SizeClassMap::kMaxNumCachedHint * sizeof(uptr));\n\n  static uptr ClassIdToSize(uptr class_id) {\n    return (class_id == SizeClassMap::kBatchClassID) ?\n        kBatchSize : SizeClassMap::Size(class_id);\n  }\n\n  typedef SizeClassAllocator32<Params> ThisT;\n  typedef SizeClassAllocator32LocalCache<ThisT> AllocatorCache;\n\n  void Init(s32 release_to_os_interval_ms, uptr heap_start = 0) {\n    CHECK(!heap_start);\n    possible_regions.Init();\n    internal_memset(size_class_info_array, 0, sizeof(size_class_info_array));\n  }\n\n  s32 ReleaseToOSIntervalMs() const {\n    return kReleaseToOSIntervalNever;\n  }\n\n  void SetReleaseToOSIntervalMs(s32 release_to_os_interval_ms) {\n    // This is empty here. 
Currently only implemented in 64-bit allocator.\n  }\n\n  void ForceReleaseToOS() {\n    // Currently implemented in 64-bit allocator only.\n  }\n\n  void *MapWithCallback(uptr size) {\n    void *res = MmapOrDie(size, PrimaryAllocatorName);\n    MapUnmapCallback().OnMap((uptr)res, size);\n    return res;\n  }\n\n  void UnmapWithCallback(uptr beg, uptr size) {\n    MapUnmapCallback().OnUnmap(beg, size);\n    UnmapOrDie(reinterpret_cast<void *>(beg), size);\n  }\n\n  static bool CanAllocate(uptr size, uptr alignment) {\n    return size <= SizeClassMap::kMaxSize &&\n      alignment <= SizeClassMap::kMaxSize;\n  }\n\n  void *GetMetaData(const void *p) {\n    CHECK(kMetadataSize);\n    CHECK(PointerIsMine(p));\n    uptr mem = reinterpret_cast<uptr>(p);\n    uptr beg = ComputeRegionBeg(mem);\n    uptr size = ClassIdToSize(GetSizeClass(p));\n    u32 offset = mem - beg;\n    uptr n = offset / (u32)size;  // 32-bit division\n    uptr meta = (beg + kRegionSize) - (n + 1) * kMetadataSize;\n    return reinterpret_cast<void*>(meta);\n  }\n\n  NOINLINE TransferBatch *AllocateBatch(AllocatorStats *stat, AllocatorCache *c,\n                                        uptr class_id) {\n    DCHECK_LT(class_id, kNumClasses);\n    SizeClassInfo *sci = GetSizeClassInfo(class_id);\n    SpinMutexLock l(&sci->mutex);\n    if (sci->free_list.empty()) {\n      if (UNLIKELY(!PopulateFreeList(stat, c, sci, class_id)))\n        return nullptr;\n      DCHECK(!sci->free_list.empty());\n    }\n    TransferBatch *b = sci->free_list.front();\n    sci->free_list.pop_front();\n    return b;\n  }\n\n  NOINLINE void DeallocateBatch(AllocatorStats *stat, uptr class_id,\n                                TransferBatch *b) {\n    DCHECK_LT(class_id, kNumClasses);\n    CHECK_GT(b->Count(), 0);\n    SizeClassInfo *sci = GetSizeClassInfo(class_id);\n    SpinMutexLock l(&sci->mutex);\n    sci->free_list.push_front(b);\n  }\n\n  bool PointerIsMine(const void *p) const {\n    uptr mem = reinterpret_cast<uptr>(p);\n   
 if (SANITIZER_SIGN_EXTENDED_ADDRESSES)\n      mem &= (kSpaceSize - 1);\n    if (mem < kSpaceBeg || mem >= kSpaceBeg + kSpaceSize)\n      return false;\n    return GetSizeClass(p) != 0;\n  }\n\n  uptr GetSizeClass(const void *p) const {\n    uptr id = ComputeRegionId(reinterpret_cast<uptr>(p));\n    return possible_regions.contains(id) ? possible_regions[id] : 0;\n  }\n\n  void *GetBlockBegin(const void *p) {\n    CHECK(PointerIsMine(p));\n    uptr mem = reinterpret_cast<uptr>(p);\n    uptr beg = ComputeRegionBeg(mem);\n    uptr size = ClassIdToSize(GetSizeClass(p));\n    u32 offset = mem - beg;\n    u32 n = offset / (u32)size;  // 32-bit division\n    uptr res = beg + (n * (u32)size);\n    return reinterpret_cast<void*>(res);\n  }\n\n  uptr GetActuallyAllocatedSize(void *p) {\n    CHECK(PointerIsMine(p));\n    return ClassIdToSize(GetSizeClass(p));\n  }\n\n  static uptr ClassID(uptr size) { return SizeClassMap::ClassID(size); }\n\n  uptr TotalMemoryUsed() {\n    // No need to lock here.\n    uptr res = 0;\n    for (uptr i = 0; i < kNumPossibleRegions; i++)\n      if (possible_regions[i])\n        res += kRegionSize;\n    return res;\n  }\n\n  void TestOnlyUnmap() {\n    for (uptr i = 0; i < kNumPossibleRegions; i++)\n      if (possible_regions[i])\n        UnmapWithCallback((i * kRegionSize), kRegionSize);\n  }\n\n  // ForceLock() and ForceUnlock() are needed to implement Darwin malloc zone\n  // introspection API.\n  void ForceLock() SANITIZER_NO_THREAD_SAFETY_ANALYSIS {\n    for (uptr i = 0; i < kNumClasses; i++) {\n      GetSizeClassInfo(i)->mutex.Lock();\n    }\n  }\n\n  void ForceUnlock() SANITIZER_NO_THREAD_SAFETY_ANALYSIS {\n    for (int i = kNumClasses - 1; i >= 0; i--) {\n      GetSizeClassInfo(i)->mutex.Unlock();\n    }\n  }\n\n  // Iterate over all existing chunks.\n  // The allocator must be locked when calling this function.\n  void ForEachChunk(ForEachChunkCallback callback, void *arg) const {\n    for (uptr region = 0; region < kNumPossibleRegions; 
region++)\n      if (possible_regions.contains(region) && possible_regions[region]) {\n        uptr chunk_size = ClassIdToSize(possible_regions[region]);\n        uptr max_chunks_in_region = kRegionSize / (chunk_size + kMetadataSize);\n        uptr region_beg = region * kRegionSize;\n        for (uptr chunk = region_beg;\n             chunk < region_beg + max_chunks_in_region * chunk_size;\n             chunk += chunk_size) {\n          // Too slow: CHECK_EQ((void *)chunk, GetBlockBegin((void *)chunk));\n          callback(chunk, arg);\n        }\n      }\n  }\n\n  void PrintStats() {}\n\n  static uptr AdditionalSize() { return 0; }\n\n  typedef SizeClassMap SizeClassMapT;\n  static const uptr kNumClasses = SizeClassMap::kNumClasses;\n\n private:\n  static const uptr kRegionSize = 1 << kRegionSizeLog;\n  static const uptr kNumPossibleRegions = kSpaceSize / kRegionSize;\n\n  struct ALIGNED(SANITIZER_CACHE_LINE_SIZE) SizeClassInfo {\n    StaticSpinMutex mutex;\n    IntrusiveList<TransferBatch> free_list;\n    u32 rand_state;\n  };\n  COMPILER_CHECK(sizeof(SizeClassInfo) % kCacheLineSize == 0);\n\n  uptr ComputeRegionId(uptr mem) const {\n    if (SANITIZER_SIGN_EXTENDED_ADDRESSES)\n      mem &= (kSpaceSize - 1);\n    const uptr res = mem >> kRegionSizeLog;\n    CHECK_LT(res, kNumPossibleRegions);\n    return res;\n  }\n\n  uptr ComputeRegionBeg(uptr mem) const { return mem & ~(kRegionSize - 1); }\n\n  uptr AllocateRegion(AllocatorStats *stat, uptr class_id) {\n    DCHECK_LT(class_id, kNumClasses);\n    const uptr res = reinterpret_cast<uptr>(MmapAlignedOrDieOnFatalError(\n        kRegionSize, kRegionSize, PrimaryAllocatorName));\n    if (UNLIKELY(!res))\n      return 0;\n    MapUnmapCallback().OnMap(res, kRegionSize);\n    stat->Add(AllocatorStatMapped, kRegionSize);\n    CHECK(IsAligned(res, kRegionSize));\n    possible_regions[ComputeRegionId(res)] = class_id;\n    return res;\n  }\n\n  SizeClassInfo *GetSizeClassInfo(uptr class_id) {\n    DCHECK_LT(class_id, 
kNumClasses);\n    return &size_class_info_array[class_id];\n  }\n\n  bool PopulateBatches(AllocatorCache *c, SizeClassInfo *sci, uptr class_id,\n                       TransferBatch **current_batch, uptr max_count,\n                       uptr *pointers_array, uptr count) {\n    // If using a separate class for batches, we do not need to shuffle it.\n    if (kRandomShuffleChunks && (!kUseSeparateSizeClassForBatch ||\n        class_id != SizeClassMap::kBatchClassID))\n      RandomShuffle(pointers_array, count, &sci->rand_state);\n    TransferBatch *b = *current_batch;\n    for (uptr i = 0; i < count; i++) {\n      if (!b) {\n        b = c->CreateBatch(class_id, this, (TransferBatch*)pointers_array[i]);\n        if (UNLIKELY(!b))\n          return false;\n        b->Clear();\n      }\n      b->Add((void*)pointers_array[i]);\n      if (b->Count() == max_count) {\n        sci->free_list.push_back(b);\n        b = nullptr;\n      }\n    }\n    *current_batch = b;\n    return true;\n  }\n\n  bool PopulateFreeList(AllocatorStats *stat, AllocatorCache *c,\n                        SizeClassInfo *sci, uptr class_id) {\n    const uptr region = AllocateRegion(stat, class_id);\n    if (UNLIKELY(!region))\n      return false;\n    if (kRandomShuffleChunks)\n      if (UNLIKELY(sci->rand_state == 0))\n        // The random state is initialized from ASLR (PIE) and time.\n        sci->rand_state = reinterpret_cast<uptr>(sci) ^ NanoTime();\n    const uptr size = ClassIdToSize(class_id);\n    const uptr n_chunks = kRegionSize / (size + kMetadataSize);\n    const uptr max_count = TransferBatch::MaxCached(size);\n    DCHECK_GT(max_count, 0);\n    TransferBatch *b = nullptr;\n    constexpr uptr kShuffleArraySize = 48;\n    uptr shuffle_array[kShuffleArraySize];\n    uptr count = 0;\n    for (uptr i = region; i < region + n_chunks * size; i += size) {\n      shuffle_array[count++] = i;\n      if (count == kShuffleArraySize) {\n        if (UNLIKELY(!PopulateBatches(c, sci, class_id, &b, 
max_count,\n                                      shuffle_array, count)))\n          return false;\n        count = 0;\n      }\n    }\n    if (count) {\n      if (UNLIKELY(!PopulateBatches(c, sci, class_id, &b, max_count,\n                                    shuffle_array, count)))\n        return false;\n    }\n    if (b) {\n      CHECK_GT(b->Count(), 0);\n      sci->free_list.push_back(b);\n    }\n    return true;\n  }\n\n  ByteMap possible_regions;\n  SizeClassInfo size_class_info_array[kNumClasses];\n};\n"
  },
  {
    "path": "runtime/sanitizer_common/sanitizer_allocator_primary64.h",
    "content": "//===-- sanitizer_allocator_primary64.h -------------------------*- C++ -*-===//\n//\n// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n// See https://llvm.org/LICENSE.txt for license information.\n// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n//\n//===----------------------------------------------------------------------===//\n//\n// Part of the Sanitizer Allocator.\n//\n//===----------------------------------------------------------------------===//\n#ifndef SANITIZER_ALLOCATOR_H\n#error This file must be included inside sanitizer_allocator.h\n#endif\n\ntemplate<class SizeClassAllocator> struct SizeClassAllocator64LocalCache;\n\n// SizeClassAllocator64 -- allocator for 64-bit address space.\n// The template parameter Params is a class containing the actual parameters.\n//\n// Space: a portion of address space of kSpaceSize bytes starting at SpaceBeg.\n// If kSpaceBeg is ~0 then SpaceBeg is chosen dynamically by mmap.\n// Otherwise SpaceBeg=kSpaceBeg (fixed address).\n// kSpaceSize is a power of two.\n// At the beginning the entire space is mprotect-ed, then small parts of it\n// are mapped on demand.\n//\n// Region: a part of Space dedicated to a single size class.\n// There are kNumClasses Regions of equal size.\n//\n// UserChunk: a piece of memory returned to user.\n// MetaChunk: kMetadataSize bytes of metadata associated with a UserChunk.\n\n// FreeArray is an array free-d chunks (stored as 4-byte offsets)\n//\n// A Region looks like this:\n// UserChunk1 ... UserChunkN <gap> MetaChunkN ... 
MetaChunk1 FreeArray\n\nstruct SizeClassAllocator64FlagMasks {  //  Bit masks.\n  enum {\n    kRandomShuffleChunks = 1,\n  };\n};\n\ntemplate <typename Allocator>\nclass MemoryMapper {\n public:\n  typedef typename Allocator::CompactPtrT CompactPtrT;\n\n  explicit MemoryMapper(const Allocator &allocator) : allocator_(allocator) {}\n\n  bool GetAndResetStats(uptr &ranges, uptr &bytes) {\n    ranges = released_ranges_count_;\n    released_ranges_count_ = 0;\n    bytes = released_bytes_;\n    released_bytes_ = 0;\n    return ranges != 0;\n  }\n\n  u64 *MapPackedCounterArrayBuffer(uptr count) {\n    buffer_.clear();\n    buffer_.resize(count);\n    return buffer_.data();\n  }\n\n  // Releases [from, to) range of pages back to OS.\n  void ReleasePageRangeToOS(uptr class_id, CompactPtrT from, CompactPtrT to) {\n    const uptr region_base = allocator_.GetRegionBeginBySizeClass(class_id);\n    const uptr from_page = allocator_.CompactPtrToPointer(region_base, from);\n    const uptr to_page = allocator_.CompactPtrToPointer(region_base, to);\n    ReleaseMemoryPagesToOS(from_page, to_page);\n    released_ranges_count_++;\n    released_bytes_ += to_page - from_page;\n  }\n\n private:\n  const Allocator &allocator_;\n  uptr released_ranges_count_ = 0;\n  uptr released_bytes_ = 0;\n  InternalMmapVector<u64> buffer_;\n};\n\ntemplate <class Params>\nclass SizeClassAllocator64 {\n public:\n  using AddressSpaceView = typename Params::AddressSpaceView;\n  static const uptr kSpaceBeg = Params::kSpaceBeg;\n  static const uptr kSpaceSize = Params::kSpaceSize;\n  static const uptr kMetadataSize = Params::kMetadataSize;\n  typedef typename Params::SizeClassMap SizeClassMap;\n  typedef typename Params::MapUnmapCallback MapUnmapCallback;\n\n  static const bool kRandomShuffleChunks =\n      Params::kFlags & SizeClassAllocator64FlagMasks::kRandomShuffleChunks;\n\n  typedef SizeClassAllocator64<Params> ThisT;\n  typedef SizeClassAllocator64LocalCache<ThisT> AllocatorCache;\n  typedef 
MemoryMapper<ThisT> MemoryMapperT;\n\n  // When we know the size class (the region base) we can represent a pointer\n  // as a 4-byte integer (offset from the region start shifted right by 4).\n  typedef u32 CompactPtrT;\n  static const uptr kCompactPtrScale = 4;\n  CompactPtrT PointerToCompactPtr(uptr base, uptr ptr) const {\n    return static_cast<CompactPtrT>((ptr - base) >> kCompactPtrScale);\n  }\n  uptr CompactPtrToPointer(uptr base, CompactPtrT ptr32) const {\n    return base + (static_cast<uptr>(ptr32) << kCompactPtrScale);\n  }\n\n  // If heap_start is nonzero, assumes kSpaceSize bytes are already mapped R/W\n  // at heap_start and places the heap there.  This mode requires kSpaceBeg ==\n  // ~(uptr)0.\n  void Init(s32 release_to_os_interval_ms, uptr heap_start = 0) {\n    uptr TotalSpaceSize = kSpaceSize + AdditionalSize();\n    PremappedHeap = heap_start != 0;\n    if (PremappedHeap) {\n      CHECK(!kUsingConstantSpaceBeg);\n      NonConstSpaceBeg = heap_start;\n      uptr RegionInfoSize = AdditionalSize();\n      RegionInfoSpace =\n          address_range.Init(RegionInfoSize, PrimaryAllocatorName);\n      CHECK_NE(RegionInfoSpace, ~(uptr)0);\n      CHECK_EQ(RegionInfoSpace,\n               address_range.MapOrDie(RegionInfoSpace, RegionInfoSize,\n                                      \"SizeClassAllocator: region info\"));\n      MapUnmapCallback().OnMap(RegionInfoSpace, RegionInfoSize);\n    } else {\n      if (kUsingConstantSpaceBeg) {\n        CHECK(IsAligned(kSpaceBeg, SizeClassMap::kMaxSize));\n        CHECK_EQ(kSpaceBeg,\n                 address_range.Init(TotalSpaceSize, PrimaryAllocatorName,\n                                    kSpaceBeg));\n      } else {\n        // Combined allocator expects that an 2^N allocation is always aligned\n        // to 2^N. 
For this to work, the start of the space needs to be aligned\n        // as high as the largest size class (which also needs to be a power of\n        // 2).\n        NonConstSpaceBeg = address_range.InitAligned(\n            TotalSpaceSize, SizeClassMap::kMaxSize, PrimaryAllocatorName);\n        CHECK_NE(NonConstSpaceBeg, ~(uptr)0);\n      }\n      RegionInfoSpace = SpaceEnd();\n      MapWithCallbackOrDie(RegionInfoSpace, AdditionalSize(),\n                           \"SizeClassAllocator: region info\");\n    }\n    SetReleaseToOSIntervalMs(release_to_os_interval_ms);\n    // Check that the RegionInfo array is aligned on the CacheLine size.\n    DCHECK_EQ(RegionInfoSpace % kCacheLineSize, 0);\n  }\n\n  s32 ReleaseToOSIntervalMs() const {\n    return atomic_load(&release_to_os_interval_ms_, memory_order_relaxed);\n  }\n\n  void SetReleaseToOSIntervalMs(s32 release_to_os_interval_ms) {\n    atomic_store(&release_to_os_interval_ms_, release_to_os_interval_ms,\n                 memory_order_relaxed);\n  }\n\n  void ForceReleaseToOS() {\n    MemoryMapperT memory_mapper(*this);\n    for (uptr class_id = 1; class_id < kNumClasses; class_id++) {\n      Lock l(&GetRegionInfo(class_id)->mutex);\n      MaybeReleaseToOS(&memory_mapper, class_id, true /*force*/);\n    }\n  }\n\n  static bool CanAllocate(uptr size, uptr alignment) {\n    return size <= SizeClassMap::kMaxSize &&\n      alignment <= SizeClassMap::kMaxSize;\n  }\n\n  NOINLINE void ReturnToAllocator(MemoryMapperT *memory_mapper,\n                                  AllocatorStats *stat, uptr class_id,\n                                  const CompactPtrT *chunks, uptr n_chunks) {\n    RegionInfo *region = GetRegionInfo(class_id);\n    uptr region_beg = GetRegionBeginBySizeClass(class_id);\n    CompactPtrT *free_array = GetFreeArray(region_beg);\n\n    Lock l(&region->mutex);\n    uptr old_num_chunks = region->num_freed_chunks;\n    uptr new_num_freed_chunks = old_num_chunks + n_chunks;\n    // Failure to allocate free 
array space while releasing memory is non\n    // recoverable.\n    if (UNLIKELY(!EnsureFreeArraySpace(region, region_beg,\n                                       new_num_freed_chunks))) {\n      Report(\"FATAL: Internal error: %s's allocator exhausted the free list \"\n             \"space for size class %zd (%zd bytes).\\n\", SanitizerToolName,\n             class_id, ClassIdToSize(class_id));\n      Die();\n    }\n    for (uptr i = 0; i < n_chunks; i++)\n      free_array[old_num_chunks + i] = chunks[i];\n    region->num_freed_chunks = new_num_freed_chunks;\n    region->stats.n_freed += n_chunks;\n\n    MaybeReleaseToOS(memory_mapper, class_id, false /*force*/);\n  }\n\n  NOINLINE bool GetFromAllocator(AllocatorStats *stat, uptr class_id,\n                                 CompactPtrT *chunks, uptr n_chunks) {\n    RegionInfo *region = GetRegionInfo(class_id);\n    uptr region_beg = GetRegionBeginBySizeClass(class_id);\n    CompactPtrT *free_array = GetFreeArray(region_beg);\n\n    Lock l(&region->mutex);\n#if SANITIZER_WINDOWS\n    /* On Windows unmapping of memory during __sanitizer_purge_allocator is\n    explicit and immediate, so unmapped regions must be explicitly mapped back\n    in when they are accessed again. 
*/\n    if (region->rtoi.last_released_bytes > 0) {\n      MmapFixedOrDie(region_beg, region->mapped_user,\n                                      \"SizeClassAllocator: region data\");\n      region->rtoi.n_freed_at_last_release = 0;\n      region->rtoi.last_released_bytes = 0;\n    }\n#endif\n    if (UNLIKELY(region->num_freed_chunks < n_chunks)) {\n      if (UNLIKELY(!PopulateFreeArray(stat, class_id, region,\n                                      n_chunks - region->num_freed_chunks)))\n        return false;\n      CHECK_GE(region->num_freed_chunks, n_chunks);\n    }\n    region->num_freed_chunks -= n_chunks;\n    uptr base_idx = region->num_freed_chunks;\n    for (uptr i = 0; i < n_chunks; i++)\n      chunks[i] = free_array[base_idx + i];\n    region->stats.n_allocated += n_chunks;\n    return true;\n  }\n\n  bool PointerIsMine(const void *p) const {\n    uptr P = reinterpret_cast<uptr>(p);\n    if (kUsingConstantSpaceBeg && (kSpaceBeg % kSpaceSize) == 0)\n      return P / kSpaceSize == kSpaceBeg / kSpaceSize;\n    return P >= SpaceBeg() && P < SpaceEnd();\n  }\n\n  uptr GetRegionBegin(const void *p) {\n    if (kUsingConstantSpaceBeg)\n      return reinterpret_cast<uptr>(p) & ~(kRegionSize - 1);\n    uptr space_beg = SpaceBeg();\n    return ((reinterpret_cast<uptr>(p)  - space_beg) & ~(kRegionSize - 1)) +\n        space_beg;\n  }\n\n  uptr GetRegionBeginBySizeClass(uptr class_id) const {\n    return SpaceBeg() + kRegionSize * class_id;\n  }\n\n  uptr GetSizeClass(const void *p) {\n    if (kUsingConstantSpaceBeg && (kSpaceBeg % kSpaceSize) == 0)\n      return ((reinterpret_cast<uptr>(p)) / kRegionSize) % kNumClassesRounded;\n    return ((reinterpret_cast<uptr>(p) - SpaceBeg()) / kRegionSize) %\n           kNumClassesRounded;\n  }\n\n  void *GetBlockBegin(const void *p) {\n    uptr class_id = GetSizeClass(p);\n    if (class_id >= kNumClasses) return nullptr;\n    uptr size = ClassIdToSize(class_id);\n    if (!size) return nullptr;\n    uptr chunk_idx = 
GetChunkIdx((uptr)p, size);\n    uptr reg_beg = GetRegionBegin(p);\n    uptr beg = chunk_idx * size;\n    uptr next_beg = beg + size;\n    const RegionInfo *region = AddressSpaceView::Load(GetRegionInfo(class_id));\n    if (region->mapped_user >= next_beg)\n      return reinterpret_cast<void*>(reg_beg + beg);\n    return nullptr;\n  }\n\n  uptr GetActuallyAllocatedSize(void *p) {\n    CHECK(PointerIsMine(p));\n    return ClassIdToSize(GetSizeClass(p));\n  }\n\n  static uptr ClassID(uptr size) { return SizeClassMap::ClassID(size); }\n\n  void *GetMetaData(const void *p) {\n    CHECK(kMetadataSize);\n    uptr class_id = GetSizeClass(p);\n    uptr size = ClassIdToSize(class_id);\n    if (!size)\n      return nullptr;\n    uptr chunk_idx = GetChunkIdx(reinterpret_cast<uptr>(p), size);\n    uptr region_beg = GetRegionBeginBySizeClass(class_id);\n    return reinterpret_cast<void *>(GetMetadataEnd(region_beg) -\n                                    (1 + chunk_idx) * kMetadataSize);\n  }\n\n  uptr TotalMemoryUsed() {\n    uptr res = 0;\n    for (uptr i = 0; i < kNumClasses; i++)\n      res += GetRegionInfo(i)->allocated_user;\n    return res;\n  }\n\n  // Test-only.\n  void TestOnlyUnmap() {\n    UnmapWithCallbackOrDie((uptr)address_range.base(), address_range.size());\n  }\n\n  static void FillMemoryProfile(uptr start, uptr rss, bool file, uptr *stats) {\n    for (uptr class_id = 0; class_id < kNumClasses; class_id++)\n      if (stats[class_id] == start)\n        stats[class_id] = rss;\n  }\n\n  void PrintStats(uptr class_id, uptr rss) {\n    RegionInfo *region = GetRegionInfo(class_id);\n    if (region->mapped_user == 0) return;\n    uptr in_use = region->stats.n_allocated - region->stats.n_freed;\n    uptr avail_chunks = region->allocated_user / ClassIdToSize(class_id);\n    Printf(\n        \"%s %02zd (%6zd): mapped: %6zdK allocs: %7zd frees: %7zd inuse: %6zd \"\n        \"num_freed_chunks %7zd avail: %6zd rss: %6zdK releases: %6zd \"\n        \"last released: %6lldK 
region: 0x%zx\\n\",\n        region->exhausted ? \"F\" : \" \", class_id, ClassIdToSize(class_id),\n        region->mapped_user >> 10, region->stats.n_allocated,\n        region->stats.n_freed, in_use, region->num_freed_chunks, avail_chunks,\n        rss >> 10, region->rtoi.num_releases,\n        region->rtoi.last_released_bytes >> 10,\n        SpaceBeg() + kRegionSize * class_id);\n  }\n\n  void PrintStats() {\n    uptr rss_stats[kNumClasses];\n    for (uptr class_id = 0; class_id < kNumClasses; class_id++)\n      rss_stats[class_id] = SpaceBeg() + kRegionSize * class_id;\n    GetMemoryProfile(FillMemoryProfile, rss_stats);\n\n    uptr total_mapped = 0;\n    uptr total_rss = 0;\n    uptr n_allocated = 0;\n    uptr n_freed = 0;\n    for (uptr class_id = 1; class_id < kNumClasses; class_id++) {\n      RegionInfo *region = GetRegionInfo(class_id);\n      if (region->mapped_user != 0) {\n        total_mapped += region->mapped_user;\n        total_rss += rss_stats[class_id];\n      }\n      n_allocated += region->stats.n_allocated;\n      n_freed += region->stats.n_freed;\n    }\n\n    Printf(\"Stats: SizeClassAllocator64: %zdM mapped (%zdM rss) in \"\n           \"%zd allocations; remains %zd\\n\", total_mapped >> 20,\n           total_rss >> 20, n_allocated, n_allocated - n_freed);\n    for (uptr class_id = 1; class_id < kNumClasses; class_id++)\n      PrintStats(class_id, rss_stats[class_id]);\n  }\n\n  // ForceLock() and ForceUnlock() are needed to implement Darwin malloc zone\n  // introspection API.\n  void ForceLock() SANITIZER_NO_THREAD_SAFETY_ANALYSIS {\n    for (uptr i = 0; i < kNumClasses; i++) {\n      GetRegionInfo(i)->mutex.Lock();\n    }\n  }\n\n  void ForceUnlock() SANITIZER_NO_THREAD_SAFETY_ANALYSIS {\n    for (int i = (int)kNumClasses - 1; i >= 0; i--) {\n      GetRegionInfo(i)->mutex.Unlock();\n    }\n  }\n\n  // Iterate over all existing chunks.\n  // The allocator must be locked when calling this function.\n  void ForEachChunk(ForEachChunkCallback 
callback, void *arg) {\n    for (uptr class_id = 1; class_id < kNumClasses; class_id++) {\n      RegionInfo *region = GetRegionInfo(class_id);\n      uptr chunk_size = ClassIdToSize(class_id);\n      uptr region_beg = SpaceBeg() + class_id * kRegionSize;\n      uptr region_allocated_user_size =\n          AddressSpaceView::Load(region)->allocated_user;\n      for (uptr chunk = region_beg;\n           chunk < region_beg + region_allocated_user_size;\n           chunk += chunk_size) {\n        // Too slow: CHECK_EQ((void *)chunk, GetBlockBegin((void *)chunk));\n        callback(chunk, arg);\n      }\n    }\n  }\n\n  static uptr ClassIdToSize(uptr class_id) {\n    return SizeClassMap::Size(class_id);\n  }\n\n  static uptr AdditionalSize() {\n    return RoundUpTo(sizeof(RegionInfo) * kNumClassesRounded,\n                     GetPageSizeCached());\n  }\n\n  typedef SizeClassMap SizeClassMapT;\n  static const uptr kNumClasses = SizeClassMap::kNumClasses;\n  static const uptr kNumClassesRounded = SizeClassMap::kNumClassesRounded;\n\n  // A packed array of counters. Each counter occupies 2^n bits, enough to store\n  // counter's max_value. 
Ctor will try to allocate the required buffer via\n  // mapper->MapPackedCounterArrayBuffer and the caller is expected to check\n  // whether the initialization was successful by checking IsAllocated() result.\n  // For the performance sake, none of the accessors check the validity of the\n  // arguments, it is assumed that index is always in [0, n) range and the value\n  // is not incremented past max_value.\n  class PackedCounterArray {\n   public:\n    template <typename MemoryMapper>\n    PackedCounterArray(u64 num_counters, u64 max_value, MemoryMapper *mapper)\n        : n(num_counters) {\n      CHECK_GT(num_counters, 0);\n      CHECK_GT(max_value, 0);\n      constexpr u64 kMaxCounterBits = sizeof(*buffer) * 8ULL;\n      // Rounding counter storage size up to the power of two allows for using\n      // bit shifts calculating particular counter's index and offset.\n      uptr counter_size_bits =\n          RoundUpToPowerOfTwo(MostSignificantSetBitIndex(max_value) + 1);\n      CHECK_LE(counter_size_bits, kMaxCounterBits);\n      counter_size_bits_log = Log2(counter_size_bits);\n      counter_mask = ~0ULL >> (kMaxCounterBits - counter_size_bits);\n\n      uptr packing_ratio = kMaxCounterBits >> counter_size_bits_log;\n      CHECK_GT(packing_ratio, 0);\n      packing_ratio_log = Log2(packing_ratio);\n      bit_offset_mask = packing_ratio - 1;\n\n      buffer = mapper->MapPackedCounterArrayBuffer(\n          RoundUpTo(n, 1ULL << packing_ratio_log) >> packing_ratio_log);\n    }\n\n    bool IsAllocated() const {\n      return !!buffer;\n    }\n\n    u64 GetCount() const {\n      return n;\n    }\n\n    uptr Get(uptr i) const {\n      DCHECK_LT(i, n);\n      uptr index = i >> packing_ratio_log;\n      uptr bit_offset = (i & bit_offset_mask) << counter_size_bits_log;\n      return (buffer[index] >> bit_offset) & counter_mask;\n    }\n\n    void Inc(uptr i) const {\n      DCHECK_LT(Get(i), counter_mask);\n      uptr index = i >> packing_ratio_log;\n      uptr bit_offset 
= (i & bit_offset_mask) << counter_size_bits_log;\n      buffer[index] += 1ULL << bit_offset;\n    }\n\n    void IncRange(uptr from, uptr to) const {\n      DCHECK_LE(from, to);\n      for (uptr i = from; i <= to; i++)\n        Inc(i);\n    }\n\n   private:\n    const u64 n;\n    u64 counter_size_bits_log;\n    u64 counter_mask;\n    u64 packing_ratio_log;\n    u64 bit_offset_mask;\n    u64* buffer;\n  };\n\n  template <class MemoryMapperT>\n  class FreePagesRangeTracker {\n   public:\n    FreePagesRangeTracker(MemoryMapperT *mapper, uptr class_id)\n        : memory_mapper(mapper),\n          class_id(class_id),\n          page_size_scaled_log(Log2(GetPageSizeCached() >> kCompactPtrScale)) {}\n\n    void NextPage(bool freed) {\n      if (freed) {\n        if (!in_the_range) {\n          current_range_start_page = current_page;\n          in_the_range = true;\n        }\n      } else {\n        CloseOpenedRange();\n      }\n      current_page++;\n    }\n\n    void Done() {\n      CloseOpenedRange();\n    }\n\n   private:\n    void CloseOpenedRange() {\n      if (in_the_range) {\n        memory_mapper->ReleasePageRangeToOS(\n            class_id, current_range_start_page << page_size_scaled_log,\n            current_page << page_size_scaled_log);\n        in_the_range = false;\n      }\n    }\n\n    MemoryMapperT *const memory_mapper = nullptr;\n    const uptr class_id = 0;\n    const uptr page_size_scaled_log = 0;\n    bool in_the_range = false;\n    uptr current_page = 0;\n    uptr current_range_start_page = 0;\n  };\n\n  // Iterates over the free_array to identify memory pages containing freed\n  // chunks only and returns these pages back to OS.\n  // allocated_pages_count is the total number of pages allocated for the\n  // current bucket.\n  template <typename MemoryMapper>\n  static void ReleaseFreeMemoryToOS(CompactPtrT *free_array,\n                                    uptr free_array_count, uptr chunk_size,\n                                    uptr 
allocated_pages_count,\n                                    MemoryMapper *memory_mapper,\n                                    uptr class_id) {\n    const uptr page_size = GetPageSizeCached();\n\n    // Figure out the number of chunks per page and whether we can take a fast\n    // path (the number of chunks per page is the same for all pages).\n    uptr full_pages_chunk_count_max;\n    bool same_chunk_count_per_page;\n    if (chunk_size <= page_size && page_size % chunk_size == 0) {\n      // Same number of chunks per page, no cross overs.\n      full_pages_chunk_count_max = page_size / chunk_size;\n      same_chunk_count_per_page = true;\n    } else if (chunk_size <= page_size && page_size % chunk_size != 0 &&\n        chunk_size % (page_size % chunk_size) == 0) {\n      // Some chunks are crossing page boundaries, which means that the page\n      // contains one or two partial chunks, but all pages contain the same\n      // number of chunks.\n      full_pages_chunk_count_max = page_size / chunk_size + 1;\n      same_chunk_count_per_page = true;\n    } else if (chunk_size <= page_size) {\n      // Some chunks are crossing page boundaries, which means that the page\n      // contains one or two partial chunks.\n      full_pages_chunk_count_max = page_size / chunk_size + 2;\n      same_chunk_count_per_page = false;\n    } else if (chunk_size > page_size && chunk_size % page_size == 0) {\n      // One chunk covers multiple pages, no cross overs.\n      full_pages_chunk_count_max = 1;\n      same_chunk_count_per_page = true;\n    } else if (chunk_size > page_size) {\n      // One chunk covers multiple pages, Some chunks are crossing page\n      // boundaries. 
Some pages contain one chunk, some contain two.\n      full_pages_chunk_count_max = 2;\n      same_chunk_count_per_page = false;\n    } else {\n      UNREACHABLE(\"All chunk_size/page_size ratios must be handled.\");\n    }\n\n    PackedCounterArray counters(allocated_pages_count,\n                                full_pages_chunk_count_max, memory_mapper);\n    if (!counters.IsAllocated())\n      return;\n\n    const uptr chunk_size_scaled = chunk_size >> kCompactPtrScale;\n    const uptr page_size_scaled = page_size >> kCompactPtrScale;\n    const uptr page_size_scaled_log = Log2(page_size_scaled);\n\n    // Iterate over free chunks and count how many free chunks affect each\n    // allocated page.\n    if (chunk_size <= page_size && page_size % chunk_size == 0) {\n      // Each chunk affects one page only.\n      for (uptr i = 0; i < free_array_count; i++)\n        counters.Inc(free_array[i] >> page_size_scaled_log);\n    } else {\n      // In all other cases chunks might affect more than one page.\n      for (uptr i = 0; i < free_array_count; i++) {\n        counters.IncRange(\n            free_array[i] >> page_size_scaled_log,\n            (free_array[i] + chunk_size_scaled - 1) >> page_size_scaled_log);\n      }\n    }\n\n    // Iterate over pages detecting ranges of pages with chunk counters equal\n    // to the expected number of chunks for the particular page.\n    FreePagesRangeTracker<MemoryMapper> range_tracker(memory_mapper, class_id);\n    if (same_chunk_count_per_page) {\n      // Fast path, every page has the same number of chunks affecting it.\n      for (uptr i = 0; i < counters.GetCount(); i++)\n        range_tracker.NextPage(counters.Get(i) == full_pages_chunk_count_max);\n    } else {\n      // Show path, go through the pages keeping count how many chunks affect\n      // each page.\n      const uptr pn =\n          chunk_size < page_size ? 
page_size_scaled / chunk_size_scaled : 1;\n      const uptr pnc = pn * chunk_size_scaled;\n      // The idea is to increment the current page pointer by the first chunk\n      // size, middle portion size (the portion of the page covered by chunks\n      // except the first and the last one) and then the last chunk size, adding\n      // up the number of chunks on the current page and checking on every step\n      // whether the page boundary was crossed.\n      uptr prev_page_boundary = 0;\n      uptr current_boundary = 0;\n      for (uptr i = 0; i < counters.GetCount(); i++) {\n        uptr page_boundary = prev_page_boundary + page_size_scaled;\n        uptr chunks_per_page = pn;\n        if (current_boundary < page_boundary) {\n          if (current_boundary > prev_page_boundary)\n            chunks_per_page++;\n          current_boundary += pnc;\n          if (current_boundary < page_boundary) {\n            chunks_per_page++;\n            current_boundary += chunk_size_scaled;\n          }\n        }\n        prev_page_boundary = page_boundary;\n\n        range_tracker.NextPage(counters.Get(i) == chunks_per_page);\n      }\n    }\n    range_tracker.Done();\n  }\n\n private:\n  friend class MemoryMapper<ThisT>;\n\n  ReservedAddressRange address_range;\n\n  static const uptr kRegionSize = kSpaceSize / kNumClassesRounded;\n  // FreeArray is the array of free-d chunks (stored as 4-byte offsets).\n  // In the worst case it may require kRegionSize/SizeClassMap::kMinSize\n  // elements, but in reality this will not happen. For simplicity we\n  // dedicate 1/8 of the region's virtual space to FreeArray.\n  static const uptr kFreeArraySize = kRegionSize / 8;\n\n  static const bool kUsingConstantSpaceBeg = kSpaceBeg != ~(uptr)0;\n  uptr NonConstSpaceBeg;\n  uptr SpaceBeg() const {\n    return kUsingConstantSpaceBeg ? 
kSpaceBeg : NonConstSpaceBeg;\n  }\n  uptr SpaceEnd() const { return  SpaceBeg() + kSpaceSize; }\n  // kRegionSize must be >= 2^32.\n  COMPILER_CHECK((kRegionSize) >= (1ULL << (SANITIZER_WORDSIZE / 2)));\n  // kRegionSize must be <= 2^36, see CompactPtrT.\n  COMPILER_CHECK((kRegionSize) <= (1ULL << (SANITIZER_WORDSIZE / 2 + 4)));\n  // Call mmap for user memory with at least this size.\n  static const uptr kUserMapSize = 1 << 16;\n  // Call mmap for metadata memory with at least this size.\n  static const uptr kMetaMapSize = 1 << 16;\n  // Call mmap for free array memory with at least this size.\n  static const uptr kFreeArrayMapSize = 1 << 16;\n\n  atomic_sint32_t release_to_os_interval_ms_;\n\n  uptr RegionInfoSpace;\n\n  // True if the user has already mapped the entire heap R/W.\n  bool PremappedHeap;\n\n  struct Stats {\n    uptr n_allocated;\n    uptr n_freed;\n  };\n\n  struct ReleaseToOsInfo {\n    uptr n_freed_at_last_release;\n    uptr num_releases;\n    u64 last_release_at_ns;\n    u64 last_released_bytes;\n  };\n\n  struct ALIGNED(SANITIZER_CACHE_LINE_SIZE) RegionInfo {\n    Mutex mutex;\n    uptr num_freed_chunks;  // Number of elements in the freearray.\n    uptr mapped_free_array;  // Bytes mapped for freearray.\n    uptr allocated_user;  // Bytes allocated for user memory.\n    uptr allocated_meta;  // Bytes allocated for metadata.\n    uptr mapped_user;  // Bytes mapped for user memory.\n    uptr mapped_meta;  // Bytes mapped for metadata.\n    u32 rand_state;  // Seed for random shuffle, used if kRandomShuffleChunks.\n    bool exhausted;  // Whether region is out of space for new chunks.\n    Stats stats;\n    ReleaseToOsInfo rtoi;\n  };\n  COMPILER_CHECK(sizeof(RegionInfo) % kCacheLineSize == 0);\n\n  RegionInfo *GetRegionInfo(uptr class_id) const {\n    DCHECK_LT(class_id, kNumClasses);\n    RegionInfo *regions = reinterpret_cast<RegionInfo *>(RegionInfoSpace);\n    return &regions[class_id];\n  }\n\n  uptr GetMetadataEnd(uptr region_beg) const 
{\n    return region_beg + kRegionSize - kFreeArraySize;\n  }\n\n  uptr GetChunkIdx(uptr chunk, uptr size) const {\n    if (!kUsingConstantSpaceBeg)\n      chunk -= SpaceBeg();\n\n    uptr offset = chunk % kRegionSize;\n    // Here we divide by a non-constant. This is costly.\n    // size always fits into 32-bits. If the offset fits too, use 32-bit div.\n    if (offset >> (SANITIZER_WORDSIZE / 2))\n      return offset / size;\n    return (u32)offset / (u32)size;\n  }\n\n  CompactPtrT *GetFreeArray(uptr region_beg) const {\n    return reinterpret_cast<CompactPtrT *>(GetMetadataEnd(region_beg));\n  }\n\n  bool MapWithCallback(uptr beg, uptr size, const char *name) {\n    if (PremappedHeap)\n      return beg >= NonConstSpaceBeg &&\n             beg + size <= NonConstSpaceBeg + kSpaceSize;\n    uptr mapped = address_range.Map(beg, size, name);\n    if (UNLIKELY(!mapped))\n      return false;\n    CHECK_EQ(beg, mapped);\n    MapUnmapCallback().OnMap(beg, size);\n    return true;\n  }\n\n  void MapWithCallbackOrDie(uptr beg, uptr size, const char *name) {\n    if (PremappedHeap) {\n      CHECK_GE(beg, NonConstSpaceBeg);\n      CHECK_LE(beg + size, NonConstSpaceBeg + kSpaceSize);\n      return;\n    }\n    CHECK_EQ(beg, address_range.MapOrDie(beg, size, name));\n    MapUnmapCallback().OnMap(beg, size);\n  }\n\n  void UnmapWithCallbackOrDie(uptr beg, uptr size) {\n    if (PremappedHeap)\n      return;\n    MapUnmapCallback().OnUnmap(beg, size);\n    address_range.Unmap(beg, size);\n  }\n\n  bool EnsureFreeArraySpace(RegionInfo *region, uptr region_beg,\n                            uptr num_freed_chunks) {\n    uptr needed_space = num_freed_chunks * sizeof(CompactPtrT);\n    if (region->mapped_free_array < needed_space) {\n      uptr new_mapped_free_array = RoundUpTo(needed_space, kFreeArrayMapSize);\n      CHECK_LE(new_mapped_free_array, kFreeArraySize);\n      uptr current_map_end = reinterpret_cast<uptr>(GetFreeArray(region_beg)) +\n                             
region->mapped_free_array;\n      uptr new_map_size = new_mapped_free_array - region->mapped_free_array;\n      if (UNLIKELY(!MapWithCallback(current_map_end, new_map_size,\n                                    \"SizeClassAllocator: freearray\")))\n        return false;\n      region->mapped_free_array = new_mapped_free_array;\n    }\n    return true;\n  }\n\n  // Check whether this size class is exhausted.\n  bool IsRegionExhausted(RegionInfo *region, uptr class_id,\n                         uptr additional_map_size) {\n    if (LIKELY(region->mapped_user + region->mapped_meta +\n               additional_map_size <= kRegionSize - kFreeArraySize))\n      return false;\n    if (!region->exhausted) {\n      region->exhausted = true;\n      Printf(\"%s: Out of memory. \", SanitizerToolName);\n      Printf(\"The process has exhausted %zuMB for size class %zu.\\n\",\n             kRegionSize >> 20, ClassIdToSize(class_id));\n    }\n    return true;\n  }\n\n  NOINLINE bool PopulateFreeArray(AllocatorStats *stat, uptr class_id,\n                                  RegionInfo *region, uptr requested_count) {\n    // region->mutex is held.\n    const uptr region_beg = GetRegionBeginBySizeClass(class_id);\n    const uptr size = ClassIdToSize(class_id);\n\n    const uptr total_user_bytes =\n        region->allocated_user + requested_count * size;\n    // Map more space for chunks, if necessary.\n    if (LIKELY(total_user_bytes > region->mapped_user)) {\n      if (UNLIKELY(region->mapped_user == 0)) {\n        if (!kUsingConstantSpaceBeg && kRandomShuffleChunks)\n          // The random state is initialized from ASLR.\n          region->rand_state = static_cast<u32>(region_beg >> 12);\n        // Postpone the first release to OS attempt for ReleaseToOSIntervalMs,\n        // preventing just allocated memory from being released sooner than\n        // necessary and also preventing extraneous ReleaseMemoryPagesToOS calls\n        // for short lived processes.\n        // Do it only 
when the feature is turned on, to avoid a potentially\n        // extraneous syscall.\n        if (ReleaseToOSIntervalMs() >= 0)\n          region->rtoi.last_release_at_ns = MonotonicNanoTime();\n      }\n      // Do the mmap for the user memory.\n      const uptr user_map_size =\n          RoundUpTo(total_user_bytes - region->mapped_user, kUserMapSize);\n      if (UNLIKELY(IsRegionExhausted(region, class_id, user_map_size)))\n        return false;\n      if (UNLIKELY(!MapWithCallback(region_beg + region->mapped_user,\n                                    user_map_size,\n                                    \"SizeClassAllocator: region data\")))\n        return false;\n      stat->Add(AllocatorStatMapped, user_map_size);\n      region->mapped_user += user_map_size;\n    }\n    const uptr new_chunks_count =\n        (region->mapped_user - region->allocated_user) / size;\n\n    if (kMetadataSize) {\n      // Calculate the required space for metadata.\n      const uptr total_meta_bytes =\n          region->allocated_meta + new_chunks_count * kMetadataSize;\n      const uptr meta_map_size = (total_meta_bytes > region->mapped_meta) ?\n          RoundUpTo(total_meta_bytes - region->mapped_meta, kMetaMapSize) : 0;\n      // Map more space for metadata, if necessary.\n      if (meta_map_size) {\n        if (UNLIKELY(IsRegionExhausted(region, class_id, meta_map_size)))\n          return false;\n        if (UNLIKELY(!MapWithCallback(\n            GetMetadataEnd(region_beg) - region->mapped_meta - meta_map_size,\n            meta_map_size, \"SizeClassAllocator: region metadata\")))\n          return false;\n        region->mapped_meta += meta_map_size;\n      }\n    }\n\n    // If necessary, allocate more space for the free array and populate it with\n    // newly allocated chunks.\n    const uptr total_freed_chunks = region->num_freed_chunks + new_chunks_count;\n    if (UNLIKELY(!EnsureFreeArraySpace(region, region_beg, total_freed_chunks)))\n      return false;\n    
CompactPtrT *free_array = GetFreeArray(region_beg);\n    for (uptr i = 0, chunk = region->allocated_user; i < new_chunks_count;\n         i++, chunk += size)\n      free_array[total_freed_chunks - 1 - i] = PointerToCompactPtr(0, chunk);\n    if (kRandomShuffleChunks)\n      RandomShuffle(&free_array[region->num_freed_chunks], new_chunks_count,\n                    &region->rand_state);\n\n    // All necessary memory is mapped and now it is safe to advance all\n    // 'allocated_*' counters.\n    region->num_freed_chunks += new_chunks_count;\n    region->allocated_user += new_chunks_count * size;\n    CHECK_LE(region->allocated_user, region->mapped_user);\n    region->allocated_meta += new_chunks_count * kMetadataSize;\n    CHECK_LE(region->allocated_meta, region->mapped_meta);\n    region->exhausted = false;\n\n    // TODO(alekseyshl): Consider bumping last_release_at_ns here to prevent\n    // MaybeReleaseToOS from releasing just allocated pages or protect these\n    // not yet used chunks some other way.\n\n    return true;\n  }\n\n  // Attempts to release RAM occupied by freed chunks back to OS. 
The region is\n  // expected to be locked.\n  //\n  // TODO(morehouse): Support a callback on memory release so HWASan can release\n  // aliases as well.\n  void MaybeReleaseToOS(MemoryMapperT *memory_mapper, uptr class_id,\n                        bool force) {\n    RegionInfo *region = GetRegionInfo(class_id);\n    const uptr chunk_size = ClassIdToSize(class_id);\n    const uptr page_size = GetPageSizeCached();\n\n    uptr n = region->num_freed_chunks;\n    if (n * chunk_size < page_size)\n      return;  // No chance to release anything.\n    if ((region->stats.n_freed -\n         region->rtoi.n_freed_at_last_release) * chunk_size < page_size) {\n      return;  // Nothing new to release.\n    }\n\n    if (!force) {\n      s32 interval_ms = ReleaseToOSIntervalMs();\n      if (interval_ms < 0)\n        return;\n\n      if (region->rtoi.last_release_at_ns + interval_ms * 1000000ULL >\n          MonotonicNanoTime()) {\n        return;  // Memory was returned recently.\n      }\n    }\n\n    ReleaseFreeMemoryToOS(\n        GetFreeArray(GetRegionBeginBySizeClass(class_id)), n, chunk_size,\n        RoundUpTo(region->allocated_user, page_size) / page_size, memory_mapper,\n        class_id);\n\n    uptr ranges, bytes;\n    if (memory_mapper->GetAndResetStats(ranges, bytes)) {\n      region->rtoi.n_freed_at_last_release = region->stats.n_freed;\n      region->rtoi.num_releases += ranges;\n      region->rtoi.last_released_bytes = bytes;\n    }\n    region->rtoi.last_release_at_ns = MonotonicNanoTime();\n  }\n};\n"
  },
  {
    "path": "runtime/sanitizer_common/sanitizer_allocator_report.cpp",
    "content": "//===-- sanitizer_allocator_report.cpp --------------------------*- C++ -*-===//\n//\n// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n// See https://llvm.org/LICENSE.txt for license information.\n// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n//\n//===----------------------------------------------------------------------===//\n///\n/// \\file\n/// Shared allocator error reporting for ThreadSanitizer, MemorySanitizer, etc.\n///\n//===----------------------------------------------------------------------===//\n\n#include \"sanitizer_allocator.h\"\n#include \"sanitizer_allocator_report.h\"\n#include \"sanitizer_common.h\"\n#include \"sanitizer_report_decorator.h\"\n\nnamespace __sanitizer {\n\nclass ScopedAllocatorErrorReport {\n public:\n  ScopedAllocatorErrorReport(const char *error_summary_,\n                             const StackTrace *stack_)\n      : error_summary(error_summary_),\n        stack(stack_) {\n    Printf(\"%s\", d.Error());\n  }\n  ~ScopedAllocatorErrorReport() {\n    Printf(\"%s\", d.Default());\n    stack->Print();\n    PrintHintAllocatorCannotReturnNull();\n    ReportErrorSummary(error_summary, stack);\n  }\n\n private:\n  ScopedErrorReportLock lock;\n  const char *error_summary;\n  const StackTrace* const stack;\n  const SanitizerCommonDecorator d;\n};\n\nvoid NORETURN ReportCallocOverflow(uptr count, uptr size,\n                                   const StackTrace *stack) {\n  {\n    ScopedAllocatorErrorReport report(\"calloc-overflow\", stack);\n    Report(\"ERROR: %s: calloc parameters overflow: count * size (%zd * %zd) \"\n           \"cannot be represented in type size_t\\n\", SanitizerToolName, count,\n           size);\n  }\n  Die();\n}\n\nvoid NORETURN ReportReallocArrayOverflow(uptr count, uptr size,\n                                         const StackTrace *stack) {\n  {\n    ScopedAllocatorErrorReport report(\"reallocarray-overflow\", stack);\n    Report(\n        
\"ERROR: %s: reallocarray parameters overflow: count * size (%zd * %zd) \"\n        \"cannot be represented in type size_t\\n\",\n        SanitizerToolName, count, size);\n  }\n  Die();\n}\n\nvoid NORETURN ReportPvallocOverflow(uptr size, const StackTrace *stack) {\n  {\n    ScopedAllocatorErrorReport report(\"pvalloc-overflow\", stack);\n    Report(\"ERROR: %s: pvalloc parameters overflow: size 0x%zx rounded up to \"\n           \"system page size 0x%zx cannot be represented in type size_t\\n\",\n           SanitizerToolName, size, GetPageSizeCached());\n  }\n  Die();\n}\n\nvoid NORETURN ReportInvalidAllocationAlignment(uptr alignment,\n                                               const StackTrace *stack) {\n  {\n    ScopedAllocatorErrorReport report(\"invalid-allocation-alignment\", stack);\n    Report(\"ERROR: %s: invalid allocation alignment: %zd, alignment must be a \"\n           \"power of two\\n\", SanitizerToolName, alignment);\n  }\n  Die();\n}\n\nvoid NORETURN ReportInvalidAlignedAllocAlignment(uptr size, uptr alignment,\n                                                 const StackTrace *stack) {\n  {\n    ScopedAllocatorErrorReport report(\"invalid-aligned-alloc-alignment\", stack);\n#if SANITIZER_POSIX\n    Report(\"ERROR: %s: invalid alignment requested in \"\n           \"aligned_alloc: %zd, alignment must be a power of two and the \"\n           \"requested size 0x%zx must be a multiple of alignment\\n\",\n           SanitizerToolName, alignment, size);\n#else\n    Report(\"ERROR: %s: invalid alignment requested in aligned_alloc: %zd, \"\n           \"the requested size 0x%zx must be a multiple of alignment\\n\",\n           SanitizerToolName, alignment, size);\n#endif\n  }\n  Die();\n}\n\nvoid NORETURN ReportInvalidPosixMemalignAlignment(uptr alignment,\n                                                  const StackTrace *stack) {\n  {\n    ScopedAllocatorErrorReport report(\"invalid-posix-memalign-alignment\",\n                                    
  stack);\n    Report(\n        \"ERROR: %s: invalid alignment requested in \"\n        \"posix_memalign: %zd, alignment must be a power of two and a \"\n        \"multiple of sizeof(void*) == %zd\\n\",\n        SanitizerToolName, alignment, sizeof(void *));\n  }\n  Die();\n}\n\nvoid NORETURN ReportAllocationSizeTooBig(uptr user_size, uptr max_size,\n                                         const StackTrace *stack) {\n  {\n    ScopedAllocatorErrorReport report(\"allocation-size-too-big\", stack);\n    Report(\"ERROR: %s: requested allocation size 0x%zx exceeds maximum \"\n           \"supported size of 0x%zx\\n\", SanitizerToolName, user_size, max_size);\n  }\n  Die();\n}\n\nvoid NORETURN ReportOutOfMemory(uptr requested_size, const StackTrace *stack) {\n  {\n    ScopedAllocatorErrorReport report(\"out-of-memory\", stack);\n    Report(\"ERROR: %s: allocator is out of memory trying to allocate 0x%zx \"\n           \"bytes\\n\", SanitizerToolName, requested_size);\n  }\n  Die();\n}\n\nvoid NORETURN ReportRssLimitExceeded(const StackTrace *stack) {\n  {\n    ScopedAllocatorErrorReport report(\"rss-limit-exceeded\", stack);\n    Report(\"ERROR: %s: allocator exceeded the RSS limit\\n\", SanitizerToolName);\n  }\n  Die();\n}\n\n}  // namespace __sanitizer\n"
  },
  {
    "path": "runtime/sanitizer_common/sanitizer_allocator_report.h",
    "content": "//===-- sanitizer_allocator_report.h ----------------------------*- C++ -*-===//\n//\n// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n// See https://llvm.org/LICENSE.txt for license information.\n// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n//\n//===----------------------------------------------------------------------===//\n///\n/// \\file\n/// Shared allocator error reporting for ThreadSanitizer, MemorySanitizer, etc.\n///\n//===----------------------------------------------------------------------===//\n\n#ifndef SANITIZER_ALLOCATOR_REPORT_H\n#define SANITIZER_ALLOCATOR_REPORT_H\n\n#include \"sanitizer_internal_defs.h\"\n#include \"sanitizer_stacktrace.h\"\n\nnamespace __sanitizer {\n\nvoid NORETURN ReportCallocOverflow(uptr count, uptr size,\n                                   const StackTrace *stack);\nvoid NORETURN ReportReallocArrayOverflow(uptr count, uptr size,\n                                         const StackTrace *stack);\nvoid NORETURN ReportPvallocOverflow(uptr size, const StackTrace *stack);\nvoid NORETURN ReportInvalidAllocationAlignment(uptr alignment,\n                                               const StackTrace *stack);\nvoid NORETURN ReportInvalidAlignedAllocAlignment(uptr size, uptr alignment,\n                                                 const StackTrace *stack);\nvoid NORETURN ReportInvalidPosixMemalignAlignment(uptr alignment,\n                                                  const StackTrace *stack);\nvoid NORETURN ReportAllocationSizeTooBig(uptr user_size, uptr max_size,\n                                         const StackTrace *stack);\nvoid NORETURN ReportOutOfMemory(uptr requested_size, const StackTrace *stack);\nvoid NORETURN ReportRssLimitExceeded(const StackTrace *stack);\n\n}  // namespace __sanitizer\n\n#endif  // SANITIZER_ALLOCATOR_REPORT_H\n"
  },
  {
    "path": "runtime/sanitizer_common/sanitizer_allocator_secondary.h",
    "content": "//===-- sanitizer_allocator_secondary.h -------------------------*- C++ -*-===//\n//\n// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n// See https://llvm.org/LICENSE.txt for license information.\n// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n//\n//===----------------------------------------------------------------------===//\n//\n// Part of the Sanitizer Allocator.\n//\n//===----------------------------------------------------------------------===//\n#ifndef SANITIZER_ALLOCATOR_H\n#error This file must be included inside sanitizer_allocator.h\n#endif\n\n// Fixed array to store LargeMmapAllocator chunks list, limited to 32K total\n// allocated chunks. To be used in memory constrained or not memory hungry cases\n// (currently, 32 bits and internal allocator).\nclass LargeMmapAllocatorPtrArrayStatic {\n public:\n  inline void *Init() { return &p_[0]; }\n  inline void EnsureSpace(uptr n) { CHECK_LT(n, kMaxNumChunks); }\n private:\n  static const int kMaxNumChunks = 1 << 15;\n  uptr p_[kMaxNumChunks];\n};\n\n// Much less restricted LargeMmapAllocator chunks list (comparing to\n// PtrArrayStatic). 
Backed by mmaped memory region and can hold up to 1M chunks.\n// ReservedAddressRange was used instead of just MAP_NORESERVE to achieve the\n// same functionality in Fuchsia case, which does not support MAP_NORESERVE.\nclass LargeMmapAllocatorPtrArrayDynamic {\n public:\n  inline void *Init() {\n    uptr p = address_range_.Init(kMaxNumChunks * sizeof(uptr),\n                                 SecondaryAllocatorName);\n    CHECK(p);\n    return reinterpret_cast<void*>(p);\n  }\n\n  inline void EnsureSpace(uptr n) {\n    CHECK_LT(n, kMaxNumChunks);\n    DCHECK(n <= n_reserved_);\n    if (UNLIKELY(n == n_reserved_)) {\n      address_range_.MapOrDie(\n          reinterpret_cast<uptr>(address_range_.base()) +\n              n_reserved_ * sizeof(uptr),\n          kChunksBlockCount * sizeof(uptr));\n      n_reserved_ += kChunksBlockCount;\n    }\n  }\n\n private:\n  static const int kMaxNumChunks = 1 << 20;\n  static const int kChunksBlockCount = 1 << 14;\n  ReservedAddressRange address_range_;\n  uptr n_reserved_;\n};\n\n#if SANITIZER_WORDSIZE == 32\ntypedef LargeMmapAllocatorPtrArrayStatic DefaultLargeMmapAllocatorPtrArray;\n#else\ntypedef LargeMmapAllocatorPtrArrayDynamic DefaultLargeMmapAllocatorPtrArray;\n#endif\n\n// This class can (de)allocate only large chunks of memory using mmap/unmap.\n// The main purpose of this allocator is to cover large and rare allocation\n// sizes not covered by more efficient allocators (e.g. 
SizeClassAllocator64).\ntemplate <class MapUnmapCallback = NoOpMapUnmapCallback,\n          class PtrArrayT = DefaultLargeMmapAllocatorPtrArray,\n          class AddressSpaceViewTy = LocalAddressSpaceView>\nclass LargeMmapAllocator {\n public:\n  using AddressSpaceView = AddressSpaceViewTy;\n  void InitLinkerInitialized() {\n    page_size_ = GetPageSizeCached();\n    chunks_ = reinterpret_cast<Header**>(ptr_array_.Init());\n  }\n\n  void Init() {\n    internal_memset(this, 0, sizeof(*this));\n    InitLinkerInitialized();\n  }\n\n  void *Allocate(AllocatorStats *stat, uptr size, uptr alignment) {\n    CHECK(IsPowerOfTwo(alignment));\n    uptr map_size = RoundUpMapSize(size);\n    if (alignment > page_size_)\n      map_size += alignment;\n    // Overflow.\n    if (map_size < size) {\n      Report(\"WARNING: %s: LargeMmapAllocator allocation overflow: \"\n             \"0x%zx bytes with 0x%zx alignment requested\\n\",\n             SanitizerToolName, map_size, alignment);\n      return nullptr;\n    }\n    uptr map_beg = reinterpret_cast<uptr>(\n        MmapOrDieOnFatalError(map_size, SecondaryAllocatorName));\n    if (!map_beg)\n      return nullptr;\n    CHECK(IsAligned(map_beg, page_size_));\n    MapUnmapCallback().OnMap(map_beg, map_size);\n    uptr map_end = map_beg + map_size;\n    uptr res = map_beg + page_size_;\n    if (res & (alignment - 1))  // Align.\n      res += alignment - (res & (alignment - 1));\n    CHECK(IsAligned(res, alignment));\n    CHECK(IsAligned(res, page_size_));\n    CHECK_GE(res + size, map_beg);\n    CHECK_LE(res + size, map_end);\n    Header *h = GetHeader(res);\n    h->size = size;\n    h->map_beg = map_beg;\n    h->map_size = map_size;\n    uptr size_log = MostSignificantSetBitIndex(map_size);\n    CHECK_LT(size_log, ARRAY_SIZE(stats.by_size_log));\n    {\n      SpinMutexLock l(&mutex_);\n      ptr_array_.EnsureSpace(n_chunks_);\n      uptr idx = n_chunks_++;\n      h->chunk_idx = idx;\n      chunks_[idx] = h;\n      chunks_sorted_ = 
false;\n      stats.n_allocs++;\n      stats.currently_allocated += map_size;\n      stats.max_allocated = Max(stats.max_allocated, stats.currently_allocated);\n      stats.by_size_log[size_log]++;\n      stat->Add(AllocatorStatAllocated, map_size);\n      stat->Add(AllocatorStatMapped, map_size);\n    }\n    return reinterpret_cast<void*>(res);\n  }\n\n  void Deallocate(AllocatorStats *stat, void *p) {\n    Header *h = GetHeader(p);\n    {\n      SpinMutexLock l(&mutex_);\n      uptr idx = h->chunk_idx;\n      CHECK_EQ(chunks_[idx], h);\n      CHECK_LT(idx, n_chunks_);\n      chunks_[idx] = chunks_[--n_chunks_];\n      chunks_[idx]->chunk_idx = idx;\n      chunks_sorted_ = false;\n      stats.n_frees++;\n      stats.currently_allocated -= h->map_size;\n      stat->Sub(AllocatorStatAllocated, h->map_size);\n      stat->Sub(AllocatorStatMapped, h->map_size);\n    }\n    MapUnmapCallback().OnUnmap(h->map_beg, h->map_size);\n    UnmapOrDie(reinterpret_cast<void*>(h->map_beg), h->map_size);\n  }\n\n  uptr TotalMemoryUsed() {\n    SpinMutexLock l(&mutex_);\n    uptr res = 0;\n    for (uptr i = 0; i < n_chunks_; i++) {\n      Header *h = chunks_[i];\n      CHECK_EQ(h->chunk_idx, i);\n      res += RoundUpMapSize(h->size);\n    }\n    return res;\n  }\n\n  bool PointerIsMine(const void *p) const {\n    return GetBlockBegin(p) != nullptr;\n  }\n\n  uptr GetActuallyAllocatedSize(void *p) {\n    return RoundUpTo(GetHeader(p)->size, page_size_);\n  }\n\n  // At least page_size_/2 metadata bytes is available.\n  void *GetMetaData(const void *p) {\n    // Too slow: CHECK_EQ(p, GetBlockBegin(p));\n    if (!IsAligned(reinterpret_cast<uptr>(p), page_size_)) {\n      Printf(\"%s: bad pointer %p\\n\", SanitizerToolName, p);\n      CHECK(IsAligned(reinterpret_cast<uptr>(p), page_size_));\n    }\n    return GetHeader(p) + 1;\n  }\n\n  void *GetBlockBegin(const void *ptr) const {\n    uptr p = reinterpret_cast<uptr>(ptr);\n    SpinMutexLock l(&mutex_);\n    uptr nearest_chunk = 0;\n    
Header *const *chunks = AddressSpaceView::Load(chunks_, n_chunks_);\n    // Cache-friendly linear search.\n    for (uptr i = 0; i < n_chunks_; i++) {\n      uptr ch = reinterpret_cast<uptr>(chunks[i]);\n      if (p < ch) continue;  // p is at left to this chunk, skip it.\n      if (p - ch < p - nearest_chunk)\n        nearest_chunk = ch;\n    }\n    if (!nearest_chunk)\n      return nullptr;\n    const Header *h =\n        AddressSpaceView::Load(reinterpret_cast<Header *>(nearest_chunk));\n    Header *h_ptr = reinterpret_cast<Header *>(nearest_chunk);\n    CHECK_GE(nearest_chunk, h->map_beg);\n    CHECK_LT(nearest_chunk, h->map_beg + h->map_size);\n    CHECK_LE(nearest_chunk, p);\n    if (h->map_beg + h->map_size <= p)\n      return nullptr;\n    return GetUser(h_ptr);\n  }\n\n  void EnsureSortedChunks() {\n    if (chunks_sorted_) return;\n    Header **chunks = AddressSpaceView::LoadWritable(chunks_, n_chunks_);\n    Sort(reinterpret_cast<uptr *>(chunks), n_chunks_);\n    for (uptr i = 0; i < n_chunks_; i++)\n      AddressSpaceView::LoadWritable(chunks[i])->chunk_idx = i;\n    chunks_sorted_ = true;\n  }\n\n  // This function does the same as GetBlockBegin, but is much faster.\n  // Must be called with the allocator locked.\n  void *GetBlockBeginFastLocked(void *ptr) {\n    mutex_.CheckLocked();\n    uptr p = reinterpret_cast<uptr>(ptr);\n    uptr n = n_chunks_;\n    if (!n) return nullptr;\n    EnsureSortedChunks();\n    Header *const *chunks = AddressSpaceView::Load(chunks_, n_chunks_);\n    auto min_mmap_ = reinterpret_cast<uptr>(chunks[0]);\n    auto max_mmap_ = reinterpret_cast<uptr>(chunks[n - 1]) +\n                     AddressSpaceView::Load(chunks[n - 1])->map_size;\n    if (p < min_mmap_ || p >= max_mmap_)\n      return nullptr;\n    uptr beg = 0, end = n - 1;\n    // This loop is a log(n) lower_bound. 
It does not check for the exact match\n    // to avoid expensive cache-thrashing loads.\n    while (end - beg >= 2) {\n      uptr mid = (beg + end) / 2;  // Invariant: mid >= beg + 1\n      if (p < reinterpret_cast<uptr>(chunks[mid]))\n        end = mid - 1;  // We are not interested in chunks[mid].\n      else\n        beg = mid;  // chunks[mid] may still be what we want.\n    }\n\n    if (beg < end) {\n      CHECK_EQ(beg + 1, end);\n      // There are 2 chunks left, choose one.\n      if (p >= reinterpret_cast<uptr>(chunks[end]))\n        beg = end;\n    }\n\n    const Header *h = AddressSpaceView::Load(chunks[beg]);\n    Header *h_ptr = chunks[beg];\n    if (h->map_beg + h->map_size <= p || p < h->map_beg)\n      return nullptr;\n    return GetUser(h_ptr);\n  }\n\n  void PrintStats() {\n    Printf(\"Stats: LargeMmapAllocator: allocated %zd times, \"\n           \"remains %zd (%zd K) max %zd M; by size logs: \",\n           stats.n_allocs, stats.n_allocs - stats.n_frees,\n           stats.currently_allocated >> 10, stats.max_allocated >> 20);\n    for (uptr i = 0; i < ARRAY_SIZE(stats.by_size_log); i++) {\n      uptr c = stats.by_size_log[i];\n      if (!c) continue;\n      Printf(\"%zd:%zd; \", i, c);\n    }\n    Printf(\"\\n\");\n  }\n\n  // ForceLock() and ForceUnlock() are needed to implement Darwin malloc zone\n  // introspection API.\n  void ForceLock() SANITIZER_ACQUIRE(mutex_) { mutex_.Lock(); }\n\n  void ForceUnlock() SANITIZER_RELEASE(mutex_) { mutex_.Unlock(); }\n\n  // Iterate over all existing chunks.\n  // The allocator must be locked when calling this function.\n  void ForEachChunk(ForEachChunkCallback callback, void *arg) {\n    EnsureSortedChunks();  // Avoid doing the sort while iterating.\n    const Header *const *chunks = AddressSpaceView::Load(chunks_, n_chunks_);\n    for (uptr i = 0; i < n_chunks_; i++) {\n      const Header *t = chunks[i];\n      callback(reinterpret_cast<uptr>(GetUser(t)), arg);\n      // Consistency check: verify that 
the array did not change.\n      CHECK_EQ(chunks[i], t);\n      CHECK_EQ(AddressSpaceView::Load(chunks[i])->chunk_idx, i);\n    }\n  }\n\n private:\n  struct Header {\n    uptr map_beg;\n    uptr map_size;\n    uptr size;\n    uptr chunk_idx;\n  };\n\n  Header *GetHeader(uptr p) {\n    CHECK(IsAligned(p, page_size_));\n    return reinterpret_cast<Header*>(p - page_size_);\n  }\n  Header *GetHeader(const void *p) {\n    return GetHeader(reinterpret_cast<uptr>(p));\n  }\n\n  void *GetUser(const Header *h) const {\n    CHECK(IsAligned((uptr)h, page_size_));\n    return reinterpret_cast<void*>(reinterpret_cast<uptr>(h) + page_size_);\n  }\n\n  uptr RoundUpMapSize(uptr size) {\n    return RoundUpTo(size, page_size_) + page_size_;\n  }\n\n  uptr page_size_;\n  Header **chunks_;\n  PtrArrayT ptr_array_;\n  uptr n_chunks_;\n  bool chunks_sorted_;\n  struct Stats {\n    uptr n_allocs, n_frees, currently_allocated, max_allocated, by_size_log[64];\n  } stats;\n  mutable StaticSpinMutex mutex_;\n};\n"
  },
  {
    "path": "runtime/sanitizer_common/sanitizer_allocator_size_class_map.h",
    "content": "//===-- sanitizer_allocator_size_class_map.h --------------------*- C++ -*-===//\n//\n// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n// See https://llvm.org/LICENSE.txt for license information.\n// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n//\n//===----------------------------------------------------------------------===//\n//\n// Part of the Sanitizer Allocator.\n//\n//===----------------------------------------------------------------------===//\n#ifndef SANITIZER_ALLOCATOR_H\n#error This file must be included inside sanitizer_allocator.h\n#endif\n\n// SizeClassMap maps allocation sizes into size classes and back.\n// Class 0 always corresponds to size 0.\n// The other sizes are controlled by the template parameters:\n//   kMinSizeLog: defines the class 1    as 2^kMinSizeLog.\n//   kMaxSizeLog: defines the last class as 2^kMaxSizeLog.\n//   kMidSizeLog: the classes starting from 1 increase with step\n//                2^kMinSizeLog until 2^kMidSizeLog.\n//   kNumBits: the number of non-zero bits in sizes after 2^kMidSizeLog.\n//             E.g. 
with kNumBits==3 all size classes after 2^kMidSizeLog\n//             look like 0b1xx0..0, where x is either 0 or 1.\n//\n// Example: kNumBits=3, kMinSizeLog=4, kMidSizeLog=8, kMaxSizeLog=17:\n//\n// Classes 1 - 16 correspond to sizes 16 to 256 (size = class_id * 16).\n// Next 4 classes: 256 + i * 64  (i = 1 to 4).\n// Next 4 classes: 512 + i * 128 (i = 1 to 4).\n// ...\n// Next 4 classes: 2^k + i * 2^(k-2) (i = 1 to 4).\n// Last class corresponds to kMaxSize = 1 << kMaxSizeLog.\n//\n// This structure of the size class map gives us:\n//   - Efficient table-free class-to-size and size-to-class functions.\n//   - Difference between two consequent size classes is between 14% and 25%\n//\n// This class also gives a hint to a thread-caching allocator about the amount\n// of chunks that need to be cached per-thread:\n//  - kMaxNumCachedHint is a hint for maximal number of chunks per size class.\n//    The actual number is computed in TransferBatch.\n//  - (1 << kMaxBytesCachedLog) is the maximal number of bytes per size class.\n//\n// Part of output of SizeClassMap::Print():\n// c00 => s: 0 diff: +0 00% l 0 cached: 0 0; id 0\n// c01 => s: 16 diff: +16 00% l 4 cached: 256 4096; id 1\n// c02 => s: 32 diff: +16 100% l 5 cached: 256 8192; id 2\n// c03 => s: 48 diff: +16 50% l 5 cached: 256 12288; id 3\n// c04 => s: 64 diff: +16 33% l 6 cached: 256 16384; id 4\n// c05 => s: 80 diff: +16 25% l 6 cached: 256 20480; id 5\n// c06 => s: 96 diff: +16 20% l 6 cached: 256 24576; id 6\n// c07 => s: 112 diff: +16 16% l 6 cached: 256 28672; id 7\n//\n// c08 => s: 128 diff: +16 14% l 7 cached: 256 32768; id 8\n// c09 => s: 144 diff: +16 12% l 7 cached: 256 36864; id 9\n// c10 => s: 160 diff: +16 11% l 7 cached: 256 40960; id 10\n// c11 => s: 176 diff: +16 10% l 7 cached: 256 45056; id 11\n// c12 => s: 192 diff: +16 09% l 7 cached: 256 49152; id 12\n// c13 => s: 208 diff: +16 08% l 7 cached: 256 53248; id 13\n// c14 => s: 224 diff: +16 07% l 7 cached: 256 57344; id 14\n// c15 => s: 240 
diff: +16 07% l 7 cached: 256 61440; id 15\n//\n// c16 => s: 256 diff: +16 06% l 8 cached: 256 65536; id 16\n// c17 => s: 320 diff: +64 25% l 8 cached: 204 65280; id 17\n// c18 => s: 384 diff: +64 20% l 8 cached: 170 65280; id 18\n// c19 => s: 448 diff: +64 16% l 8 cached: 146 65408; id 19\n//\n// c20 => s: 512 diff: +64 14% l 9 cached: 128 65536; id 20\n// c21 => s: 640 diff: +128 25% l 9 cached: 102 65280; id 21\n// c22 => s: 768 diff: +128 20% l 9 cached: 85 65280; id 22\n// c23 => s: 896 diff: +128 16% l 9 cached: 73 65408; id 23\n//\n// c24 => s: 1024 diff: +128 14% l 10 cached: 64 65536; id 24\n// c25 => s: 1280 diff: +256 25% l 10 cached: 51 65280; id 25\n// c26 => s: 1536 diff: +256 20% l 10 cached: 42 64512; id 26\n// c27 => s: 1792 diff: +256 16% l 10 cached: 36 64512; id 27\n//\n// ...\n//\n// c48 => s: 65536 diff: +8192 14% l 16 cached: 1 65536; id 48\n// c49 => s: 81920 diff: +16384 25% l 16 cached: 1 81920; id 49\n// c50 => s: 98304 diff: +16384 20% l 16 cached: 1 98304; id 50\n// c51 => s: 114688 diff: +16384 16% l 16 cached: 1 114688; id 51\n//\n// c52 => s: 131072 diff: +16384 14% l 17 cached: 1 131072; id 52\n//\n//\n// Another example (kNumBits=2):\n// c00 => s: 0 diff: +0 00% l 0 cached: 0 0; id 0\n// c01 => s: 32 diff: +32 00% l 5 cached: 64 2048; id 1\n// c02 => s: 64 diff: +32 100% l 6 cached: 64 4096; id 2\n// c03 => s: 96 diff: +32 50% l 6 cached: 64 6144; id 3\n// c04 => s: 128 diff: +32 33% l 7 cached: 64 8192; id 4\n// c05 => s: 160 diff: +32 25% l 7 cached: 64 10240; id 5\n// c06 => s: 192 diff: +32 20% l 7 cached: 64 12288; id 6\n// c07 => s: 224 diff: +32 16% l 7 cached: 64 14336; id 7\n// c08 => s: 256 diff: +32 14% l 8 cached: 64 16384; id 8\n// c09 => s: 384 diff: +128 50% l 8 cached: 42 16128; id 9\n// c10 => s: 512 diff: +128 33% l 9 cached: 32 16384; id 10\n// c11 => s: 768 diff: +256 50% l 9 cached: 21 16128; id 11\n// c12 => s: 1024 diff: +256 33% l 10 cached: 16 16384; id 12\n// c13 => s: 1536 diff: +512 50% l 10 cached: 10 
15360; id 13\n// c14 => s: 2048 diff: +512 33% l 11 cached: 8 16384; id 14\n// c15 => s: 3072 diff: +1024 50% l 11 cached: 5 15360; id 15\n// c16 => s: 4096 diff: +1024 33% l 12 cached: 4 16384; id 16\n// c17 => s: 6144 diff: +2048 50% l 12 cached: 2 12288; id 17\n// c18 => s: 8192 diff: +2048 33% l 13 cached: 2 16384; id 18\n// c19 => s: 12288 diff: +4096 50% l 13 cached: 1 12288; id 19\n// c20 => s: 16384 diff: +4096 33% l 14 cached: 1 16384; id 20\n// c21 => s: 24576 diff: +8192 50% l 14 cached: 1 24576; id 21\n// c22 => s: 32768 diff: +8192 33% l 15 cached: 1 32768; id 22\n// c23 => s: 49152 diff: +16384 50% l 15 cached: 1 49152; id 23\n// c24 => s: 65536 diff: +16384 33% l 16 cached: 1 65536; id 24\n// c25 => s: 98304 diff: +32768 50% l 16 cached: 1 98304; id 25\n// c26 => s: 131072 diff: +32768 33% l 17 cached: 1 131072; id 26\n\ntemplate <uptr kNumBits, uptr kMinSizeLog, uptr kMidSizeLog, uptr kMaxSizeLog,\n          uptr kMaxNumCachedHintT, uptr kMaxBytesCachedLog>\nclass SizeClassMap {\n  static const uptr kMinSize = 1 << kMinSizeLog;\n  static const uptr kMidSize = 1 << kMidSizeLog;\n  static const uptr kMidClass = kMidSize / kMinSize;\n  static const uptr S = kNumBits - 1;\n  static const uptr M = (1 << S) - 1;\n\n public:\n  // kMaxNumCachedHintT is a power of two. It serves as a hint\n  // for the size of TransferBatch, the actual size could be a bit smaller.\n  static const uptr kMaxNumCachedHint = kMaxNumCachedHintT;\n  COMPILER_CHECK((kMaxNumCachedHint & (kMaxNumCachedHint - 1)) == 0);\n\n  static const uptr kMaxSize = 1UL << kMaxSizeLog;\n  static const uptr kNumClasses =\n      kMidClass + ((kMaxSizeLog - kMidSizeLog) << S) + 1 + 1;\n  static const uptr kLargestClassID = kNumClasses - 2;\n  static const uptr kBatchClassID = kNumClasses - 1;\n  COMPILER_CHECK(kNumClasses >= 16 && kNumClasses <= 256);\n  static const uptr kNumClassesRounded =\n      kNumClasses <= 32  ? 32 :\n      kNumClasses <= 64  ? 64 :\n      kNumClasses <= 128 ? 
128 : 256;\n\n  static uptr Size(uptr class_id) {\n    // Estimate the result for kBatchClassID because this class does not know\n    // the exact size of TransferBatch. It's OK since we are using the actual\n    // sizeof(TransferBatch) where it matters.\n    if (UNLIKELY(class_id == kBatchClassID))\n      return kMaxNumCachedHint * sizeof(uptr);\n    if (class_id <= kMidClass)\n      return kMinSize * class_id;\n    class_id -= kMidClass;\n    uptr t = kMidSize << (class_id >> S);\n    return t + (t >> S) * (class_id & M);\n  }\n\n  static uptr ClassID(uptr size) {\n    if (UNLIKELY(size > kMaxSize))\n      return 0;\n    if (size <= kMidSize)\n      return (size + kMinSize - 1) >> kMinSizeLog;\n    const uptr l = MostSignificantSetBitIndex(size);\n    const uptr hbits = (size >> (l - S)) & M;\n    const uptr lbits = size & ((1U << (l - S)) - 1);\n    const uptr l1 = l - kMidSizeLog;\n    return kMidClass + (l1 << S) + hbits + (lbits > 0);\n  }\n\n  static uptr MaxCachedHint(uptr size) {\n    DCHECK_LE(size, kMaxSize);\n    if (UNLIKELY(size == 0))\n      return 0;\n    uptr n;\n    // Force a 32-bit division if the template parameters allow for it.\n    if (kMaxBytesCachedLog > 31 || kMaxSizeLog > 31)\n      n = (1UL << kMaxBytesCachedLog) / size;\n    else\n      n = (1U << kMaxBytesCachedLog) / static_cast<u32>(size);\n    return Max<uptr>(1U, Min(kMaxNumCachedHint, n));\n  }\n\n  static void Print() {\n    uptr prev_s = 0;\n    uptr total_cached = 0;\n    for (uptr i = 0; i < kNumClasses; i++) {\n      uptr s = Size(i);\n      if (s >= kMidSize / 2 && (s & (s - 1)) == 0)\n        Printf(\"\\n\");\n      uptr d = s - prev_s;\n      uptr p = prev_s ? (d * 100 / prev_s) : 0;\n      uptr l = s ? 
MostSignificantSetBitIndex(s) : 0;\n      uptr cached = MaxCachedHint(s) * s;\n      if (i == kBatchClassID)\n        d = p = l = 0;\n      Printf(\n          \"c%02zu => s: %zu diff: +%zu %02zu%% l %zu cached: %zu %zu; id %zu\\n\",\n          i, Size(i), d, p, l, MaxCachedHint(s), cached, ClassID(s));\n      total_cached += cached;\n      prev_s = s;\n    }\n    Printf(\"Total cached: %zu\\n\", total_cached);\n  }\n\n  static void Validate() {\n    for (uptr c = 1; c < kNumClasses; c++) {\n      // Printf(\"Validate: c%zd\\n\", c);\n      uptr s = Size(c);\n      CHECK_NE(s, 0U);\n      if (c == kBatchClassID)\n        continue;\n      CHECK_EQ(ClassID(s), c);\n      if (c < kLargestClassID)\n        CHECK_EQ(ClassID(s + 1), c + 1);\n      CHECK_EQ(ClassID(s - 1), c);\n      CHECK_GT(Size(c), Size(c - 1));\n    }\n    CHECK_EQ(ClassID(kMaxSize + 1), 0);\n\n    for (uptr s = 1; s <= kMaxSize; s++) {\n      uptr c = ClassID(s);\n      // Printf(\"s%zd => c%zd\\n\", s, c);\n      CHECK_LT(c, kNumClasses);\n      CHECK_GE(Size(c), s);\n      if (c > 0)\n        CHECK_LT(Size(c - 1), s);\n    }\n  }\n};\n\ntypedef SizeClassMap<3, 4, 8, 17, 128, 16> DefaultSizeClassMap;\ntypedef SizeClassMap<3, 4, 8, 17, 64, 14> CompactSizeClassMap;\ntypedef SizeClassMap<2, 5, 9, 16, 64, 14> VeryCompactSizeClassMap;\n\n// The following SizeClassMap only holds a way small number of cached entries,\n// allowing for denser per-class arrays, smaller memory footprint and usually\n// better performances in threaded environments.\ntypedef SizeClassMap<3, 4, 8, 17, 8, 10> DenseSizeClassMap;\n// Similar to VeryCompact map above, this one has a small number of different\n// size classes, and also reduced thread-local caches.\ntypedef SizeClassMap<2, 5, 9, 16, 8, 10> VeryDenseSizeClassMap;\n"
  },
  {
    "path": "runtime/sanitizer_common/sanitizer_allocator_stats.h",
    "content": "//===-- sanitizer_allocator_stats.h -----------------------------*- C++ -*-===//\n//\n// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n// See https://llvm.org/LICENSE.txt for license information.\n// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n//\n//===----------------------------------------------------------------------===//\n//\n// Part of the Sanitizer Allocator.\n//\n//===----------------------------------------------------------------------===//\n#ifndef SANITIZER_ALLOCATOR_H\n#error This file must be included inside sanitizer_allocator.h\n#endif\n\n// Memory allocator statistics\nenum AllocatorStat {\n  AllocatorStatAllocated,\n  AllocatorStatMapped,\n  AllocatorStatCount\n};\n\ntypedef uptr AllocatorStatCounters[AllocatorStatCount];\n\n// Per-thread stats, live in per-thread cache.\nclass AllocatorStats {\n public:\n  void Init() {\n    internal_memset(this, 0, sizeof(*this));\n  }\n  void InitLinkerInitialized() {}\n\n  void Add(AllocatorStat i, uptr v) {\n    v += atomic_load(&stats_[i], memory_order_relaxed);\n    atomic_store(&stats_[i], v, memory_order_relaxed);\n  }\n\n  void Sub(AllocatorStat i, uptr v) {\n    v = atomic_load(&stats_[i], memory_order_relaxed) - v;\n    atomic_store(&stats_[i], v, memory_order_relaxed);\n  }\n\n  void Set(AllocatorStat i, uptr v) {\n    atomic_store(&stats_[i], v, memory_order_relaxed);\n  }\n\n  uptr Get(AllocatorStat i) const {\n    return atomic_load(&stats_[i], memory_order_relaxed);\n  }\n\n private:\n  friend class AllocatorGlobalStats;\n  AllocatorStats *next_;\n  AllocatorStats *prev_;\n  atomic_uintptr_t stats_[AllocatorStatCount];\n};\n\n// Global stats, used for aggregation and querying.\nclass AllocatorGlobalStats : public AllocatorStats {\n public:\n  void InitLinkerInitialized() {\n    next_ = this;\n    prev_ = this;\n  }\n  void Init() {\n    internal_memset(this, 0, sizeof(*this));\n    InitLinkerInitialized();\n  }\n\n  void 
Register(AllocatorStats *s) {\n    SpinMutexLock l(&mu_);\n    s->next_ = next_;\n    s->prev_ = this;\n    next_->prev_ = s;\n    next_ = s;\n  }\n\n  void Unregister(AllocatorStats *s) {\n    SpinMutexLock l(&mu_);\n    s->prev_->next_ = s->next_;\n    s->next_->prev_ = s->prev_;\n    for (int i = 0; i < AllocatorStatCount; i++)\n      Add(AllocatorStat(i), s->Get(AllocatorStat(i)));\n  }\n\n  void Get(AllocatorStatCounters s) const {\n    internal_memset(s, 0, AllocatorStatCount * sizeof(uptr));\n    SpinMutexLock l(&mu_);\n    const AllocatorStats *stats = this;\n    for (;;) {\n      for (int i = 0; i < AllocatorStatCount; i++)\n        s[i] += stats->Get(AllocatorStat(i));\n      stats = stats->next_;\n      if (stats == this)\n        break;\n    }\n    // All stats must be non-negative.\n    for (int i = 0; i < AllocatorStatCount; i++)\n      s[i] = ((sptr)s[i]) >= 0 ? s[i] : 0;\n  }\n\n private:\n  mutable StaticSpinMutex mu_;\n};\n\n\n"
  },
  {
    "path": "runtime/sanitizer_common/sanitizer_asm.h",
    "content": "//===-- sanitizer_asm.h -----------------------------------------*- C++ -*-===//\n//\n// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n// See https://llvm.org/LICENSE.txt for license information.\n// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n//\n//===----------------------------------------------------------------------===//\n//\n// Various support for assembler.\n//\n//===----------------------------------------------------------------------===//\n\n// Some toolchains do not support .cfi asm directives, so we have to hide\n// them inside macros.\n#if defined(__clang__) ||                                                      \\\n    (defined(__GNUC__) && defined(__GCC_HAVE_DWARF2_CFI_ASM))\n  // GCC defined __GCC_HAVE_DWARF2_CFI_ASM if it supports CFI.\n  // Clang seems to support CFI by default (or not?).\n  // We need two versions of macros: for inline asm and standalone asm files.\n# define CFI_INL_ADJUST_CFA_OFFSET(n) \".cfi_adjust_cfa_offset \" #n \";\"\n\n# define CFI_STARTPROC .cfi_startproc\n# define CFI_ENDPROC .cfi_endproc\n# define CFI_ADJUST_CFA_OFFSET(n) .cfi_adjust_cfa_offset n\n# define CFI_DEF_CFA_OFFSET(n) .cfi_def_cfa_offset n\n# define CFI_REL_OFFSET(reg, n) .cfi_rel_offset reg, n\n# define CFI_OFFSET(reg, n) .cfi_offset reg, n\n# define CFI_DEF_CFA_REGISTER(reg) .cfi_def_cfa_register reg\n# define CFI_DEF_CFA(reg, n) .cfi_def_cfa reg, n\n# define CFI_RESTORE(reg) .cfi_restore reg\n\n#else  // No CFI\n# define CFI_INL_ADJUST_CFA_OFFSET(n)\n# define CFI_STARTPROC\n# define CFI_ENDPROC\n# define CFI_ADJUST_CFA_OFFSET(n)\n# define CFI_DEF_CFA_OFFSET(n)\n# define CFI_REL_OFFSET(reg, n)\n# define CFI_OFFSET(reg, n)\n# define CFI_DEF_CFA_REGISTER(reg)\n# define CFI_DEF_CFA(reg, n)\n# define CFI_RESTORE(reg)\n#endif\n\n#if !defined(__APPLE__)\n# define ASM_HIDDEN(symbol) .hidden symbol\n# define ASM_TYPE_FUNCTION(symbol) .type symbol, %function\n# define ASM_SIZE(symbol) .size symbol, 
.-symbol\n# define ASM_SYMBOL(symbol) symbol\n# define ASM_SYMBOL_INTERCEPTOR(symbol) symbol\n# define ASM_WRAPPER_NAME(symbol) __interceptor_##symbol\n#else\n# define ASM_HIDDEN(symbol)\n# define ASM_TYPE_FUNCTION(symbol)\n# define ASM_SIZE(symbol)\n# define ASM_SYMBOL(symbol) _##symbol\n# define ASM_SYMBOL_INTERCEPTOR(symbol) _wrap_##symbol\n# define ASM_WRAPPER_NAME(symbol) __interceptor_##symbol\n#endif\n\n#if defined(__ELF__) && (defined(__GNU__) || defined(__FreeBSD__) || \\\n                         defined(__Fuchsia__) || defined(__linux__))\n// clang-format off\n#define NO_EXEC_STACK_DIRECTIVE .section .note.GNU-stack,\"\",%progbits\n// clang-format on\n#else\n#define NO_EXEC_STACK_DIRECTIVE\n#endif\n\n#if (defined(__x86_64__) || defined(__i386__)) && defined(__has_include) && __has_include(<cet.h>)\n#include <cet.h>\n#endif\n#ifndef _CET_ENDBR\n#define _CET_ENDBR\n#endif\n"
  },
  {
    "path": "runtime/sanitizer_common/sanitizer_atomic.h",
    "content": "//===-- sanitizer_atomic.h --------------------------------------*- C++ -*-===//\n//\n// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n// See https://llvm.org/LICENSE.txt for license information.\n// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n//\n//===----------------------------------------------------------------------===//\n//\n// This file is a part of ThreadSanitizer/AddressSanitizer runtime.\n//\n//===----------------------------------------------------------------------===//\n\n#ifndef SANITIZER_ATOMIC_H\n#define SANITIZER_ATOMIC_H\n\n#include \"sanitizer_internal_defs.h\"\n\nnamespace __sanitizer {\n\nenum memory_order {\n  memory_order_relaxed = 1 << 0,\n  memory_order_consume = 1 << 1,\n  memory_order_acquire = 1 << 2,\n  memory_order_release = 1 << 3,\n  memory_order_acq_rel = 1 << 4,\n  memory_order_seq_cst = 1 << 5\n};\n\nstruct atomic_uint8_t {\n  typedef u8 Type;\n  volatile Type val_dont_use;\n};\n\nstruct atomic_uint16_t {\n  typedef u16 Type;\n  volatile Type val_dont_use;\n};\n\nstruct atomic_sint32_t {\n  typedef s32 Type;\n  volatile Type val_dont_use;\n};\n\nstruct atomic_uint32_t {\n  typedef u32 Type;\n  volatile Type val_dont_use;\n};\n\nstruct atomic_uint64_t {\n  typedef u64 Type;\n  // On 32-bit platforms u64 is not necessary aligned on 8 bytes.\n  volatile ALIGNED(8) Type val_dont_use;\n};\n\nstruct atomic_uintptr_t {\n  typedef uptr Type;\n  volatile Type val_dont_use;\n};\n\n}  // namespace __sanitizer\n\n#if defined(__clang__) || defined(__GNUC__)\n# include \"sanitizer_atomic_clang.h\"\n#elif defined(_MSC_VER)\n# include \"sanitizer_atomic_msvc.h\"\n#else\n# error \"Unsupported compiler\"\n#endif\n\nnamespace __sanitizer {\n\n// Clutter-reducing helpers.\n\ntemplate<typename T>\ninline typename T::Type atomic_load_relaxed(const volatile T *a) {\n  return atomic_load(a, memory_order_relaxed);\n}\n\ntemplate<typename T>\ninline void atomic_store_relaxed(volatile T *a, 
typename T::Type v) {\n  atomic_store(a, v, memory_order_relaxed);\n}\n\n}  // namespace __sanitizer\n\n#endif  // SANITIZER_ATOMIC_H\n"
  },
  {
    "path": "runtime/sanitizer_common/sanitizer_atomic_clang.h",
    "content": "//===-- sanitizer_atomic_clang.h --------------------------------*- C++ -*-===//\n//\n// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n// See https://llvm.org/LICENSE.txt for license information.\n// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n//\n//===----------------------------------------------------------------------===//\n//\n// This file is a part of ThreadSanitizer/AddressSanitizer runtime.\n// Not intended for direct inclusion. Include sanitizer_atomic.h.\n//\n//===----------------------------------------------------------------------===//\n\n#ifndef SANITIZER_ATOMIC_CLANG_H\n#define SANITIZER_ATOMIC_CLANG_H\n\n#if defined(__i386__) || defined(__x86_64__)\n# include \"sanitizer_atomic_clang_x86.h\"\n#else\n# include \"sanitizer_atomic_clang_other.h\"\n#endif\n\nnamespace __sanitizer {\n\n// We would like to just use compiler builtin atomic operations\n// for loads and stores, but they are mostly broken in clang:\n// - they lead to vastly inefficient code generation\n// (http://llvm.org/bugs/show_bug.cgi?id=17281)\n// - 64-bit atomic operations are not implemented on x86_32\n// (http://llvm.org/bugs/show_bug.cgi?id=15034)\n// - they are not implemented on ARM\n// error: undefined reference to '__atomic_load_4'\n\n// See http://www.cl.cam.ac.uk/~pes20/cpp/cpp0xmappings.html\n// for mappings of the memory model to different processors.\n\ninline void atomic_signal_fence(memory_order) {\n  __asm__ __volatile__(\"\" ::: \"memory\");\n}\n\ninline void atomic_thread_fence(memory_order) {\n  __sync_synchronize();\n}\n\ntemplate<typename T>\ninline typename T::Type atomic_fetch_add(volatile T *a,\n    typename T::Type v, memory_order mo) {\n  (void)mo;\n  DCHECK(!((uptr)a % sizeof(*a)));\n  return __sync_fetch_and_add(&a->val_dont_use, v);\n}\n\ntemplate<typename T>\ninline typename T::Type atomic_fetch_sub(volatile T *a,\n    typename T::Type v, memory_order mo) {\n  (void)mo;\n  DCHECK(!((uptr)a % 
sizeof(*a)));\n  return __sync_fetch_and_add(&a->val_dont_use, -v);\n}\n\ntemplate<typename T>\ninline typename T::Type atomic_exchange(volatile T *a,\n    typename T::Type v, memory_order mo) {\n  DCHECK(!((uptr)a % sizeof(*a)));\n  if (mo & (memory_order_release | memory_order_acq_rel | memory_order_seq_cst))\n    __sync_synchronize();\n  v = __sync_lock_test_and_set(&a->val_dont_use, v);\n  if (mo == memory_order_seq_cst)\n    __sync_synchronize();\n  return v;\n}\n\ntemplate <typename T>\ninline bool atomic_compare_exchange_strong(volatile T *a, typename T::Type *cmp,\n                                           typename T::Type xchg,\n                                           memory_order mo) {\n  // Transitioned from __sync_val_compare_and_swap to support targets like\n  // SPARC V8 that cannot inline atomic cmpxchg.  __atomic_compare_exchange\n  // can then be resolved from libatomic.  __ATOMIC_SEQ_CST is used to best\n  // match the __sync builtin memory order.\n  return __atomic_compare_exchange(&a->val_dont_use, cmp, &xchg, false,\n                                   __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);\n}\n\ntemplate<typename T>\ninline bool atomic_compare_exchange_weak(volatile T *a,\n                                         typename T::Type *cmp,\n                                         typename T::Type xchg,\n                                         memory_order mo) {\n  return atomic_compare_exchange_strong(a, cmp, xchg, mo);\n}\n\n}  // namespace __sanitizer\n\n// This include provides explicit template instantiations for atomic_uint64_t\n// on MIPS32, which does not directly support 8 byte atomics. It has to\n// proceed the template definitions above.\n#if defined(_MIPS_SIM) && defined(_ABIO32)\n  #include \"sanitizer_atomic_clang_mips.h\"\n#endif\n\n#undef ATOMIC_ORDER\n\n#endif  // SANITIZER_ATOMIC_CLANG_H\n"
  },
  {
    "path": "runtime/sanitizer_common/sanitizer_atomic_clang_mips.h",
    "content": "//===-- sanitizer_atomic_clang_mips.h ---------------------------*- C++ -*-===//\n//\n// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n// See https://llvm.org/LICENSE.txt for license information.\n// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n//\n//===----------------------------------------------------------------------===//\n//\n// This file is a part of ThreadSanitizer/AddressSanitizer runtime.\n// Not intended for direct inclusion. Include sanitizer_atomic.h.\n//\n//===----------------------------------------------------------------------===//\n\n#ifndef SANITIZER_ATOMIC_CLANG_MIPS_H\n#define SANITIZER_ATOMIC_CLANG_MIPS_H\n\nnamespace __sanitizer {\n\n// MIPS32 does not support atomics > 4 bytes. To address this lack of\n// functionality, the sanitizer library provides helper methods which use an\n// internal spin lock mechanism to emulate atomic operations when the size is\n// 8 bytes.\nstatic void __spin_lock(volatile int *lock) {\n  while (__sync_lock_test_and_set(lock, 1))\n    while (*lock) {\n    }\n}\n\nstatic void __spin_unlock(volatile int *lock) { __sync_lock_release(lock); }\n\n// Make sure the lock is on its own cache line to prevent false sharing.\n// Put it inside a struct that is aligned and padded to the typical MIPS\n// cacheline which is 32 bytes.\nstatic struct {\n  int lock;\n  char pad[32 - sizeof(int)];\n} __attribute__((aligned(32))) lock = {0, {0}};\n\ntemplate <>\ninline atomic_uint64_t::Type atomic_fetch_add(volatile atomic_uint64_t *ptr,\n                                              atomic_uint64_t::Type val,\n                                              memory_order mo) {\n  DCHECK(mo &\n         (memory_order_relaxed | memory_order_release | memory_order_seq_cst));\n  DCHECK(!((uptr)ptr % sizeof(*ptr)));\n\n  atomic_uint64_t::Type ret;\n\n  __spin_lock(&lock.lock);\n  ret = *(const_cast<atomic_uint64_t::Type volatile *>(&ptr->val_dont_use));\n  ptr->val_dont_use = ret 
+ val;\n  __spin_unlock(&lock.lock);\n\n  return ret;\n}\n\ntemplate <>\ninline atomic_uint64_t::Type atomic_fetch_sub(volatile atomic_uint64_t *ptr,\n                                              atomic_uint64_t::Type val,\n                                              memory_order mo) {\n  return atomic_fetch_add(ptr, -val, mo);\n}\n\ntemplate <>\ninline bool atomic_compare_exchange_strong(volatile atomic_uint64_t *ptr,\n                                           atomic_uint64_t::Type *cmp,\n                                           atomic_uint64_t::Type xchg,\n                                           memory_order mo) {\n  DCHECK(mo &\n         (memory_order_relaxed | memory_order_release | memory_order_seq_cst));\n  DCHECK(!((uptr)ptr % sizeof(*ptr)));\n\n  typedef atomic_uint64_t::Type Type;\n  Type cmpv = *cmp;\n  Type prev;\n  bool ret = false;\n\n  __spin_lock(&lock.lock);\n  prev = *(const_cast<Type volatile *>(&ptr->val_dont_use));\n  if (prev == cmpv) {\n    ret = true;\n    ptr->val_dont_use = xchg;\n  }\n  __spin_unlock(&lock.lock);\n\n  return ret;\n}\n\ntemplate <>\ninline atomic_uint64_t::Type atomic_load(const volatile atomic_uint64_t *ptr,\n                                         memory_order mo) {\n  DCHECK(mo &\n         (memory_order_relaxed | memory_order_release | memory_order_seq_cst));\n  DCHECK(!((uptr)ptr % sizeof(*ptr)));\n\n  atomic_uint64_t::Type zero = 0;\n  volatile atomic_uint64_t *Newptr =\n      const_cast<volatile atomic_uint64_t *>(ptr);\n  return atomic_fetch_add(Newptr, zero, mo);\n}\n\ntemplate <>\ninline void atomic_store(volatile atomic_uint64_t *ptr, atomic_uint64_t::Type v,\n                         memory_order mo) {\n  DCHECK(mo &\n         (memory_order_relaxed | memory_order_release | memory_order_seq_cst));\n  DCHECK(!((uptr)ptr % sizeof(*ptr)));\n\n  __spin_lock(&lock.lock);\n  ptr->val_dont_use = v;\n  __spin_unlock(&lock.lock);\n}\n\n}  // namespace __sanitizer\n\n#endif  // SANITIZER_ATOMIC_CLANG_MIPS_H\n\n"
  },
  {
    "path": "runtime/sanitizer_common/sanitizer_atomic_clang_other.h",
    "content": "//===-- sanitizer_atomic_clang_other.h --------------------------*- C++ -*-===//\n//\n// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n// See https://llvm.org/LICENSE.txt for license information.\n// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n//\n//===----------------------------------------------------------------------===//\n//\n// This file is a part of ThreadSanitizer/AddressSanitizer runtime.\n// Not intended for direct inclusion. Include sanitizer_atomic.h.\n//\n//===----------------------------------------------------------------------===//\n\n#ifndef SANITIZER_ATOMIC_CLANG_OTHER_H\n#define SANITIZER_ATOMIC_CLANG_OTHER_H\n\nnamespace __sanitizer {\n\n\ninline void proc_yield(int cnt) {\n  __asm__ __volatile__(\"\" ::: \"memory\");\n}\n\ntemplate<typename T>\ninline typename T::Type atomic_load(\n    const volatile T *a, memory_order mo) {\n  DCHECK(mo & (memory_order_relaxed | memory_order_consume\n      | memory_order_acquire | memory_order_seq_cst));\n  DCHECK(!((uptr)a % sizeof(*a)));\n  typename T::Type v;\n\n  if (sizeof(*a) < 8 || sizeof(void*) == 8) {\n    // Assume that aligned loads are atomic.\n    if (mo == memory_order_relaxed) {\n      v = a->val_dont_use;\n    } else if (mo == memory_order_consume) {\n      // Assume that processor respects data dependencies\n      // (and that compiler won't break them).\n      __asm__ __volatile__(\"\" ::: \"memory\");\n      v = a->val_dont_use;\n      __asm__ __volatile__(\"\" ::: \"memory\");\n    } else if (mo == memory_order_acquire) {\n      __asm__ __volatile__(\"\" ::: \"memory\");\n      v = a->val_dont_use;\n      __sync_synchronize();\n    } else {  // seq_cst\n      // E.g. 
on POWER we need a hw fence even before the store.\n      __sync_synchronize();\n      v = a->val_dont_use;\n      __sync_synchronize();\n    }\n  } else {\n    __atomic_load(const_cast<typename T::Type volatile *>(&a->val_dont_use), &v,\n                  __ATOMIC_SEQ_CST);\n  }\n  return v;\n}\n\ntemplate<typename T>\ninline void atomic_store(volatile T *a, typename T::Type v, memory_order mo) {\n  DCHECK(mo & (memory_order_relaxed | memory_order_release\n      | memory_order_seq_cst));\n  DCHECK(!((uptr)a % sizeof(*a)));\n\n  if (sizeof(*a) < 8 || sizeof(void*) == 8) {\n    // Assume that aligned loads are atomic.\n    if (mo == memory_order_relaxed) {\n      a->val_dont_use = v;\n    } else if (mo == memory_order_release) {\n      __sync_synchronize();\n      a->val_dont_use = v;\n      __asm__ __volatile__(\"\" ::: \"memory\");\n    } else {  // seq_cst\n      __sync_synchronize();\n      a->val_dont_use = v;\n      __sync_synchronize();\n    }\n  } else {\n    __atomic_store(&a->val_dont_use, &v, __ATOMIC_SEQ_CST);\n  }\n}\n\n}  // namespace __sanitizer\n\n#endif  // #ifndef SANITIZER_ATOMIC_CLANG_OTHER_H\n"
  },
  {
    "path": "runtime/sanitizer_common/sanitizer_atomic_clang_x86.h",
    "content": "//===-- sanitizer_atomic_clang_x86.h ----------------------------*- C++ -*-===//\n//\n// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n// See https://llvm.org/LICENSE.txt for license information.\n// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n//\n//===----------------------------------------------------------------------===//\n//\n// This file is a part of ThreadSanitizer/AddressSanitizer runtime.\n// Not intended for direct inclusion. Include sanitizer_atomic.h.\n//\n//===----------------------------------------------------------------------===//\n\n#ifndef SANITIZER_ATOMIC_CLANG_X86_H\n#define SANITIZER_ATOMIC_CLANG_X86_H\n\nnamespace __sanitizer {\n\ninline void proc_yield(int cnt) {\n  __asm__ __volatile__(\"\" ::: \"memory\");\n  for (int i = 0; i < cnt; i++)\n    __asm__ __volatile__(\"pause\");\n  __asm__ __volatile__(\"\" ::: \"memory\");\n}\n\ntemplate<typename T>\ninline typename T::Type atomic_load(\n    const volatile T *a, memory_order mo) {\n  DCHECK(mo & (memory_order_relaxed | memory_order_consume\n      | memory_order_acquire | memory_order_seq_cst));\n  DCHECK(!((uptr)a % sizeof(*a)));\n  typename T::Type v;\n\n  if (sizeof(*a) < 8 || sizeof(void*) == 8) {\n    // Assume that aligned loads are atomic.\n    if (mo == memory_order_relaxed) {\n      v = a->val_dont_use;\n    } else if (mo == memory_order_consume) {\n      // Assume that processor respects data dependencies\n      // (and that compiler won't break them).\n      __asm__ __volatile__(\"\" ::: \"memory\");\n      v = a->val_dont_use;\n      __asm__ __volatile__(\"\" ::: \"memory\");\n    } else if (mo == memory_order_acquire) {\n      __asm__ __volatile__(\"\" ::: \"memory\");\n      v = a->val_dont_use;\n      // On x86 loads are implicitly acquire.\n      __asm__ __volatile__(\"\" ::: \"memory\");\n    } else {  // seq_cst\n      // On x86 plain MOV is enough for seq_cst store.\n      __asm__ __volatile__(\"\" ::: 
\"memory\");\n      v = a->val_dont_use;\n      __asm__ __volatile__(\"\" ::: \"memory\");\n    }\n  } else {\n    // 64-bit load on 32-bit platform.\n    __asm__ __volatile__(\n        \"movq %1, %%mm0;\"  // Use mmx reg for 64-bit atomic moves\n        \"movq %%mm0, %0;\"  // (ptr could be read-only)\n        \"emms;\"            // Empty mmx state/Reset FP regs\n        : \"=m\" (v)\n        : \"m\" (a->val_dont_use)\n        : // mark the mmx registers as clobbered\n#ifdef __MMX__\n          \"mm0\", \"mm1\", \"mm2\", \"mm3\", \"mm4\", \"mm5\", \"mm6\", \"mm7\",\n#endif  // #ifdef __MMX__\n          \"memory\");\n  }\n  return v;\n}\n\ntemplate<typename T>\ninline void atomic_store(volatile T *a, typename T::Type v, memory_order mo) {\n  DCHECK(mo & (memory_order_relaxed | memory_order_release\n      | memory_order_seq_cst));\n  DCHECK(!((uptr)a % sizeof(*a)));\n\n  if (sizeof(*a) < 8 || sizeof(void*) == 8) {\n    // Assume that aligned loads are atomic.\n    if (mo == memory_order_relaxed) {\n      a->val_dont_use = v;\n    } else if (mo == memory_order_release) {\n      // On x86 stores are implicitly release.\n      __asm__ __volatile__(\"\" ::: \"memory\");\n      a->val_dont_use = v;\n      __asm__ __volatile__(\"\" ::: \"memory\");\n    } else {  // seq_cst\n      // On x86 stores are implicitly release.\n      __asm__ __volatile__(\"\" ::: \"memory\");\n      a->val_dont_use = v;\n      __sync_synchronize();\n    }\n  } else {\n    // 64-bit store on 32-bit platform.\n    __asm__ __volatile__(\n        \"movq %1, %%mm0;\"  // Use mmx reg for 64-bit atomic moves\n        \"movq %%mm0, %0;\"\n        \"emms;\"            // Empty mmx state/Reset FP regs\n        : \"=m\" (a->val_dont_use)\n        : \"m\" (v)\n        : // mark the mmx registers as clobbered\n#ifdef __MMX__\n          \"mm0\", \"mm1\", \"mm2\", \"mm3\", \"mm4\", \"mm5\", \"mm6\", \"mm7\",\n#endif  // #ifdef __MMX__\n          \"memory\");\n    if (mo == memory_order_seq_cst)\n      
__sync_synchronize();\n  }\n}\n\n}  // namespace __sanitizer\n\n#endif  // #ifndef SANITIZER_ATOMIC_CLANG_X86_H\n"
  },
  {
    "path": "runtime/sanitizer_common/sanitizer_atomic_msvc.h",
    "content": "//===-- sanitizer_atomic_msvc.h ---------------------------------*- C++ -*-===//\n//\n// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n// See https://llvm.org/LICENSE.txt for license information.\n// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n//\n//===----------------------------------------------------------------------===//\n//\n// This file is a part of ThreadSanitizer/AddressSanitizer runtime.\n// Not intended for direct inclusion. Include sanitizer_atomic.h.\n//\n//===----------------------------------------------------------------------===//\n\n#ifndef SANITIZER_ATOMIC_MSVC_H\n#define SANITIZER_ATOMIC_MSVC_H\n\nextern \"C\" void _ReadWriteBarrier();\n#pragma intrinsic(_ReadWriteBarrier)\nextern \"C\" void _mm_mfence();\n#pragma intrinsic(_mm_mfence)\nextern \"C\" void _mm_pause();\n#pragma intrinsic(_mm_pause)\nextern \"C\" char _InterlockedExchange8(char volatile *Addend, char Value);\n#pragma intrinsic(_InterlockedExchange8)\nextern \"C\" short _InterlockedExchange16(short volatile *Addend, short Value);\n#pragma intrinsic(_InterlockedExchange16)\nextern \"C\" long _InterlockedExchange(long volatile *Addend, long Value);\n#pragma intrinsic(_InterlockedExchange)\nextern \"C\" long _InterlockedExchangeAdd(long volatile *Addend, long Value);\n#pragma intrinsic(_InterlockedExchangeAdd)\nextern \"C\" char _InterlockedCompareExchange8(char volatile *Destination,\n                                             char Exchange, char Comparand);\n#pragma intrinsic(_InterlockedCompareExchange8)\nextern \"C\" short _InterlockedCompareExchange16(short volatile *Destination,\n                                               short Exchange, short Comparand);\n#pragma intrinsic(_InterlockedCompareExchange16)\nextern \"C\" long long _InterlockedCompareExchange64(\n    long long volatile *Destination, long long Exchange, long long Comparand);\n#pragma intrinsic(_InterlockedCompareExchange64)\nextern \"C\" void 
*_InterlockedCompareExchangePointer(\n    void *volatile *Destination,\n    void *Exchange, void *Comparand);\n#pragma intrinsic(_InterlockedCompareExchangePointer)\nextern \"C\" long __cdecl _InterlockedCompareExchange(long volatile *Destination,\n                                                    long Exchange,\n                                                    long Comparand);\n#pragma intrinsic(_InterlockedCompareExchange)\n\n#ifdef _WIN64\nextern \"C\" long long _InterlockedExchangeAdd64(long long volatile *Addend,\n                                               long long Value);\n#pragma intrinsic(_InterlockedExchangeAdd64)\n#endif\n\nnamespace __sanitizer {\n\ninline void atomic_signal_fence(memory_order) {\n  _ReadWriteBarrier();\n}\n\ninline void atomic_thread_fence(memory_order) {\n  _mm_mfence();\n}\n\ninline void proc_yield(int cnt) {\n  for (int i = 0; i < cnt; i++)\n    _mm_pause();\n}\n\ntemplate<typename T>\ninline typename T::Type atomic_load(\n    const volatile T *a, memory_order mo) {\n  DCHECK(mo & (memory_order_relaxed | memory_order_consume\n      | memory_order_acquire | memory_order_seq_cst));\n  DCHECK(!((uptr)a % sizeof(*a)));\n  typename T::Type v;\n  // FIXME(dvyukov): 64-bit load is not atomic on 32-bits.\n  if (mo == memory_order_relaxed) {\n    v = a->val_dont_use;\n  } else {\n    atomic_signal_fence(memory_order_seq_cst);\n    v = a->val_dont_use;\n    atomic_signal_fence(memory_order_seq_cst);\n  }\n  return v;\n}\n\ntemplate<typename T>\ninline void atomic_store(volatile T *a, typename T::Type v, memory_order mo) {\n  DCHECK(mo & (memory_order_relaxed | memory_order_release\n      | memory_order_seq_cst));\n  DCHECK(!((uptr)a % sizeof(*a)));\n  // FIXME(dvyukov): 64-bit store is not atomic on 32-bits.\n  if (mo == memory_order_relaxed) {\n    a->val_dont_use = v;\n  } else {\n    atomic_signal_fence(memory_order_seq_cst);\n    a->val_dont_use = v;\n    atomic_signal_fence(memory_order_seq_cst);\n  }\n  if (mo == 
memory_order_seq_cst)\n    atomic_thread_fence(memory_order_seq_cst);\n}\n\ninline u32 atomic_fetch_add(volatile atomic_uint32_t *a,\n    u32 v, memory_order mo) {\n  (void)mo;\n  DCHECK(!((uptr)a % sizeof(*a)));\n  return (u32)_InterlockedExchangeAdd((volatile long *)&a->val_dont_use,\n                                      (long)v);\n}\n\ninline uptr atomic_fetch_add(volatile atomic_uintptr_t *a,\n    uptr v, memory_order mo) {\n  (void)mo;\n  DCHECK(!((uptr)a % sizeof(*a)));\n#ifdef _WIN64\n  return (uptr)_InterlockedExchangeAdd64((volatile long long *)&a->val_dont_use,\n                                         (long long)v);\n#else\n  return (uptr)_InterlockedExchangeAdd((volatile long *)&a->val_dont_use,\n                                       (long)v);\n#endif\n}\n\ninline u32 atomic_fetch_sub(volatile atomic_uint32_t *a,\n    u32 v, memory_order mo) {\n  (void)mo;\n  DCHECK(!((uptr)a % sizeof(*a)));\n  return (u32)_InterlockedExchangeAdd((volatile long *)&a->val_dont_use,\n                                      -(long)v);\n}\n\ninline uptr atomic_fetch_sub(volatile atomic_uintptr_t *a,\n    uptr v, memory_order mo) {\n  (void)mo;\n  DCHECK(!((uptr)a % sizeof(*a)));\n#ifdef _WIN64\n  return (uptr)_InterlockedExchangeAdd64((volatile long long *)&a->val_dont_use,\n                                         -(long long)v);\n#else\n  return (uptr)_InterlockedExchangeAdd((volatile long *)&a->val_dont_use,\n                                       -(long)v);\n#endif\n}\n\ninline u8 atomic_exchange(volatile atomic_uint8_t *a,\n    u8 v, memory_order mo) {\n  (void)mo;\n  DCHECK(!((uptr)a % sizeof(*a)));\n  return (u8)_InterlockedExchange8((volatile char*)&a->val_dont_use, v);\n}\n\ninline u16 atomic_exchange(volatile atomic_uint16_t *a,\n    u16 v, memory_order mo) {\n  (void)mo;\n  DCHECK(!((uptr)a % sizeof(*a)));\n  return (u16)_InterlockedExchange16((volatile short*)&a->val_dont_use, v);\n}\n\ninline u32 atomic_exchange(volatile atomic_uint32_t *a,\n    u32 v, 
memory_order mo) {\n  (void)mo;\n  DCHECK(!((uptr)a % sizeof(*a)));\n  return (u32)_InterlockedExchange((volatile long*)&a->val_dont_use, v);\n}\n\ninline bool atomic_compare_exchange_strong(volatile atomic_uint8_t *a,\n                                           u8 *cmp,\n                                           u8 xchgv,\n                                           memory_order mo) {\n  (void)mo;\n  DCHECK(!((uptr)a % sizeof(*a)));\n  u8 cmpv = *cmp;\n#ifdef _WIN64\n  u8 prev = (u8)_InterlockedCompareExchange8(\n      (volatile char*)&a->val_dont_use, (char)xchgv, (char)cmpv);\n#else\n  u8 prev;\n  __asm {\n    mov al, cmpv\n    mov ecx, a\n    mov dl, xchgv\n    lock cmpxchg [ecx], dl\n    mov prev, al\n  }\n#endif\n  if (prev == cmpv)\n    return true;\n  *cmp = prev;\n  return false;\n}\n\ninline bool atomic_compare_exchange_strong(volatile atomic_uintptr_t *a,\n                                           uptr *cmp,\n                                           uptr xchg,\n                                           memory_order mo) {\n  uptr cmpv = *cmp;\n  uptr prev = (uptr)_InterlockedCompareExchangePointer(\n      (void*volatile*)&a->val_dont_use, (void*)xchg, (void*)cmpv);\n  if (prev == cmpv)\n    return true;\n  *cmp = prev;\n  return false;\n}\n\ninline bool atomic_compare_exchange_strong(volatile atomic_uint16_t *a,\n                                           u16 *cmp,\n                                           u16 xchg,\n                                           memory_order mo) {\n  u16 cmpv = *cmp;\n  u16 prev = (u16)_InterlockedCompareExchange16(\n      (volatile short*)&a->val_dont_use, (short)xchg, (short)cmpv);\n  if (prev == cmpv)\n    return true;\n  *cmp = prev;\n  return false;\n}\n\ninline bool atomic_compare_exchange_strong(volatile atomic_uint32_t *a,\n                                           u32 *cmp,\n                                           u32 xchg,\n                                           memory_order mo) {\n  u32 cmpv = 
*cmp;\n  u32 prev = (u32)_InterlockedCompareExchange(\n      (volatile long*)&a->val_dont_use, (long)xchg, (long)cmpv);\n  if (prev == cmpv)\n    return true;\n  *cmp = prev;\n  return false;\n}\n\ninline bool atomic_compare_exchange_strong(volatile atomic_uint64_t *a,\n                                           u64 *cmp,\n                                           u64 xchg,\n                                           memory_order mo) {\n  u64 cmpv = *cmp;\n  u64 prev = (u64)_InterlockedCompareExchange64(\n      (volatile long long*)&a->val_dont_use, (long long)xchg, (long long)cmpv);\n  if (prev == cmpv)\n    return true;\n  *cmp = prev;\n  return false;\n}\n\ntemplate<typename T>\ninline bool atomic_compare_exchange_weak(volatile T *a,\n                                         typename T::Type *cmp,\n                                         typename T::Type xchg,\n                                         memory_order mo) {\n  return atomic_compare_exchange_strong(a, cmp, xchg, mo);\n}\n\n}  // namespace __sanitizer\n\n#endif  // SANITIZER_ATOMIC_CLANG_H\n"
  },
  {
    "path": "runtime/sanitizer_common/sanitizer_bitvector.h",
    "content": "//===-- sanitizer_bitvector.h -----------------------------------*- C++ -*-===//\n//\n// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n// See https://llvm.org/LICENSE.txt for license information.\n// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n//\n//===----------------------------------------------------------------------===//\n//\n// Specializer BitVector implementation.\n//\n//===----------------------------------------------------------------------===//\n\n#ifndef SANITIZER_BITVECTOR_H\n#define SANITIZER_BITVECTOR_H\n\n#include \"sanitizer_common.h\"\n\nnamespace __sanitizer {\n\n// Fixed size bit vector based on a single basic integer.\ntemplate <class basic_int_t = uptr>\nclass BasicBitVector {\n public:\n  enum SizeEnum : uptr { kSize = sizeof(basic_int_t) * 8 };\n\n  uptr size() const { return kSize; }\n  // No CTOR.\n  void clear() { bits_ = 0; }\n  void setAll() { bits_ = ~(basic_int_t)0; }\n  bool empty() const { return bits_ == 0; }\n\n  // Returns true if the bit has changed from 0 to 1.\n  bool setBit(uptr idx) {\n    basic_int_t old = bits_;\n    bits_ |= mask(idx);\n    return bits_ != old;\n  }\n\n  // Returns true if the bit has changed from 1 to 0.\n  bool clearBit(uptr idx) {\n    basic_int_t old = bits_;\n    bits_ &= ~mask(idx);\n    return bits_ != old;\n  }\n\n  bool getBit(uptr idx) const { return (bits_ & mask(idx)) != 0; }\n\n  uptr getAndClearFirstOne() {\n    CHECK(!empty());\n    uptr idx = LeastSignificantSetBitIndex(bits_);\n    clearBit(idx);\n    return idx;\n  }\n\n  // Do \"this |= v\" and return whether new bits have been added.\n  bool setUnion(const BasicBitVector &v) {\n    basic_int_t old = bits_;\n    bits_ |= v.bits_;\n    return bits_ != old;\n  }\n\n  // Do \"this &= v\" and return whether any bits have been removed.\n  bool setIntersection(const BasicBitVector &v) {\n    basic_int_t old = bits_;\n    bits_ &= v.bits_;\n    return bits_ != old;\n  }\n\n  // Do 
\"this &= ~v\" and return whether any bits have been removed.\n  bool setDifference(const BasicBitVector &v) {\n    basic_int_t old = bits_;\n    bits_ &= ~v.bits_;\n    return bits_ != old;\n  }\n\n  void copyFrom(const BasicBitVector &v) { bits_ = v.bits_; }\n\n  // Returns true if 'this' intersects with 'v'.\n  bool intersectsWith(const BasicBitVector &v) const {\n    return (bits_ & v.bits_) != 0;\n  }\n\n  // for (BasicBitVector<>::Iterator it(bv); it.hasNext();) {\n  //   uptr idx = it.next();\n  //   use(idx);\n  // }\n  class Iterator {\n   public:\n    Iterator() { }\n    explicit Iterator(const BasicBitVector &bv) : bv_(bv) {}\n    bool hasNext() const { return !bv_.empty(); }\n    uptr next() { return bv_.getAndClearFirstOne(); }\n    void clear() { bv_.clear(); }\n   private:\n    BasicBitVector bv_;\n  };\n\n private:\n  basic_int_t mask(uptr idx) const {\n    CHECK_LT(idx, size());\n    return (basic_int_t)1UL << idx;\n  }\n  basic_int_t bits_;\n};\n\n// Fixed size bit vector of (kLevel1Size*BV::kSize**2) bits.\n// The implementation is optimized for better performance on\n// sparse bit vectors, i.e. 
the those with few set bits.\ntemplate <uptr kLevel1Size = 1, class BV = BasicBitVector<> >\nclass TwoLevelBitVector {\n  // This is essentially a 2-level bit vector.\n  // Set bit in the first level BV indicates that there are set bits\n  // in the corresponding BV of the second level.\n  // This structure allows O(kLevel1Size) time for clear() and empty(),\n  // as well fast handling of sparse BVs.\n public:\n  enum SizeEnum : uptr { kSize = BV::kSize * BV::kSize * kLevel1Size };\n  // No CTOR.\n\n  uptr size() const { return kSize; }\n\n  void clear() {\n    for (uptr i = 0; i < kLevel1Size; i++)\n      l1_[i].clear();\n  }\n\n  void setAll() {\n    for (uptr i0 = 0; i0 < kLevel1Size; i0++) {\n      l1_[i0].setAll();\n      for (uptr i1 = 0; i1 < BV::kSize; i1++)\n        l2_[i0][i1].setAll();\n    }\n  }\n\n  bool empty() const {\n    for (uptr i = 0; i < kLevel1Size; i++)\n      if (!l1_[i].empty())\n        return false;\n    return true;\n  }\n\n  // Returns true if the bit has changed from 0 to 1.\n  bool setBit(uptr idx) {\n    check(idx);\n    uptr i0 = idx0(idx);\n    uptr i1 = idx1(idx);\n    uptr i2 = idx2(idx);\n    if (!l1_[i0].getBit(i1)) {\n      l1_[i0].setBit(i1);\n      l2_[i0][i1].clear();\n    }\n    bool res = l2_[i0][i1].setBit(i2);\n    // Printf(\"%s: %zd => %zd %zd %zd; %d\\n\", __func__,\n    // idx, i0, i1, i2, res);\n    return res;\n  }\n\n  bool clearBit(uptr idx) {\n    check(idx);\n    uptr i0 = idx0(idx);\n    uptr i1 = idx1(idx);\n    uptr i2 = idx2(idx);\n    bool res = false;\n    if (l1_[i0].getBit(i1)) {\n      res = l2_[i0][i1].clearBit(i2);\n      if (l2_[i0][i1].empty())\n        l1_[i0].clearBit(i1);\n    }\n    return res;\n  }\n\n  bool getBit(uptr idx) const {\n    check(idx);\n    uptr i0 = idx0(idx);\n    uptr i1 = idx1(idx);\n    uptr i2 = idx2(idx);\n    // Printf(\"%s: %zd => %zd %zd %zd\\n\", __func__, idx, i0, i1, i2);\n    return l1_[i0].getBit(i1) && l2_[i0][i1].getBit(i2);\n  }\n\n  uptr getAndClearFirstOne() 
{\n    for (uptr i0 = 0; i0 < kLevel1Size; i0++) {\n      if (l1_[i0].empty()) continue;\n      uptr i1 = l1_[i0].getAndClearFirstOne();\n      uptr i2 = l2_[i0][i1].getAndClearFirstOne();\n      if (!l2_[i0][i1].empty())\n        l1_[i0].setBit(i1);\n      uptr res = i0 * BV::kSize * BV::kSize + i1 * BV::kSize + i2;\n      // Printf(\"getAndClearFirstOne: %zd %zd %zd => %zd\\n\", i0, i1, i2, res);\n      return res;\n    }\n    CHECK(0);\n    return 0;\n  }\n\n  // Do \"this |= v\" and return whether new bits have been added.\n  bool setUnion(const TwoLevelBitVector &v) {\n    bool res = false;\n    for (uptr i0 = 0; i0 < kLevel1Size; i0++) {\n      BV t = v.l1_[i0];\n      while (!t.empty()) {\n        uptr i1 = t.getAndClearFirstOne();\n        if (l1_[i0].setBit(i1))\n          l2_[i0][i1].clear();\n        if (l2_[i0][i1].setUnion(v.l2_[i0][i1]))\n          res = true;\n      }\n    }\n    return res;\n  }\n\n  // Do \"this &= v\" and return whether any bits have been removed.\n  bool setIntersection(const TwoLevelBitVector &v) {\n    bool res = false;\n    for (uptr i0 = 0; i0 < kLevel1Size; i0++) {\n      if (l1_[i0].setIntersection(v.l1_[i0]))\n        res = true;\n      if (!l1_[i0].empty()) {\n        BV t = l1_[i0];\n        while (!t.empty()) {\n          uptr i1 = t.getAndClearFirstOne();\n          if (l2_[i0][i1].setIntersection(v.l2_[i0][i1]))\n            res = true;\n          if (l2_[i0][i1].empty())\n            l1_[i0].clearBit(i1);\n        }\n      }\n    }\n    return res;\n  }\n\n  // Do \"this &= ~v\" and return whether any bits have been removed.\n  bool setDifference(const TwoLevelBitVector &v) {\n    bool res = false;\n    for (uptr i0 = 0; i0 < kLevel1Size; i0++) {\n      BV t = l1_[i0];\n      t.setIntersection(v.l1_[i0]);\n      while (!t.empty()) {\n        uptr i1 = t.getAndClearFirstOne();\n        if (l2_[i0][i1].setDifference(v.l2_[i0][i1]))\n          res = true;\n        if (l2_[i0][i1].empty())\n          
l1_[i0].clearBit(i1);\n      }\n    }\n    return res;\n  }\n\n  void copyFrom(const TwoLevelBitVector &v) {\n    clear();\n    setUnion(v);\n  }\n\n  // Returns true if 'this' intersects with 'v'.\n  bool intersectsWith(const TwoLevelBitVector &v) const {\n    for (uptr i0 = 0; i0 < kLevel1Size; i0++) {\n      BV t = l1_[i0];\n      t.setIntersection(v.l1_[i0]);\n      while (!t.empty()) {\n        uptr i1 = t.getAndClearFirstOne();\n        if (!v.l1_[i0].getBit(i1)) continue;\n        if (l2_[i0][i1].intersectsWith(v.l2_[i0][i1]))\n          return true;\n      }\n    }\n    return false;\n  }\n\n  // for (TwoLevelBitVector<>::Iterator it(bv); it.hasNext();) {\n  //   uptr idx = it.next();\n  //   use(idx);\n  // }\n  class Iterator {\n   public:\n    Iterator() { }\n    explicit Iterator(const TwoLevelBitVector &bv) : bv_(bv), i0_(0), i1_(0) {\n      it1_.clear();\n      it2_.clear();\n    }\n\n    bool hasNext() const {\n      if (it1_.hasNext()) return true;\n      for (uptr i = i0_; i < kLevel1Size; i++)\n        if (!bv_.l1_[i].empty()) return true;\n      return false;\n    }\n\n    uptr next() {\n      // Printf(\"++++: %zd %zd; %d %d; size %zd\\n\", i0_, i1_, it1_.hasNext(),\n      //       it2_.hasNext(), kSize);\n      if (!it1_.hasNext() && !it2_.hasNext()) {\n        for (; i0_ < kLevel1Size; i0_++) {\n          if (bv_.l1_[i0_].empty()) continue;\n          it1_ = typename BV::Iterator(bv_.l1_[i0_]);\n          // Printf(\"+i0: %zd %zd; %d %d; size %zd\\n\", i0_, i1_, it1_.hasNext(),\n          //   it2_.hasNext(), kSize);\n          break;\n        }\n      }\n      if (!it2_.hasNext()) {\n        CHECK(it1_.hasNext());\n        i1_ = it1_.next();\n        it2_ = typename BV::Iterator(bv_.l2_[i0_][i1_]);\n        // Printf(\"++i1: %zd %zd; %d %d; size %zd\\n\", i0_, i1_, it1_.hasNext(),\n        //       it2_.hasNext(), kSize);\n      }\n      CHECK(it2_.hasNext());\n      uptr i2 = it2_.next();\n      uptr res = i0_ * BV::kSize * BV::kSize + i1_ * 
BV::kSize + i2;\n      // Printf(\"+ret: %zd %zd; %d %d; size %zd; res: %zd\\n\", i0_, i1_,\n      //       it1_.hasNext(), it2_.hasNext(), kSize, res);\n      if (!it1_.hasNext() && !it2_.hasNext())\n        i0_++;\n      return res;\n    }\n\n   private:\n    const TwoLevelBitVector &bv_;\n    uptr i0_, i1_;\n    typename BV::Iterator it1_, it2_;\n  };\n\n private:\n  void check(uptr idx) const { CHECK_LE(idx, size()); }\n\n  uptr idx0(uptr idx) const {\n    uptr res = idx / (BV::kSize * BV::kSize);\n    CHECK_LE(res, kLevel1Size);\n    return res;\n  }\n\n  uptr idx1(uptr idx) const {\n    uptr res = (idx / BV::kSize) % BV::kSize;\n    CHECK_LE(res, BV::kSize);\n    return res;\n  }\n\n  uptr idx2(uptr idx) const {\n    uptr res = idx % BV::kSize;\n    CHECK_LE(res, BV::kSize);\n    return res;\n  }\n\n  BV l1_[kLevel1Size];\n  BV l2_[kLevel1Size][BV::kSize];\n};\n\n} // namespace __sanitizer\n\n#endif // SANITIZER_BITVECTOR_H\n"
  },
  {
    "path": "runtime/sanitizer_common/sanitizer_bvgraph.h",
    "content": "//===-- sanitizer_bvgraph.h -------------------------------------*- C++ -*-===//\n//\n// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n// See https://llvm.org/LICENSE.txt for license information.\n// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n//\n//===----------------------------------------------------------------------===//\n//\n// This file is a part of Sanitizer runtime.\n// BVGraph -- a directed graph.\n//\n//===----------------------------------------------------------------------===//\n\n#ifndef SANITIZER_BVGRAPH_H\n#define SANITIZER_BVGRAPH_H\n\n#include \"sanitizer_common.h\"\n#include \"sanitizer_bitvector.h\"\n\nnamespace __sanitizer {\n\n// Directed graph of fixed size implemented as an array of bit vectors.\n// Not thread-safe, all accesses should be protected by an external lock.\ntemplate<class BV>\nclass BVGraph {\n public:\n  enum SizeEnum : uptr { kSize = BV::kSize };\n  uptr size() const { return kSize; }\n  // No CTOR.\n  void clear() {\n    for (uptr i = 0; i < size(); i++)\n      v[i].clear();\n  }\n\n  bool empty() const {\n    for (uptr i = 0; i < size(); i++)\n      if (!v[i].empty())\n        return false;\n    return true;\n  }\n\n  // Returns true if a new edge was added.\n  bool addEdge(uptr from, uptr to) {\n    check(from, to);\n    return v[from].setBit(to);\n  }\n\n  // Returns true if at least one new edge was added.\n  uptr addEdges(const BV &from, uptr to, uptr added_edges[],\n                uptr max_added_edges) {\n    uptr res = 0;\n    t1.copyFrom(from);\n    while (!t1.empty()) {\n      uptr node = t1.getAndClearFirstOne();\n      if (v[node].setBit(to))\n        if (res < max_added_edges)\n          added_edges[res++] = node;\n    }\n    return res;\n  }\n\n  // *EXPERIMENTAL*\n  // Returns true if an edge from=>to exist.\n  // This function does not use any global state except for 'this' itself,\n  // and thus can be called from different threads w/o locking.\n 
 // This would be racy.\n  // FIXME: investigate how much we can prove about this race being \"benign\".\n  bool hasEdge(uptr from, uptr to) { return v[from].getBit(to); }\n\n  // Returns true if the edge from=>to was removed.\n  bool removeEdge(uptr from, uptr to) {\n    return v[from].clearBit(to);\n  }\n\n  // Returns true if at least one edge *=>to was removed.\n  bool removeEdgesTo(const BV &to) {\n    bool res = 0;\n    for (uptr from = 0; from < size(); from++) {\n      if (v[from].setDifference(to))\n        res = true;\n    }\n    return res;\n  }\n\n  // Returns true if at least one edge from=>* was removed.\n  bool removeEdgesFrom(const BV &from) {\n    bool res = false;\n    t1.copyFrom(from);\n    while (!t1.empty()) {\n      uptr idx = t1.getAndClearFirstOne();\n      if (!v[idx].empty()) {\n        v[idx].clear();\n        res = true;\n      }\n    }\n    return res;\n  }\n\n  void removeEdgesFrom(uptr from) {\n    return v[from].clear();\n  }\n\n  bool hasEdge(uptr from, uptr to) const {\n    check(from, to);\n    return v[from].getBit(to);\n  }\n\n  // Returns true if there is a path from the node 'from'\n  // to any of the nodes in 'targets'.\n  bool isReachable(uptr from, const BV &targets) {\n    BV &to_visit = t1,\n       &visited = t2;\n    to_visit.copyFrom(v[from]);\n    visited.clear();\n    visited.setBit(from);\n    while (!to_visit.empty()) {\n      uptr idx = to_visit.getAndClearFirstOne();\n      if (visited.setBit(idx))\n        to_visit.setUnion(v[idx]);\n    }\n    return targets.intersectsWith(visited);\n  }\n\n  // Finds a path from 'from' to one of the nodes in 'target',\n  // stores up to 'path_size' items of the path into 'path',\n  // returns the path length, or 0 if there is no path of size 'path_size'.\n  uptr findPath(uptr from, const BV &targets, uptr *path, uptr path_size) {\n    if (path_size == 0)\n      return 0;\n    path[0] = from;\n    if (targets.getBit(from))\n      return 1;\n    // The function is recursive, so 
we don't want to create BV on stack.\n    // Instead of a getAndClearFirstOne loop we use the slower iterator.\n    for (typename BV::Iterator it(v[from]); it.hasNext(); ) {\n      uptr idx = it.next();\n      if (uptr res = findPath(idx, targets, path + 1, path_size - 1))\n        return res + 1;\n    }\n    return 0;\n  }\n\n  // Same as findPath, but finds a shortest path.\n  uptr findShortestPath(uptr from, const BV &targets, uptr *path,\n                        uptr path_size) {\n    for (uptr p = 1; p <= path_size; p++)\n      if (findPath(from, targets, path, p) == p)\n        return p;\n    return 0;\n  }\n\n private:\n  void check(uptr idx1, uptr idx2) const {\n    CHECK_LT(idx1, size());\n    CHECK_LT(idx2, size());\n  }\n  BV v[kSize];\n  // Keep temporary vectors here since we can not create large objects on stack.\n  BV t1, t2;\n};\n\n} // namespace __sanitizer\n\n#endif // SANITIZER_BVGRAPH_H\n"
  },
  {
    "path": "runtime/sanitizer_common/sanitizer_chained_origin_depot.cpp",
    "content": "//===-- sanitizer_chained_origin_depot.cpp --------------------------------===//\n//\n// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n// See https://llvm.org/LICENSE.txt for license information.\n// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n//\n//===----------------------------------------------------------------------===//\n//\n// A storage for chained origins.\n//===----------------------------------------------------------------------===//\n\n#include \"sanitizer_chained_origin_depot.h\"\n\n#include \"sanitizer_stackdepotbase.h\"\n\nnamespace __sanitizer {\n\nnamespace {\nstruct ChainedOriginDepotDesc {\n  u32 here_id;\n  u32 prev_id;\n};\n\nstruct ChainedOriginDepotNode {\n  using hash_type = u32;\n  u32 link;\n  u32 here_id;\n  u32 prev_id;\n\n  typedef ChainedOriginDepotDesc args_type;\n\n  bool eq(hash_type hash, const args_type &args) const;\n\n  static uptr allocated() { return 0; }\n\n  static hash_type hash(const args_type &args);\n\n  static bool is_valid(const args_type &args);\n\n  void store(u32 id, const args_type &args, hash_type other_hash);\n\n  args_type load(u32 id) const;\n\n  struct Handle {\n    const ChainedOriginDepotNode *node_ = nullptr;\n    u32 id_ = 0;\n    Handle(const ChainedOriginDepotNode *node, u32 id) : node_(node), id_(id) {}\n    bool valid() const { return node_; }\n    u32 id() const { return id_; }\n    int here_id() const { return node_->here_id; }\n    int prev_id() const { return node_->prev_id; }\n  };\n\n  static Handle get_handle(u32 id);\n\n  typedef Handle handle_type;\n};\n\n}  // namespace\n\nstatic StackDepotBase<ChainedOriginDepotNode, 4, 20> depot;\n\nbool ChainedOriginDepotNode::eq(hash_type hash, const args_type &args) const {\n  return here_id == args.here_id && prev_id == args.prev_id;\n}\n\n/* This is murmur2 hash for the 64->32 bit case.\n   It does not behave all that well because the keys have a very biased\n   distribution (I've seen 
7-element buckets with the table only 14% full).\n\n   here_id is built of\n   * (1 bits) Reserved, zero.\n   * (8 bits) Part id = bits 13..20 of the hash value of here_id's key.\n   * (23 bits) Sequential number (each part has each own sequence).\n\n   prev_id has either the same distribution as here_id (but with 3:8:21)\n   split, or one of two reserved values (-1) or (-2). Either case can\n   dominate depending on the workload.\n*/\nChainedOriginDepotNode::hash_type ChainedOriginDepotNode::hash(\n    const args_type &args) {\n  const u32 m = 0x5bd1e995;\n  const u32 seed = 0x9747b28c;\n  const u32 r = 24;\n  u32 h = seed;\n  u32 k = args.here_id;\n  k *= m;\n  k ^= k >> r;\n  k *= m;\n  h *= m;\n  h ^= k;\n\n  k = args.prev_id;\n  k *= m;\n  k ^= k >> r;\n  k *= m;\n  h *= m;\n  h ^= k;\n\n  h ^= h >> 13;\n  h *= m;\n  h ^= h >> 15;\n  return h;\n}\n\nbool ChainedOriginDepotNode::is_valid(const args_type &args) { return true; }\n\nvoid ChainedOriginDepotNode::store(u32 id, const args_type &args,\n                                   hash_type other_hash) {\n  here_id = args.here_id;\n  prev_id = args.prev_id;\n}\n\nChainedOriginDepotNode::args_type ChainedOriginDepotNode::load(u32 id) const {\n  args_type ret = {here_id, prev_id};\n  return ret;\n}\n\nChainedOriginDepotNode::Handle ChainedOriginDepotNode::get_handle(u32 id) {\n  return Handle(&depot.nodes[id], id);\n}\n\nChainedOriginDepot::ChainedOriginDepot() {}\n\nStackDepotStats ChainedOriginDepot::GetStats() const {\n  return depot.GetStats();\n}\n\nbool ChainedOriginDepot::Put(u32 here_id, u32 prev_id, u32 *new_id) {\n  ChainedOriginDepotDesc desc = {here_id, prev_id};\n  bool inserted;\n  *new_id = depot.Put(desc, &inserted);\n  return inserted;\n}\n\nu32 ChainedOriginDepot::Get(u32 id, u32 *other) {\n  ChainedOriginDepotDesc desc = depot.Get(id);\n  *other = desc.prev_id;\n  return desc.here_id;\n}\n\nvoid ChainedOriginDepot::LockAll() { depot.LockAll(); }\n\nvoid ChainedOriginDepot::UnlockAll() { 
depot.UnlockAll(); }\n\n}  // namespace __sanitizer\n"
  },
  {
    "path": "runtime/sanitizer_common/sanitizer_chained_origin_depot.h",
    "content": "//===-- sanitizer_chained_origin_depot.h ------------------------*- C++ -*-===//\n//\n// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n// See https://llvm.org/LICENSE.txt for license information.\n// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n//\n//===----------------------------------------------------------------------===//\n//\n// A storage for chained origins.\n//===----------------------------------------------------------------------===//\n\n#ifndef SANITIZER_CHAINED_ORIGIN_DEPOT_H\n#define SANITIZER_CHAINED_ORIGIN_DEPOT_H\n\n#include \"sanitizer_common.h\"\n\nnamespace __sanitizer {\n\nclass ChainedOriginDepot {\n public:\n  ChainedOriginDepot();\n\n  // Gets the statistic of the origin chain storage.\n  StackDepotStats GetStats() const;\n\n  // Stores a chain with StackDepot ID here_id and previous chain ID prev_id.\n  // If successful, returns true and the new chain id new_id.\n  // If the same element already exists, returns false and sets new_id to the\n  // existing ID.\n  bool Put(u32 here_id, u32 prev_id, u32 *new_id);\n\n  // Retrieves the stored StackDepot ID for the given origin ID.\n  u32 Get(u32 id, u32 *other);\n\n  void LockAll();\n  void UnlockAll();\n\n private:\n  ChainedOriginDepot(const ChainedOriginDepot &) = delete;\n  void operator=(const ChainedOriginDepot &) = delete;\n};\n\n}  // namespace __sanitizer\n\n#endif  // SANITIZER_CHAINED_ORIGIN_DEPOT_H\n"
  },
  {
    "path": "runtime/sanitizer_common/sanitizer_common.cpp",
    "content": "//===-- sanitizer_common.cpp ----------------------------------------------===//\n//\n// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n// See https://llvm.org/LICENSE.txt for license information.\n// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n//\n//===----------------------------------------------------------------------===//\n//\n// This file is shared between AddressSanitizer and ThreadSanitizer\n// run-time libraries.\n//===----------------------------------------------------------------------===//\n\n#include \"sanitizer_common.h\"\n#include \"sanitizer_allocator_interface.h\"\n#include \"sanitizer_allocator_internal.h\"\n#include \"sanitizer_atomic.h\"\n#include \"sanitizer_flags.h\"\n#include \"sanitizer_libc.h\"\n#include \"sanitizer_placement_new.h\"\n\nnamespace __sanitizer {\n\nconst char *SanitizerToolName = \"SanitizerTool\";\n\natomic_uint32_t current_verbosity;\nuptr PageSizeCached;\nu32 NumberOfCPUsCached;\n\n// PID of the tracer task in StopTheWorld. It shares the address space with the\n// main process, but has a different PID and thus requires special handling.\nuptr stoptheworld_tracer_pid = 0;\n// Cached pid of parent process - if the parent process dies, we want to keep\n// writing to the same log file.\nuptr stoptheworld_tracer_ppid = 0;\n\nvoid NORETURN ReportMmapFailureAndDie(uptr size, const char *mem_type,\n                                      const char *mmap_type, error_t err,\n                                      bool raw_report) {\n  static int recursion_count;\n  if (raw_report || recursion_count) {\n    // If raw report is requested or we went into recursion just die.  
The\n    // Report() and CHECK calls below may call mmap recursively and fail.\n    RawWrite(\"ERROR: Failed to mmap\\n\");\n    Die();\n  }\n  recursion_count++;\n  Report(\"ERROR: %s failed to \"\n         \"%s 0x%zx (%zd) bytes of %s (error code: %d)\\n\",\n         SanitizerToolName, mmap_type, size, size, mem_type, err);\n#if !SANITIZER_GO\n  DumpProcessMap();\n#endif\n  UNREACHABLE(\"unable to mmap\");\n}\n\ntypedef bool UptrComparisonFunction(const uptr &a, const uptr &b);\ntypedef bool U32ComparisonFunction(const u32 &a, const u32 &b);\n\nconst char *StripPathPrefix(const char *filepath,\n                            const char *strip_path_prefix) {\n  if (!filepath) return nullptr;\n  if (!strip_path_prefix) return filepath;\n  const char *res = filepath;\n  if (const char *pos = internal_strstr(filepath, strip_path_prefix))\n    res = pos + internal_strlen(strip_path_prefix);\n  if (res[0] == '.' && res[1] == '/')\n    res += 2;\n  return res;\n}\n\nconst char *StripModuleName(const char *module) {\n  if (!module)\n    return nullptr;\n  if (SANITIZER_WINDOWS) {\n    // On Windows, both slash and backslash are possible.\n    // Pick the one that goes last.\n    if (const char *bslash_pos = internal_strrchr(module, '\\\\'))\n      return StripModuleName(bslash_pos + 1);\n  }\n  if (const char *slash_pos = internal_strrchr(module, '/')) {\n    return slash_pos + 1;\n  }\n  return module;\n}\n\nvoid ReportErrorSummary(const char *error_message, const char *alt_tool_name) {\n  if (!common_flags()->print_summary)\n    return;\n  InternalScopedString buff;\n  buff.append(\"SUMMARY: %s: %s\",\n              alt_tool_name ? 
alt_tool_name : SanitizerToolName, error_message);\n  __sanitizer_report_error_summary(buff.data());\n}\n\n// Removes the ANSI escape sequences from the input string (in-place).\nvoid RemoveANSIEscapeSequencesFromString(char *str) {\n  if (!str)\n    return;\n\n  // We are going to remove the escape sequences in place.\n  char *s = str;\n  char *z = str;\n  while (*s != '\\0') {\n    CHECK_GE(s, z);\n    // Skip over ANSI escape sequences with pointer 's'.\n    if (*s == '\\033' && *(s + 1) == '[') {\n      s = internal_strchrnul(s, 'm');\n      if (*s == '\\0') {\n        break;\n      }\n      s++;\n      continue;\n    }\n    // 's' now points at a character we want to keep. Copy over the buffer\n    // content if the escape sequence has been previously skipped and advance\n    // both pointers.\n    if (s != z)\n      *z = *s;\n\n    // If we have not seen an escape sequence, just advance both pointers.\n    z++;\n    s++;\n  }\n\n  // Null terminate the string.\n  *z = '\\0';\n}\n\nvoid LoadedModule::set(const char *module_name, uptr base_address) {\n  clear();\n  full_name_ = internal_strdup(module_name);\n  base_address_ = base_address;\n}\n\nvoid LoadedModule::set(const char *module_name, uptr base_address,\n                       ModuleArch arch, u8 uuid[kModuleUUIDSize],\n                       bool instrumented) {\n  set(module_name, base_address);\n  arch_ = arch;\n  internal_memcpy(uuid_, uuid, sizeof(uuid_));\n  uuid_size_ = kModuleUUIDSize;\n  instrumented_ = instrumented;\n}\n\nvoid LoadedModule::setUuid(const char *uuid, uptr size) {\n  if (size > kModuleUUIDSize)\n    size = kModuleUUIDSize;\n  internal_memcpy(uuid_, uuid, size);\n  uuid_size_ = size;\n}\n\nvoid LoadedModule::clear() {\n  InternalFree(full_name_);\n  base_address_ = 0;\n  max_executable_address_ = 0;\n  full_name_ = nullptr;\n  arch_ = kModuleArchUnknown;\n  internal_memset(uuid_, 0, kModuleUUIDSize);\n  instrumented_ = false;\n  while (!ranges_.empty()) {\n    AddressRange *r = 
ranges_.front();\n    ranges_.pop_front();\n    InternalFree(r);\n  }\n}\n\nvoid LoadedModule::addAddressRange(uptr beg, uptr end, bool executable,\n                                   bool writable, const char *name) {\n  void *mem = InternalAlloc(sizeof(AddressRange));\n  AddressRange *r =\n      new(mem) AddressRange(beg, end, executable, writable, name);\n  ranges_.push_back(r);\n  if (executable && end > max_executable_address_)\n    max_executable_address_ = end;\n}\n\nbool LoadedModule::containsAddress(uptr address) const {\n  for (const AddressRange &r : ranges()) {\n    if (r.beg <= address && address < r.end)\n      return true;\n  }\n  return false;\n}\n\nstatic atomic_uintptr_t g_total_mmaped;\n\nvoid IncreaseTotalMmap(uptr size) {\n  if (!common_flags()->mmap_limit_mb) return;\n  uptr total_mmaped =\n      atomic_fetch_add(&g_total_mmaped, size, memory_order_relaxed) + size;\n  // Since for now mmap_limit_mb is not a user-facing flag, just kill\n  // a program. Use RAW_CHECK to avoid extra mmaps in reporting.\n  RAW_CHECK((total_mmaped >> 20) < common_flags()->mmap_limit_mb);\n}\n\nvoid DecreaseTotalMmap(uptr size) {\n  if (!common_flags()->mmap_limit_mb) return;\n  atomic_fetch_sub(&g_total_mmaped, size, memory_order_relaxed);\n}\n\nbool TemplateMatch(const char *templ, const char *str) {\n  if ((!str) || str[0] == 0)\n    return false;\n  bool start = false;\n  if (templ && templ[0] == '^') {\n    start = true;\n    templ++;\n  }\n  bool asterisk = false;\n  while (templ && templ[0]) {\n    if (templ[0] == '*') {\n      templ++;\n      start = false;\n      asterisk = true;\n      continue;\n    }\n    if (templ[0] == '$')\n      return str[0] == 0 || asterisk;\n    if (str[0] == 0)\n      return false;\n    char *tpos = (char*)internal_strchr(templ, '*');\n    char *tpos1 = (char*)internal_strchr(templ, '$');\n    if ((!tpos) || (tpos1 && tpos1 < tpos))\n      tpos = tpos1;\n    if (tpos)\n      tpos[0] = 0;\n    const char *str0 = str;\n    const 
char *spos = internal_strstr(str, templ);\n    str = spos + internal_strlen(templ);\n    templ = tpos;\n    if (tpos)\n      tpos[0] = tpos == tpos1 ? '$' : '*';\n    if (!spos)\n      return false;\n    if (start && spos != str0)\n      return false;\n    start = false;\n    asterisk = false;\n  }\n  return true;\n}\n\nstatic char binary_name_cache_str[kMaxPathLength];\nstatic char process_name_cache_str[kMaxPathLength];\n\nconst char *GetProcessName() {\n  return process_name_cache_str;\n}\n\nstatic uptr ReadProcessName(/*out*/ char *buf, uptr buf_len) {\n  ReadLongProcessName(buf, buf_len);\n  char *s = const_cast<char *>(StripModuleName(buf));\n  uptr len = internal_strlen(s);\n  if (s != buf) {\n    internal_memmove(buf, s, len);\n    buf[len] = '\\0';\n  }\n  return len;\n}\n\nvoid UpdateProcessName() {\n  ReadProcessName(process_name_cache_str, sizeof(process_name_cache_str));\n}\n\n// Call once to make sure that binary_name_cache_str is initialized\nvoid CacheBinaryName() {\n  if (binary_name_cache_str[0] != '\\0')\n    return;\n  ReadBinaryName(binary_name_cache_str, sizeof(binary_name_cache_str));\n  ReadProcessName(process_name_cache_str, sizeof(process_name_cache_str));\n}\n\nuptr ReadBinaryNameCached(/*out*/char *buf, uptr buf_len) {\n  CacheBinaryName();\n  uptr name_len = internal_strlen(binary_name_cache_str);\n  name_len = (name_len < buf_len - 1) ? 
name_len : buf_len - 1;\n  if (buf_len == 0)\n    return 0;\n  internal_memcpy(buf, binary_name_cache_str, name_len);\n  buf[name_len] = '\\0';\n  return name_len;\n}\n\nuptr ReadBinaryDir(/*out*/ char *buf, uptr buf_len) {\n  ReadBinaryNameCached(buf, buf_len);\n  const char *exec_name_pos = StripModuleName(buf);\n  uptr name_len = exec_name_pos - buf;\n  buf[name_len] = '\\0';\n  return name_len;\n}\n\n#if !SANITIZER_GO\nvoid PrintCmdline() {\n  char **argv = GetArgv();\n  if (!argv) return;\n  Printf(\"\\nCommand: \");\n  for (uptr i = 0; argv[i]; ++i)\n    Printf(\"%s \", argv[i]);\n  Printf(\"\\n\\n\");\n}\n#endif\n\n// Malloc hooks.\nstatic const int kMaxMallocFreeHooks = 5;\nstruct MallocFreeHook {\n  void (*malloc_hook)(const void *, uptr);\n  void (*free_hook)(const void *);\n};\n\nstatic MallocFreeHook MFHooks[kMaxMallocFreeHooks];\n\nvoid RunMallocHooks(const void *ptr, uptr size) {\n  for (int i = 0; i < kMaxMallocFreeHooks; i++) {\n    auto hook = MFHooks[i].malloc_hook;\n    if (!hook) return;\n    hook(ptr, size);\n  }\n}\n\nvoid RunFreeHooks(const void *ptr) {\n  for (int i = 0; i < kMaxMallocFreeHooks; i++) {\n    auto hook = MFHooks[i].free_hook;\n    if (!hook) return;\n    hook(ptr);\n  }\n}\n\nstatic int InstallMallocFreeHooks(void (*malloc_hook)(const void *, uptr),\n                                  void (*free_hook)(const void *)) {\n  if (!malloc_hook || !free_hook) return 0;\n  for (int i = 0; i < kMaxMallocFreeHooks; i++) {\n    if (MFHooks[i].malloc_hook == nullptr) {\n      MFHooks[i].malloc_hook = malloc_hook;\n      MFHooks[i].free_hook = free_hook;\n      return i + 1;\n    }\n  }\n  return 0;\n}\n\nvoid internal_sleep(unsigned seconds) {\n  internal_usleep((u64)seconds * 1000 * 1000);\n}\nvoid SleepForSeconds(unsigned seconds) {\n  internal_usleep((u64)seconds * 1000 * 1000);\n}\nvoid SleepForMillis(unsigned millis) { internal_usleep((u64)millis * 1000); }\n\n} // namespace __sanitizer\n\nusing namespace __sanitizer;\n\nextern \"C\" 
{\nSANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_report_error_summary,\n                             const char *error_summary) {\n  Printf(\"%s\\n\", error_summary);\n}\n\nSANITIZER_INTERFACE_ATTRIBUTE\nint __sanitizer_acquire_crash_state() {\n  static atomic_uint8_t in_crash_state = {};\n  return !atomic_exchange(&in_crash_state, 1, memory_order_relaxed);\n}\n\nSANITIZER_INTERFACE_ATTRIBUTE\nint __sanitizer_install_malloc_and_free_hooks(void (*malloc_hook)(const void *,\n                                                                  uptr),\n                                              void (*free_hook)(const void *)) {\n  return InstallMallocFreeHooks(malloc_hook, free_hook);\n}\n} // extern \"C\"\n"
  },
  {
    "path": "runtime/sanitizer_common/sanitizer_common.h",
    "content": "//===-- sanitizer_common.h --------------------------------------*- C++ -*-===//\n//\n// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n// See https://llvm.org/LICENSE.txt for license information.\n// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n//\n//===----------------------------------------------------------------------===//\n//\n// This file is shared between run-time libraries of sanitizers.\n//\n// It declares common functions and classes that are used in both runtimes.\n// Implementation of some functions are provided in sanitizer_common, while\n// others must be defined by run-time library itself.\n//===----------------------------------------------------------------------===//\n#ifndef SANITIZER_COMMON_H\n#define SANITIZER_COMMON_H\n\n#include \"sanitizer_flags.h\"\n#include \"sanitizer_interface_internal.h\"\n#include \"sanitizer_internal_defs.h\"\n#include \"sanitizer_libc.h\"\n#include \"sanitizer_list.h\"\n#include \"sanitizer_mutex.h\"\n\n#if defined(_MSC_VER) && !defined(__clang__)\nextern \"C\" void _ReadWriteBarrier();\n#pragma intrinsic(_ReadWriteBarrier)\n#endif\n\nnamespace __sanitizer {\n\nstruct AddressInfo;\nstruct BufferedStackTrace;\nstruct SignalContext;\nstruct StackTrace;\n\n// Constants.\nconst uptr kWordSize = SANITIZER_WORDSIZE / 8;\nconst uptr kWordSizeInBits = 8 * kWordSize;\n\nconst uptr kCacheLineSize = SANITIZER_CACHE_LINE_SIZE;\n\nconst uptr kMaxPathLength = 4096;\n\nconst uptr kMaxThreadStackSize = 1 << 30;  // 1Gb\n\nconst uptr kErrorMessageBufferSize = 1 << 16;\n\n// Denotes fake PC values that come from JIT/JAVA/etc.\n// For such PC values __tsan_symbolize_external_ex() will be called.\nconst u64 kExternalPCBit = 1ULL << 60;\n\nextern const char *SanitizerToolName;  // Can be changed by the tool.\n\nextern atomic_uint32_t current_verbosity;\ninline void SetVerbosity(int verbosity) {\n  atomic_store(&current_verbosity, verbosity, memory_order_relaxed);\n}\ninline 
int Verbosity() {\n  return atomic_load(&current_verbosity, memory_order_relaxed);\n}\n\n#if SANITIZER_ANDROID\ninline uptr GetPageSize() {\n// Android post-M sysconf(_SC_PAGESIZE) crashes if called from .preinit_array.\n  return 4096;\n}\ninline uptr GetPageSizeCached() {\n  return 4096;\n}\n#else\nuptr GetPageSize();\nextern uptr PageSizeCached;\ninline uptr GetPageSizeCached() {\n  if (!PageSizeCached)\n    PageSizeCached = GetPageSize();\n  return PageSizeCached;\n}\n#endif\nuptr GetMmapGranularity();\nuptr GetMaxVirtualAddress();\nuptr GetMaxUserVirtualAddress();\n// Threads\ntid_t GetTid();\nint TgKill(pid_t pid, tid_t tid, int sig);\nuptr GetThreadSelf();\nvoid GetThreadStackTopAndBottom(bool at_initialization, uptr *stack_top,\n                                uptr *stack_bottom);\nvoid GetThreadStackAndTls(bool main, uptr *stk_addr, uptr *stk_size,\n                          uptr *tls_addr, uptr *tls_size);\n\n// Memory management\nvoid *MmapOrDie(uptr size, const char *mem_type, bool raw_report = false);\ninline void *MmapOrDieQuietly(uptr size, const char *mem_type) {\n  return MmapOrDie(size, mem_type, /*raw_report*/ true);\n}\nvoid UnmapOrDie(void *addr, uptr size);\n// Behaves just like MmapOrDie, but tolerates out of memory condition, in that\n// case returns nullptr.\nvoid *MmapOrDieOnFatalError(uptr size, const char *mem_type);\nbool MmapFixedNoReserve(uptr fixed_addr, uptr size, const char *name = nullptr)\n     WARN_UNUSED_RESULT;\nbool MmapFixedSuperNoReserve(uptr fixed_addr, uptr size,\n                             const char *name = nullptr) WARN_UNUSED_RESULT;\nvoid *MmapNoReserveOrDie(uptr size, const char *mem_type);\nvoid *MmapFixedOrDie(uptr fixed_addr, uptr size, const char *name = nullptr);\n// Behaves just like MmapFixedOrDie, but tolerates out of memory condition, in\n// that case returns nullptr.\nvoid *MmapFixedOrDieOnFatalError(uptr fixed_addr, uptr size,\n                                 const char *name = nullptr);\nvoid 
*MmapFixedNoAccess(uptr fixed_addr, uptr size, const char *name = nullptr);\nvoid *MmapNoAccess(uptr size);\n// Map aligned chunk of address space; size and alignment are powers of two.\n// Dies on all but out of memory errors, in the latter case returns nullptr.\nvoid *MmapAlignedOrDieOnFatalError(uptr size, uptr alignment,\n                                   const char *mem_type);\n// Disallow access to a memory range.  Use MmapFixedNoAccess to allocate an\n// unaccessible memory.\nbool MprotectNoAccess(uptr addr, uptr size);\nbool MprotectReadOnly(uptr addr, uptr size);\n\nvoid MprotectMallocZones(void *addr, int prot);\n\n#if SANITIZER_LINUX\n// Unmap memory. Currently only used on Linux.\nvoid UnmapFromTo(uptr from, uptr to);\n#endif\n\n// Maps shadow_size_bytes of shadow memory and returns shadow address. It will\n// be aligned to the mmap granularity * 2^shadow_scale, or to\n// 2^min_shadow_base_alignment if that is larger. The returned address will\n// have max(2^min_shadow_base_alignment, mmap granularity) on the left, and\n// shadow_size_bytes bytes on the right, which on linux is mapped no access.\n// The high_mem_end may be updated if the original shadow size doesn't fit.\nuptr MapDynamicShadow(uptr shadow_size_bytes, uptr shadow_scale,\n                      uptr min_shadow_base_alignment, uptr &high_mem_end);\n\n// Let S = max(shadow_size, num_aliases * alias_size, ring_buffer_size).\n// Reserves 2*S bytes of address space to the right of the returned address and\n// ring_buffer_size bytes to the left.  The returned address is aligned to 2*S.\n// Also creates num_aliases regions of accessible memory starting at offset S\n// from the returned address.  Each region has size alias_size and is backed by\n// the same physical memory.\nuptr MapDynamicShadowAndAliases(uptr shadow_size, uptr alias_size,\n                                uptr num_aliases, uptr ring_buffer_size);\n\n// Reserve memory range [beg, end]. 
If madvise_shadow is true then apply\n// madvise (e.g. hugepages, core dumping) requested by options.\nvoid ReserveShadowMemoryRange(uptr beg, uptr end, const char *name,\n                              bool madvise_shadow = true);\n\n// Protect size bytes of memory starting at addr. Also try to protect\n// several pages at the start of the address space as specified by\n// zero_base_shadow_start, at most up to the size or zero_base_max_shadow_start.\nvoid ProtectGap(uptr addr, uptr size, uptr zero_base_shadow_start,\n                uptr zero_base_max_shadow_start);\n\n// Find an available address space.\nuptr FindAvailableMemoryRange(uptr size, uptr alignment, uptr left_padding,\n                              uptr *largest_gap_found, uptr *max_occupied_addr);\n\n// Used to check if we can map shadow memory to a fixed location.\nbool MemoryRangeIsAvailable(uptr range_start, uptr range_end);\n// Releases memory pages entirely within the [beg, end] address range. Noop if\n// the provided range does not contain at least one entire page.\nvoid ReleaseMemoryPagesToOS(uptr beg, uptr end);\nvoid IncreaseTotalMmap(uptr size);\nvoid DecreaseTotalMmap(uptr size);\nuptr GetRSS();\nvoid SetShadowRegionHugePageMode(uptr addr, uptr length);\nbool DontDumpShadowMemory(uptr addr, uptr length);\n// Check if the built VMA size matches the runtime one.\nvoid CheckVMASize();\nvoid RunMallocHooks(const void *ptr, uptr size);\nvoid RunFreeHooks(const void *ptr);\n\nclass ReservedAddressRange {\n public:\n  uptr Init(uptr size, const char *name = nullptr, uptr fixed_addr = 0);\n  uptr InitAligned(uptr size, uptr align, const char *name = nullptr);\n  uptr Map(uptr fixed_addr, uptr size, const char *name = nullptr);\n  uptr MapOrDie(uptr fixed_addr, uptr size, const char *name = nullptr);\n  void Unmap(uptr addr, uptr size);\n  void *base() const { return base_; }\n  uptr size() const { return size_; }\n\n private:\n  void* base_;\n  uptr size_;\n  const char* name_;\n  uptr 
os_handle_;\n};\n\ntypedef void (*fill_profile_f)(uptr start, uptr rss, bool file,\n                               /*out*/ uptr *stats);\n\n// Parse the contents of /proc/self/smaps and generate a memory profile.\n// |cb| is a tool-specific callback that fills the |stats| array.\nvoid GetMemoryProfile(fill_profile_f cb, uptr *stats);\nvoid ParseUnixMemoryProfile(fill_profile_f cb, uptr *stats, char *smaps,\n                            uptr smaps_len);\n\n// Simple low-level (mmap-based) allocator for internal use. Doesn't have\n// constructor, so all instances of LowLevelAllocator should be\n// linker initialized.\nclass LowLevelAllocator {\n public:\n  // Requires an external lock.\n  void *Allocate(uptr size);\n private:\n  char *allocated_end_;\n  char *allocated_current_;\n};\n// Set the min alignment of LowLevelAllocator to at least alignment.\nvoid SetLowLevelAllocateMinAlignment(uptr alignment);\ntypedef void (*LowLevelAllocateCallback)(uptr ptr, uptr size);\n// Allows to register tool-specific callbacks for LowLevelAllocator.\n// Passing NULL removes the callback.\nvoid SetLowLevelAllocateCallback(LowLevelAllocateCallback callback);\n\n// IO\nvoid CatastrophicErrorWrite(const char *buffer, uptr length);\nvoid RawWrite(const char *buffer);\nbool ColorizeReports();\nvoid RemoveANSIEscapeSequencesFromString(char *buffer);\nvoid Printf(const char *format, ...) FORMAT(1, 2);\nvoid Report(const char *format, ...) FORMAT(1, 2);\nvoid SetPrintfAndReportCallback(void (*callback)(const char *));\n#define VReport(level, ...)                                              \\\n  do {                                                                   \\\n    if ((uptr)Verbosity() >= (level)) Report(__VA_ARGS__); \\\n  } while (0)\n#define VPrintf(level, ...)                                              
\\\n  do {                                                                   \\\n    if ((uptr)Verbosity() >= (level)) Printf(__VA_ARGS__); \\\n  } while (0)\n\n// Lock sanitizer error reporting and protects against nested errors.\nclass ScopedErrorReportLock {\n public:\n  ScopedErrorReportLock() SANITIZER_ACQUIRE(mutex_) { Lock(); }\n  ~ScopedErrorReportLock() SANITIZER_RELEASE(mutex_) { Unlock(); }\n\n  static void Lock() SANITIZER_ACQUIRE(mutex_);\n  static void Unlock() SANITIZER_RELEASE(mutex_);\n  static void CheckLocked() SANITIZER_CHECK_LOCKED(mutex_);\n\n private:\n  static atomic_uintptr_t reporting_thread_;\n  static StaticSpinMutex mutex_;\n};\n\nextern uptr stoptheworld_tracer_pid;\nextern uptr stoptheworld_tracer_ppid;\n\nbool IsAccessibleMemoryRange(uptr beg, uptr size);\n\n// Error report formatting.\nconst char *StripPathPrefix(const char *filepath,\n                            const char *strip_file_prefix);\n// Strip the directories from the module name.\nconst char *StripModuleName(const char *module);\n\n// OS\nuptr ReadBinaryName(/*out*/char *buf, uptr buf_len);\nuptr ReadBinaryNameCached(/*out*/char *buf, uptr buf_len);\nuptr ReadBinaryDir(/*out*/ char *buf, uptr buf_len);\nuptr ReadLongProcessName(/*out*/ char *buf, uptr buf_len);\nconst char *GetProcessName();\nvoid UpdateProcessName();\nvoid CacheBinaryName();\nvoid DisableCoreDumperIfNecessary();\nvoid DumpProcessMap();\nconst char *GetEnv(const char *name);\nbool SetEnv(const char *name, const char *value);\n\nu32 GetUid();\nvoid ReExec();\nvoid CheckASLR();\nvoid CheckMPROTECT();\nchar **GetArgv();\nchar **GetEnviron();\nvoid PrintCmdline();\nbool StackSizeIsUnlimited();\nvoid SetStackSizeLimitInBytes(uptr limit);\nbool AddressSpaceIsUnlimited();\nvoid SetAddressSpaceUnlimited();\nvoid AdjustStackSize(void *attr);\nvoid PlatformPrepareForSandboxing(__sanitizer_sandbox_arguments *args);\nvoid SetSandboxingCallback(void (*f)());\n\nvoid InitializeCoverage(bool enabled, const char 
*coverage_dir);\n\nvoid InitTlsSize();\nuptr GetTlsSize();\n\n// Other\nvoid SleepForSeconds(unsigned seconds);\nvoid SleepForMillis(unsigned millis);\nu64 NanoTime();\nu64 MonotonicNanoTime();\nint Atexit(void (*function)(void));\nbool TemplateMatch(const char *templ, const char *str);\n\n// Exit\nvoid NORETURN Abort();\nvoid NORETURN Die();\nvoid NORETURN\nCheckFailed(const char *file, int line, const char *cond, u64 v1, u64 v2);\nvoid NORETURN ReportMmapFailureAndDie(uptr size, const char *mem_type,\n                                      const char *mmap_type, error_t err,\n                                      bool raw_report = false);\n\n// Specific tools may override behavior of \"Die\" function to do tool-specific\n// job.\ntypedef void (*DieCallbackType)(void);\n\n// It's possible to add several callbacks that would be run when \"Die\" is\n// called. The callbacks will be run in the opposite order. The tools are\n// strongly recommended to setup all callbacks during initialization, when there\n// is only a single thread.\nbool AddDieCallback(DieCallbackType callback);\nbool RemoveDieCallback(DieCallbackType callback);\n\nvoid SetUserDieCallback(DieCallbackType callback);\n\nvoid SetCheckUnwindCallback(void (*callback)());\n\n// Functions related to signal handling.\ntypedef void (*SignalHandlerType)(int, void *, void *);\nHandleSignalMode GetHandleSignalMode(int signum);\nvoid InstallDeadlySignalHandlers(SignalHandlerType handler);\n\n// Signal reporting.\n// Each sanitizer uses slightly different implementation of stack unwinding.\ntypedef void (*UnwindSignalStackCallbackType)(const SignalContext &sig,\n                                              const void *callback_context,\n                                              BufferedStackTrace *stack);\n// Print deadly signal report and die.\nvoid HandleDeadlySignal(void *siginfo, void *context, u32 tid,\n                        UnwindSignalStackCallbackType unwind,\n                        const void 
*unwind_context);\n\n// Part of HandleDeadlySignal, exposed for asan.\nvoid StartReportDeadlySignal();\n// Part of HandleDeadlySignal, exposed for asan.\nvoid ReportDeadlySignal(const SignalContext &sig, u32 tid,\n                        UnwindSignalStackCallbackType unwind,\n                        const void *unwind_context);\n\n// Alternative signal stack (POSIX-only).\nvoid SetAlternateSignalStack();\nvoid UnsetAlternateSignalStack();\n\n// Construct a one-line string:\n//   SUMMARY: SanitizerToolName: error_message\n// and pass it to __sanitizer_report_error_summary.\n// If alt_tool_name is provided, it's used in place of SanitizerToolName.\nvoid ReportErrorSummary(const char *error_message,\n                        const char *alt_tool_name = nullptr);\n// Same as above, but construct error_message as:\n//   error_type file:line[:column][ function]\nvoid ReportErrorSummary(const char *error_type, const AddressInfo &info,\n                        const char *alt_tool_name = nullptr);\n// Same as above, but obtains AddressInfo by symbolizing top stack trace frame.\nvoid ReportErrorSummary(const char *error_type, const StackTrace *trace,\n                        const char *alt_tool_name = nullptr);\n\nvoid ReportMmapWriteExec(int prot, int mflags);\n\n// Math\n#if SANITIZER_WINDOWS && !defined(__clang__) && !defined(__GNUC__)\nextern \"C\" {\nunsigned char _BitScanForward(unsigned long *index, unsigned long mask);\nunsigned char _BitScanReverse(unsigned long *index, unsigned long mask);\n#if defined(_WIN64)\nunsigned char _BitScanForward64(unsigned long *index, unsigned __int64 mask);\nunsigned char _BitScanReverse64(unsigned long *index, unsigned __int64 mask);\n#endif\n}\n#endif\n\ninline uptr MostSignificantSetBitIndex(uptr x) {\n  CHECK_NE(x, 0U);\n  unsigned long up;\n#if !SANITIZER_WINDOWS || defined(__clang__) || defined(__GNUC__)\n# ifdef _WIN64\n  up = SANITIZER_WORDSIZE - 1 - __builtin_clzll(x);\n# else\n  up = SANITIZER_WORDSIZE - 1 - 
__builtin_clzl(x);\n# endif\n#elif defined(_WIN64)\n  _BitScanReverse64(&up, x);\n#else\n  _BitScanReverse(&up, x);\n#endif\n  return up;\n}\n\ninline uptr LeastSignificantSetBitIndex(uptr x) {\n  CHECK_NE(x, 0U);\n  unsigned long up;\n#if !SANITIZER_WINDOWS || defined(__clang__) || defined(__GNUC__)\n# ifdef _WIN64\n  up = __builtin_ctzll(x);\n# else\n  up = __builtin_ctzl(x);\n# endif\n#elif defined(_WIN64)\n  _BitScanForward64(&up, x);\n#else\n  _BitScanForward(&up, x);\n#endif\n  return up;\n}\n\ninline constexpr bool IsPowerOfTwo(uptr x) { return (x & (x - 1)) == 0; }\n\ninline uptr RoundUpToPowerOfTwo(uptr size) {\n  CHECK(size);\n  if (IsPowerOfTwo(size)) return size;\n\n  uptr up = MostSignificantSetBitIndex(size);\n  CHECK_LT(size, (1ULL << (up + 1)));\n  CHECK_GT(size, (1ULL << up));\n  return 1ULL << (up + 1);\n}\n\ninline constexpr uptr RoundUpTo(uptr size, uptr boundary) {\n  RAW_CHECK(IsPowerOfTwo(boundary));\n  return (size + boundary - 1) & ~(boundary - 1);\n}\n\ninline constexpr uptr RoundDownTo(uptr x, uptr boundary) {\n  return x & ~(boundary - 1);\n}\n\ninline constexpr bool IsAligned(uptr a, uptr alignment) {\n  return (a & (alignment - 1)) == 0;\n}\n\ninline uptr Log2(uptr x) {\n  CHECK(IsPowerOfTwo(x));\n  return LeastSignificantSetBitIndex(x);\n}\n\n// Don't use std::min, std::max or std::swap, to minimize dependency\n// on libstdc++.\ntemplate <class T>\nconstexpr T Min(T a, T b) {\n  return a < b ? a : b;\n}\ntemplate <class T>\nconstexpr T Max(T a, T b) {\n  return a > b ? a : b;\n}\ntemplate <class T>\nconstexpr T Abs(T a) {\n  return a < 0 ? -a : a;\n}\ntemplate<class T> void Swap(T& a, T& b) {\n  T tmp = a;\n  a = b;\n  b = tmp;\n}\n\n// Char handling\ninline bool IsSpace(int c) {\n  return (c == ' ') || (c == '\\n') || (c == '\\t') ||\n         (c == '\\f') || (c == '\\r') || (c == '\\v');\n}\ninline bool IsDigit(int c) {\n  return (c >= '0') && (c <= '9');\n}\ninline int ToLower(int c) {\n  return (c >= 'A' && c <= 'Z') ? 
(c + 'a' - 'A') : c;\n}\n\n// A low-level vector based on mmap. May incur a significant memory overhead for\n// small vectors.\n// WARNING: The current implementation supports only POD types.\ntemplate<typename T>\nclass InternalMmapVectorNoCtor {\n public:\n  using value_type = T;\n  void Initialize(uptr initial_capacity) {\n    capacity_bytes_ = 0;\n    size_ = 0;\n    data_ = 0;\n    reserve(initial_capacity);\n  }\n  void Destroy() { UnmapOrDie(data_, capacity_bytes_); }\n  T &operator[](uptr i) {\n    CHECK_LT(i, size_);\n    return data_[i];\n  }\n  const T &operator[](uptr i) const {\n    CHECK_LT(i, size_);\n    return data_[i];\n  }\n  void push_back(const T &element) {\n    CHECK_LE(size_, capacity());\n    if (size_ == capacity()) {\n      uptr new_capacity = RoundUpToPowerOfTwo(size_ + 1);\n      Realloc(new_capacity);\n    }\n    internal_memcpy(&data_[size_++], &element, sizeof(T));\n  }\n  T &back() {\n    CHECK_GT(size_, 0);\n    return data_[size_ - 1];\n  }\n  void pop_back() {\n    CHECK_GT(size_, 0);\n    size_--;\n  }\n  uptr size() const {\n    return size_;\n  }\n  const T *data() const {\n    return data_;\n  }\n  T *data() {\n    return data_;\n  }\n  uptr capacity() const { return capacity_bytes_ / sizeof(T); }\n  void reserve(uptr new_size) {\n    // Never downsize internal buffer.\n    if (new_size > capacity())\n      Realloc(new_size);\n  }\n  void resize(uptr new_size) {\n    if (new_size > size_) {\n      reserve(new_size);\n      internal_memset(&data_[size_], 0, sizeof(T) * (new_size - size_));\n    }\n    size_ = new_size;\n  }\n\n  void clear() { size_ = 0; }\n  bool empty() const { return size() == 0; }\n\n  const T *begin() const {\n    return data();\n  }\n  T *begin() {\n    return data();\n  }\n  const T *end() const {\n    return data() + size();\n  }\n  T *end() {\n    return data() + size();\n  }\n\n  void swap(InternalMmapVectorNoCtor &other) {\n    Swap(data_, other.data_);\n    Swap(capacity_bytes_, 
other.capacity_bytes_);\n    Swap(size_, other.size_);\n  }\n\n private:\n  void Realloc(uptr new_capacity) {\n    CHECK_GT(new_capacity, 0);\n    CHECK_LE(size_, new_capacity);\n    uptr new_capacity_bytes =\n        RoundUpTo(new_capacity * sizeof(T), GetPageSizeCached());\n    T *new_data = (T *)MmapOrDie(new_capacity_bytes, \"InternalMmapVector\");\n    internal_memcpy(new_data, data_, size_ * sizeof(T));\n    UnmapOrDie(data_, capacity_bytes_);\n    data_ = new_data;\n    capacity_bytes_ = new_capacity_bytes;\n  }\n\n  T *data_;\n  uptr capacity_bytes_;\n  uptr size_;\n};\n\ntemplate <typename T>\nbool operator==(const InternalMmapVectorNoCtor<T> &lhs,\n                const InternalMmapVectorNoCtor<T> &rhs) {\n  if (lhs.size() != rhs.size()) return false;\n  return internal_memcmp(lhs.data(), rhs.data(), lhs.size() * sizeof(T)) == 0;\n}\n\ntemplate <typename T>\nbool operator!=(const InternalMmapVectorNoCtor<T> &lhs,\n                const InternalMmapVectorNoCtor<T> &rhs) {\n  return !(lhs == rhs);\n}\n\ntemplate<typename T>\nclass InternalMmapVector : public InternalMmapVectorNoCtor<T> {\n public:\n  InternalMmapVector() { InternalMmapVectorNoCtor<T>::Initialize(0); }\n  explicit InternalMmapVector(uptr cnt) {\n    InternalMmapVectorNoCtor<T>::Initialize(cnt);\n    this->resize(cnt);\n  }\n  ~InternalMmapVector() { InternalMmapVectorNoCtor<T>::Destroy(); }\n  // Disallow copies and moves.\n  InternalMmapVector(const InternalMmapVector &) = delete;\n  InternalMmapVector &operator=(const InternalMmapVector &) = delete;\n  InternalMmapVector(InternalMmapVector &&) = delete;\n  InternalMmapVector &operator=(InternalMmapVector &&) = delete;\n};\n\nclass InternalScopedString {\n public:\n  InternalScopedString() : buffer_(1) { buffer_[0] = '\\0'; }\n\n  uptr length() const { return buffer_.size() - 1; }\n  void clear() {\n    buffer_.resize(1);\n    buffer_[0] = '\\0';\n  }\n  void append(const char *format, ...) 
FORMAT(2, 3);\n  const char *data() const { return buffer_.data(); }\n  char *data() { return buffer_.data(); }\n\n private:\n  InternalMmapVector<char> buffer_;\n};\n\ntemplate <class T>\nstruct CompareLess {\n  bool operator()(const T &a, const T &b) const { return a < b; }\n};\n\n// HeapSort for arrays and InternalMmapVector.\ntemplate <class T, class Compare = CompareLess<T>>\nvoid Sort(T *v, uptr size, Compare comp = {}) {\n  if (size < 2)\n    return;\n  // Stage 1: insert elements to the heap.\n  for (uptr i = 1; i < size; i++) {\n    uptr j, p;\n    for (j = i; j > 0; j = p) {\n      p = (j - 1) / 2;\n      if (comp(v[p], v[j]))\n        Swap(v[j], v[p]);\n      else\n        break;\n    }\n  }\n  // Stage 2: swap largest element with the last one,\n  // and sink the new top.\n  for (uptr i = size - 1; i > 0; i--) {\n    Swap(v[0], v[i]);\n    uptr j, max_ind;\n    for (j = 0; j < i; j = max_ind) {\n      uptr left = 2 * j + 1;\n      uptr right = 2 * j + 2;\n      max_ind = j;\n      if (left < i && comp(v[max_ind], v[left]))\n        max_ind = left;\n      if (right < i && comp(v[max_ind], v[right]))\n        max_ind = right;\n      if (max_ind != j)\n        Swap(v[j], v[max_ind]);\n      else\n        break;\n    }\n  }\n}\n\n// Works like std::lower_bound: finds the first element that is not less\n// than the val.\ntemplate <class Container, class T,\n          class Compare = CompareLess<typename Container::value_type>>\nuptr InternalLowerBound(const Container &v, const T &val, Compare comp = {}) {\n  uptr first = 0;\n  uptr last = v.size();\n  while (last > first) {\n    uptr mid = (first + last) / 2;\n    if (comp(v[mid], val))\n      first = mid + 1;\n    else\n      last = mid;\n  }\n  return first;\n}\n\nenum ModuleArch {\n  kModuleArchUnknown,\n  kModuleArchI386,\n  kModuleArchX86_64,\n  kModuleArchX86_64H,\n  kModuleArchARMV6,\n  kModuleArchARMV7,\n  kModuleArchARMV7S,\n  kModuleArchARMV7K,\n  kModuleArchARM64,\n  kModuleArchRISCV64,\n  
kModuleArchHexagon\n};\n\n// Sorts and removes duplicates from the container.\ntemplate <class Container,\n          class Compare = CompareLess<typename Container::value_type>>\nvoid SortAndDedup(Container &v, Compare comp = {}) {\n  Sort(v.data(), v.size(), comp);\n  uptr size = v.size();\n  if (size < 2)\n    return;\n  uptr last = 0;\n  for (uptr i = 1; i < size; ++i) {\n    if (comp(v[last], v[i])) {\n      ++last;\n      if (last != i)\n        v[last] = v[i];\n    } else {\n      CHECK(!comp(v[i], v[last]));\n    }\n  }\n  v.resize(last + 1);\n}\n\nconstexpr uptr kDefaultFileMaxSize = FIRST_32_SECOND_64(1 << 26, 1 << 28);\n\n// Opens the file 'file_name\" and reads up to 'max_len' bytes.\n// The resulting buffer is mmaped and stored in '*buff'.\n// Returns true if file was successfully opened and read.\nbool ReadFileToVector(const char *file_name,\n                      InternalMmapVectorNoCtor<char> *buff,\n                      uptr max_len = kDefaultFileMaxSize,\n                      error_t *errno_p = nullptr);\n\n// Opens the file 'file_name\" and reads up to 'max_len' bytes.\n// This function is less I/O efficient than ReadFileToVector as it may reread\n// file multiple times to avoid mmap during read attempts. 
It's used to read\n// procmap, so short reads with mmap in between can produce inconsistent result.\n// The resulting buffer is mmaped and stored in '*buff'.\n// The size of the mmaped region is stored in '*buff_size'.\n// The total number of read bytes is stored in '*read_len'.\n// Returns true if file was successfully opened and read.\nbool ReadFileToBuffer(const char *file_name, char **buff, uptr *buff_size,\n                      uptr *read_len, uptr max_len = kDefaultFileMaxSize,\n                      error_t *errno_p = nullptr);\n\n// When adding a new architecture, don't forget to also update\n// script/asan_symbolize.py and sanitizer_symbolizer_libcdep.cpp.\ninline const char *ModuleArchToString(ModuleArch arch) {\n  switch (arch) {\n    case kModuleArchUnknown:\n      return \"\";\n    case kModuleArchI386:\n      return \"i386\";\n    case kModuleArchX86_64:\n      return \"x86_64\";\n    case kModuleArchX86_64H:\n      return \"x86_64h\";\n    case kModuleArchARMV6:\n      return \"armv6\";\n    case kModuleArchARMV7:\n      return \"armv7\";\n    case kModuleArchARMV7S:\n      return \"armv7s\";\n    case kModuleArchARMV7K:\n      return \"armv7k\";\n    case kModuleArchARM64:\n      return \"arm64\";\n    case kModuleArchRISCV64:\n      return \"riscv64\";\n    case kModuleArchHexagon:\n      return \"hexagon\";\n  }\n  CHECK(0 && \"Invalid module arch\");\n  return \"\";\n}\n\nconst uptr kModuleUUIDSize = 32;\nconst uptr kMaxSegName = 16;\n\n// Represents a binary loaded into virtual memory (e.g. 
this can be an\n// executable or a shared object).\nclass LoadedModule {\n public:\n  LoadedModule()\n      : full_name_(nullptr),\n        base_address_(0),\n        max_executable_address_(0),\n        arch_(kModuleArchUnknown),\n        uuid_size_(0),\n        instrumented_(false) {\n    internal_memset(uuid_, 0, kModuleUUIDSize);\n    ranges_.clear();\n  }\n  void set(const char *module_name, uptr base_address);\n  void set(const char *module_name, uptr base_address, ModuleArch arch,\n           u8 uuid[kModuleUUIDSize], bool instrumented);\n  void setUuid(const char *uuid, uptr size);\n  void clear();\n  void addAddressRange(uptr beg, uptr end, bool executable, bool writable,\n                       const char *name = nullptr);\n  bool containsAddress(uptr address) const;\n\n  const char *full_name() const { return full_name_; }\n  uptr base_address() const { return base_address_; }\n  uptr max_executable_address() const { return max_executable_address_; }\n  ModuleArch arch() const { return arch_; }\n  const u8 *uuid() const { return uuid_; }\n  uptr uuid_size() const { return uuid_size_; }\n  bool instrumented() const { return instrumented_; }\n\n  struct AddressRange {\n    AddressRange *next;\n    uptr beg;\n    uptr end;\n    bool executable;\n    bool writable;\n    char name[kMaxSegName];\n\n    AddressRange(uptr beg, uptr end, bool executable, bool writable,\n                 const char *name)\n        : next(nullptr),\n          beg(beg),\n          end(end),\n          executable(executable),\n          writable(writable) {\n      internal_strncpy(this->name, (name ? 
name : \"\"), ARRAY_SIZE(this->name));\n    }\n  };\n\n  const IntrusiveList<AddressRange> &ranges() const { return ranges_; }\n\n private:\n  char *full_name_;  // Owned.\n  uptr base_address_;\n  uptr max_executable_address_;\n  ModuleArch arch_;\n  uptr uuid_size_;\n  u8 uuid_[kModuleUUIDSize];\n  bool instrumented_;\n  IntrusiveList<AddressRange> ranges_;\n};\n\n// List of LoadedModules. OS-dependent implementation is responsible for\n// filling this information.\nclass ListOfModules {\n public:\n  ListOfModules() : initialized(false) {}\n  ~ListOfModules() { clear(); }\n  void init();\n  void fallbackInit();  // Uses fallback init if available, otherwise clears\n  const LoadedModule *begin() const { return modules_.begin(); }\n  LoadedModule *begin() { return modules_.begin(); }\n  const LoadedModule *end() const { return modules_.end(); }\n  LoadedModule *end() { return modules_.end(); }\n  uptr size() const { return modules_.size(); }\n  const LoadedModule &operator[](uptr i) const {\n    CHECK_LT(i, modules_.size());\n    return modules_[i];\n  }\n\n private:\n  void clear() {\n    for (auto &module : modules_) module.clear();\n    modules_.clear();\n  }\n  void clearOrInit() {\n    initialized ? 
clear() : modules_.Initialize(kInitialCapacity);\n    initialized = true;\n  }\n\n  InternalMmapVectorNoCtor<LoadedModule> modules_;\n  // We rarely have more than 16K loaded modules.\n  static const uptr kInitialCapacity = 1 << 14;\n  bool initialized;\n};\n\n// Callback type for iterating over a set of memory ranges.\ntypedef void (*RangeIteratorCallback)(uptr begin, uptr end, void *arg);\n\nenum AndroidApiLevel {\n  ANDROID_NOT_ANDROID = 0,\n  ANDROID_KITKAT = 19,\n  ANDROID_LOLLIPOP_MR1 = 22,\n  ANDROID_POST_LOLLIPOP = 23\n};\n\nvoid WriteToSyslog(const char *buffer);\n\n#if defined(SANITIZER_WINDOWS) && defined(_MSC_VER) && !defined(__clang__)\n#define SANITIZER_WIN_TRACE 1\n#else\n#define SANITIZER_WIN_TRACE 0\n#endif\n\n#if SANITIZER_MAC || SANITIZER_WIN_TRACE\nvoid LogFullErrorReport(const char *buffer);\n#else\ninline void LogFullErrorReport(const char *buffer) {}\n#endif\n\n#if SANITIZER_LINUX || SANITIZER_MAC\nvoid WriteOneLineToSyslog(const char *s);\nvoid LogMessageOnPrintf(const char *str);\n#else\ninline void WriteOneLineToSyslog(const char *s) {}\ninline void LogMessageOnPrintf(const char *str) {}\n#endif\n\n#if SANITIZER_LINUX || SANITIZER_WIN_TRACE\n// Initialize Android logging. Any writes before this are silently lost.\nvoid AndroidLogInit();\nvoid SetAbortMessage(const char *);\n#else\ninline void AndroidLogInit() {}\n// FIXME: MacOS implementation could use CRSetCrashLogMessage.\ninline void SetAbortMessage(const char *) {}\n#endif\n\n#if SANITIZER_ANDROID\nvoid SanitizerInitializeUnwinder();\nAndroidApiLevel AndroidGetApiLevel();\n#else\ninline void AndroidLogWrite(const char *buffer_unused) {}\ninline void SanitizerInitializeUnwinder() {}\ninline AndroidApiLevel AndroidGetApiLevel() { return ANDROID_NOT_ANDROID; }\n#endif\n\ninline uptr GetPthreadDestructorIterations() {\n#if SANITIZER_ANDROID\n  return (AndroidGetApiLevel() == ANDROID_LOLLIPOP_MR1) ? 
8 : 4;\n#elif SANITIZER_POSIX\n  return 4;\n#else\n// Unused on Windows.\n  return 0;\n#endif\n}\n\nvoid *internal_start_thread(void *(*func)(void*), void *arg);\nvoid internal_join_thread(void *th);\nvoid MaybeStartBackgroudThread();\n\n// Make the compiler think that something is going on there.\n// Use this inside a loop that looks like memset/memcpy/etc to prevent the\n// compiler from recognising it and turning it into an actual call to\n// memset/memcpy/etc.\nstatic inline void SanitizerBreakOptimization(void *arg) {\n#if defined(_MSC_VER) && !defined(__clang__)\n  _ReadWriteBarrier();\n#else\n  __asm__ __volatile__(\"\" : : \"r\" (arg) : \"memory\");\n#endif\n}\n\nstruct SignalContext {\n  void *siginfo;\n  void *context;\n  uptr addr;\n  uptr pc;\n  uptr sp;\n  uptr bp;\n  bool is_memory_access;\n  enum WriteFlag { Unknown, Read, Write } write_flag;\n\n  // In some cases the kernel cannot provide the true faulting address; `addr`\n  // will be zero then.  This field allows to distinguish between these cases\n  // and dereferences of null.\n  bool is_true_faulting_addr;\n\n  // VS2013 doesn't implement unrestricted unions, so we need a trivial default\n  // constructor\n  SignalContext() = default;\n\n  // Creates signal context in a platform-specific manner.\n  // SignalContext is going to keep pointers to siginfo and context without\n  // owning them.\n  SignalContext(void *siginfo, void *context)\n      : siginfo(siginfo),\n        context(context),\n        addr(GetAddress()),\n        is_memory_access(IsMemoryAccess()),\n        write_flag(GetWriteFlag()),\n        is_true_faulting_addr(IsTrueFaultingAddress()) {\n    InitPcSpBp();\n  }\n\n  static void DumpAllRegisters(void *context);\n\n  // Type of signal e.g. 
SIGSEGV or EXCEPTION_ACCESS_VIOLATION.\n  int GetType() const;\n\n  // String description of the signal.\n  const char *Describe() const;\n\n  // Returns true if signal is stack overflow.\n  bool IsStackOverflow() const;\n\n private:\n  // Platform specific initialization.\n  void InitPcSpBp();\n  uptr GetAddress() const;\n  WriteFlag GetWriteFlag() const;\n  bool IsMemoryAccess() const;\n  bool IsTrueFaultingAddress() const;\n};\n\nvoid InitializePlatformEarly();\nvoid MaybeReexec();\n\ntemplate <typename Fn>\nclass RunOnDestruction {\n public:\n  explicit RunOnDestruction(Fn fn) : fn_(fn) {}\n  ~RunOnDestruction() { fn_(); }\n\n private:\n  Fn fn_;\n};\n\n// A simple scope guard. Usage:\n// auto cleanup = at_scope_exit([]{ do_cleanup; });\ntemplate <typename Fn>\nRunOnDestruction<Fn> at_scope_exit(Fn fn) {\n  return RunOnDestruction<Fn>(fn);\n}\n\n// Linux on 64-bit s390 had a nasty bug that crashes the whole machine\n// if a process uses virtual memory over 4TB (as many sanitizers like\n// to do).  This function will abort the process if running on a kernel\n// that looks vulnerable.\n#if SANITIZER_LINUX && SANITIZER_S390_64\nvoid AvoidCVE_2016_2143();\n#else\ninline void AvoidCVE_2016_2143() {}\n#endif\n\nstruct StackDepotStats {\n  uptr n_uniq_ids;\n  uptr allocated;\n};\n\n// The default value for allocator_release_to_os_interval_ms common flag to\n// indicate that sanitizer allocator should not attempt to release memory to OS.\nconst s32 kReleaseToOSIntervalNever = -1;\n\nvoid CheckNoDeepBind(const char *filename, int flag);\n\n// Returns the requested amount of random data (up to 256 bytes) that can then\n// be used to seed a PRNG. 
Defaults to blocking like the underlying syscall.\nbool GetRandom(void *buffer, uptr length, bool blocking = true);\n\n// Returns the number of logical processors on the system.\nu32 GetNumberOfCPUs();\nextern u32 NumberOfCPUsCached;\ninline u32 GetNumberOfCPUsCached() {\n  if (!NumberOfCPUsCached)\n    NumberOfCPUsCached = GetNumberOfCPUs();\n  return NumberOfCPUsCached;\n}\n\ntemplate <typename T>\nclass ArrayRef {\n public:\n  ArrayRef() {}\n  ArrayRef(T *begin, T *end) : begin_(begin), end_(end) {}\n\n  T *begin() { return begin_; }\n  T *end() { return end_; }\n\n private:\n  T *begin_ = nullptr;\n  T *end_ = nullptr;\n};\n\n}  // namespace __sanitizer\n\ninline void *operator new(__sanitizer::operator_new_size_type size,\n                          __sanitizer::LowLevelAllocator &alloc) {\n  return alloc.Allocate(size);\n}\n\n#endif  // SANITIZER_COMMON_H\n"
  },
  {
    "path": "runtime/sanitizer_common/sanitizer_common_interceptors.inc",
    "content": "//===-- sanitizer_common_interceptors.inc -----------------------*- C++ -*-===//\n//\n// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n// See https://llvm.org/LICENSE.txt for license information.\n// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n//\n//===----------------------------------------------------------------------===//\n//\n// Common function interceptors for tools like AddressSanitizer,\n// ThreadSanitizer, MemorySanitizer, etc.\n//\n// This file should be included into the tool's interceptor file,\n// which has to define its own macros:\n//   COMMON_INTERCEPTOR_ENTER\n//   COMMON_INTERCEPTOR_ENTER_NOIGNORE\n//   COMMON_INTERCEPTOR_READ_RANGE\n//   COMMON_INTERCEPTOR_WRITE_RANGE\n//   COMMON_INTERCEPTOR_INITIALIZE_RANGE\n//   COMMON_INTERCEPTOR_DIR_ACQUIRE\n//   COMMON_INTERCEPTOR_FD_ACQUIRE\n//   COMMON_INTERCEPTOR_FD_RELEASE\n//   COMMON_INTERCEPTOR_FD_ACCESS\n//   COMMON_INTERCEPTOR_SET_THREAD_NAME\n//   COMMON_INTERCEPTOR_DLOPEN\n//   COMMON_INTERCEPTOR_ON_EXIT\n//   COMMON_INTERCEPTOR_MUTEX_PRE_LOCK\n//   COMMON_INTERCEPTOR_MUTEX_POST_LOCK\n//   COMMON_INTERCEPTOR_MUTEX_UNLOCK\n//   COMMON_INTERCEPTOR_MUTEX_REPAIR\n//   COMMON_INTERCEPTOR_SET_PTHREAD_NAME\n//   COMMON_INTERCEPTOR_HANDLE_RECVMSG\n//   COMMON_INTERCEPTOR_NOTHING_IS_INITIALIZED\n//   COMMON_INTERCEPTOR_MEMSET_IMPL\n//   COMMON_INTERCEPTOR_MEMMOVE_IMPL\n//   COMMON_INTERCEPTOR_MEMCPY_IMPL\n//   COMMON_INTERCEPTOR_MMAP_IMPL\n//   COMMON_INTERCEPTOR_COPY_STRING\n//   COMMON_INTERCEPTOR_STRNDUP_IMPL\n//   COMMON_INTERCEPTOR_STRERROR\n//===----------------------------------------------------------------------===//\n\n#include \"interception/interception.h\"\n#include \"sanitizer_addrhashmap.h\"\n#include \"sanitizer_errno.h\"\n#include \"sanitizer_placement_new.h\"\n#include \"sanitizer_platform_interceptors.h\"\n#include \"sanitizer_symbolizer.h\"\n#include \"sanitizer_tls_get_addr.h\"\n\n#include <stdarg.h>\n\n#if 
SANITIZER_INTERCEPTOR_HOOKS\n#define CALL_WEAK_INTERCEPTOR_HOOK(f, ...) f(__VA_ARGS__);\n#define DECLARE_WEAK_INTERCEPTOR_HOOK(f, ...) \\\n  SANITIZER_INTERFACE_WEAK_DEF(void, f, __VA_ARGS__) {}\n#else\n#define DECLARE_WEAK_INTERCEPTOR_HOOK(f, ...)\n#define CALL_WEAK_INTERCEPTOR_HOOK(f, ...)\n\n#endif  // SANITIZER_INTERCEPTOR_HOOKS\n\n#if SANITIZER_WINDOWS && !defined(va_copy)\n#define va_copy(dst, src) ((dst) = (src))\n#endif // _WIN32\n\n#if SANITIZER_FREEBSD\n#define pthread_setname_np pthread_set_name_np\n#define inet_aton __inet_aton\n#define inet_pton __inet_pton\n#define iconv __bsd_iconv\n#endif\n\n#if SANITIZER_NETBSD\n#define clock_getres __clock_getres50\n#define clock_gettime __clock_gettime50\n#define clock_settime __clock_settime50\n#define ctime __ctime50\n#define ctime_r __ctime_r50\n#define devname __devname50\n#define fgetpos __fgetpos50\n#define fsetpos __fsetpos50\n#define fstatvfs __fstatvfs90\n#define fstatvfs1 __fstatvfs190\n#define fts_children __fts_children60\n#define fts_close __fts_close60\n#define fts_open __fts_open60\n#define fts_read __fts_read60\n#define fts_set __fts_set60\n#define getitimer __getitimer50\n#define getmntinfo __getmntinfo90\n#define getpwent __getpwent50\n#define getpwnam __getpwnam50\n#define getpwnam_r __getpwnam_r50\n#define getpwuid __getpwuid50\n#define getpwuid_r __getpwuid_r50\n#define getutent __getutent50\n#define getutxent __getutxent50\n#define getutxid __getutxid50\n#define getutxline __getutxline50\n#define getvfsstat __getvfsstat90\n#define pututxline __pututxline50\n#define glob __glob30\n#define gmtime __gmtime50\n#define gmtime_r __gmtime_r50\n#define localtime __locatime50\n#define localtime_r __localtime_r50\n#define mktime __mktime50\n#define lstat __lstat50\n#define opendir __opendir30\n#define readdir __readdir30\n#define readdir_r __readdir_r30\n#define scandir __scandir30\n#define setitimer __setitimer50\n#define setlocale __setlocale50\n#define shmctl __shmctl50\n#define sigaltstack 
__sigaltstack14\n#define sigemptyset __sigemptyset14\n#define sigfillset __sigfillset14\n#define sigpending __sigpending14\n#define sigprocmask __sigprocmask14\n#define sigtimedwait __sigtimedwait50\n#define stat __stat50\n#define statvfs __statvfs90\n#define statvfs1 __statvfs190\n#define time __time50\n#define times __times13\n#define unvis __unvis50\n#define wait3 __wait350\n#define wait4 __wait450\nextern const unsigned short *_ctype_tab_;\nextern const short *_toupper_tab_;\nextern const short *_tolower_tab_;\n#endif\n\n// Platform-specific options.\n#if SANITIZER_MAC\n#define PLATFORM_HAS_DIFFERENT_MEMCPY_AND_MEMMOVE 0\n#elif SANITIZER_WINDOWS64\n#define PLATFORM_HAS_DIFFERENT_MEMCPY_AND_MEMMOVE 0\n#else\n#define PLATFORM_HAS_DIFFERENT_MEMCPY_AND_MEMMOVE 1\n#endif  // SANITIZER_MAC\n\n#ifndef COMMON_INTERCEPTOR_INITIALIZE_RANGE\n#define COMMON_INTERCEPTOR_INITIALIZE_RANGE(p, size) {}\n#endif\n\n#ifndef COMMON_INTERCEPTOR_UNPOISON_PARAM\n#define COMMON_INTERCEPTOR_UNPOISON_PARAM(count) {}\n#endif\n\n#ifndef COMMON_INTERCEPTOR_FD_ACCESS\n#define COMMON_INTERCEPTOR_FD_ACCESS(ctx, fd) {}\n#endif\n\n#ifndef COMMON_INTERCEPTOR_MUTEX_PRE_LOCK\n#define COMMON_INTERCEPTOR_MUTEX_PRE_LOCK(ctx, m) {}\n#endif\n\n#ifndef COMMON_INTERCEPTOR_MUTEX_POST_LOCK\n#define COMMON_INTERCEPTOR_MUTEX_POST_LOCK(ctx, m) {}\n#endif\n\n#ifndef COMMON_INTERCEPTOR_MUTEX_UNLOCK\n#define COMMON_INTERCEPTOR_MUTEX_UNLOCK(ctx, m) {}\n#endif\n\n#ifndef COMMON_INTERCEPTOR_MUTEX_REPAIR\n#define COMMON_INTERCEPTOR_MUTEX_REPAIR(ctx, m) {}\n#endif\n\n#ifndef COMMON_INTERCEPTOR_MUTEX_INVALID\n#define COMMON_INTERCEPTOR_MUTEX_INVALID(ctx, m) {}\n#endif\n\n#ifndef COMMON_INTERCEPTOR_HANDLE_RECVMSG\n#define COMMON_INTERCEPTOR_HANDLE_RECVMSG(ctx, msg) ((void)(msg))\n#endif\n\n#ifndef COMMON_INTERCEPTOR_FILE_OPEN\n#define COMMON_INTERCEPTOR_FILE_OPEN(ctx, file, path) {}\n#endif\n\n#ifndef COMMON_INTERCEPTOR_FILE_CLOSE\n#define COMMON_INTERCEPTOR_FILE_CLOSE(ctx, file) {}\n#endif\n\n#ifndef 
COMMON_INTERCEPTOR_LIBRARY_LOADED\n#define COMMON_INTERCEPTOR_LIBRARY_LOADED(filename, handle) {}\n#endif\n\n#ifndef COMMON_INTERCEPTOR_LIBRARY_UNLOADED\n#define COMMON_INTERCEPTOR_LIBRARY_UNLOADED() {}\n#endif\n\n#ifndef COMMON_INTERCEPTOR_ENTER_NOIGNORE\n#define COMMON_INTERCEPTOR_ENTER_NOIGNORE(ctx, ...) \\\n  COMMON_INTERCEPTOR_ENTER(ctx, __VA_ARGS__)\n#endif\n\n#ifndef COMMON_INTERCEPTOR_NOTHING_IS_INITIALIZED\n#define COMMON_INTERCEPTOR_NOTHING_IS_INITIALIZED (0)\n#endif\n\n#define COMMON_INTERCEPTOR_READ_STRING(ctx, s, n)                   \\\n    COMMON_INTERCEPTOR_READ_RANGE((ctx), (s),                       \\\n      common_flags()->strict_string_checks ? (internal_strlen(s)) + 1 : (n) )\n\n#ifndef COMMON_INTERCEPTOR_DLOPEN\n#define COMMON_INTERCEPTOR_DLOPEN(filename, flag) \\\n  ({ CheckNoDeepBind(filename, flag); REAL(dlopen)(filename, flag); })\n#endif\n\n#ifndef COMMON_INTERCEPTOR_GET_TLS_RANGE\n#define COMMON_INTERCEPTOR_GET_TLS_RANGE(begin, end) *begin = *end = 0;\n#endif\n\n#ifndef COMMON_INTERCEPTOR_ACQUIRE\n#define COMMON_INTERCEPTOR_ACQUIRE(ctx, u) {}\n#endif\n\n#ifndef COMMON_INTERCEPTOR_RELEASE\n#define COMMON_INTERCEPTOR_RELEASE(ctx, u) {}\n#endif\n\n#ifndef COMMON_INTERCEPTOR_USER_CALLBACK_START\n#define COMMON_INTERCEPTOR_USER_CALLBACK_START() {}\n#endif\n\n#ifndef COMMON_INTERCEPTOR_USER_CALLBACK_END\n#define COMMON_INTERCEPTOR_USER_CALLBACK_END() {}\n#endif\n\n#ifdef SANITIZER_NLDBL_VERSION\n#define COMMON_INTERCEPT_FUNCTION_LDBL(fn)                          \\\n    COMMON_INTERCEPT_FUNCTION_VER(fn, SANITIZER_NLDBL_VERSION)\n#else\n#define COMMON_INTERCEPT_FUNCTION_LDBL(fn)                          \\\n    COMMON_INTERCEPT_FUNCTION(fn)\n#endif\n\n#if SANITIZER_GLIBC\n// If we could not find the versioned symbol, fall back to an unversioned\n// lookup. 
This is needed to work around a GLibc bug that causes dlsym\n// with RTLD_NEXT to return the oldest versioned symbol.\n// See https://sourceware.org/bugzilla/show_bug.cgi?id=14932.\n// For certain symbols (e.g. regexec) we have to perform a versioned lookup,\n// but that versioned symbol will only exist for architectures where the\n// oldest Glibc version pre-dates support for that architecture.\n// For example, regexec@GLIBC_2.3.4 exists on x86_64, but not RISC-V.\n// See also https://gcc.gnu.org/bugzilla/show_bug.cgi?id=98920.\n#define COMMON_INTERCEPT_FUNCTION_GLIBC_VER_MIN(fn, ver) \\\n  COMMON_INTERCEPT_FUNCTION_VER_UNVERSIONED_FALLBACK(fn, ver)\n#else\n#define COMMON_INTERCEPT_FUNCTION_GLIBC_VER_MIN(fn, ver) \\\n  COMMON_INTERCEPT_FUNCTION(fn)\n#endif\n\n#ifndef COMMON_INTERCEPTOR_MEMSET_IMPL\n#define COMMON_INTERCEPTOR_MEMSET_IMPL(ctx, dst, v, size) \\\n  {                                                       \\\n    if (COMMON_INTERCEPTOR_NOTHING_IS_INITIALIZED)        \\\n      return internal_memset(dst, v, size);               \\\n    COMMON_INTERCEPTOR_ENTER(ctx, memset, dst, v, size);  \\\n    if (common_flags()->intercept_intrin)                 \\\n      COMMON_INTERCEPTOR_WRITE_RANGE(ctx, dst, size);     \\\n    return REAL(memset)(dst, v, size);                    \\\n  }\n#endif\n\n#ifndef COMMON_INTERCEPTOR_MEMMOVE_IMPL\n#define COMMON_INTERCEPTOR_MEMMOVE_IMPL(ctx, dst, src, size) \\\n  {                                                          \\\n    if (COMMON_INTERCEPTOR_NOTHING_IS_INITIALIZED)           \\\n      return internal_memmove(dst, src, size);               \\\n    COMMON_INTERCEPTOR_ENTER(ctx, memmove, dst, src, size);  \\\n    if (common_flags()->intercept_intrin) {                  \\\n      COMMON_INTERCEPTOR_WRITE_RANGE(ctx, dst, size);        \\\n      COMMON_INTERCEPTOR_READ_RANGE(ctx, src, size);         \\\n    }                                                        \\\n    return REAL(memmove)(dst, src, size);           
         \\\n  }\n#endif\n\n#ifndef COMMON_INTERCEPTOR_MEMCPY_IMPL\n#define COMMON_INTERCEPTOR_MEMCPY_IMPL(ctx, dst, src, size) \\\n  {                                                         \\\n    if (COMMON_INTERCEPTOR_NOTHING_IS_INITIALIZED) {        \\\n      return internal_memmove(dst, src, size);              \\\n    }                                                       \\\n    COMMON_INTERCEPTOR_ENTER(ctx, memcpy, dst, src, size);  \\\n    if (common_flags()->intercept_intrin) {                 \\\n      COMMON_INTERCEPTOR_WRITE_RANGE(ctx, dst, size);       \\\n      COMMON_INTERCEPTOR_READ_RANGE(ctx, src, size);        \\\n    }                                                       \\\n    return REAL(memcpy)(dst, src, size);                    \\\n  }\n#endif\n\n#ifndef COMMON_INTERCEPTOR_MMAP_IMPL\n#define COMMON_INTERCEPTOR_MMAP_IMPL(ctx, mmap, addr, sz, prot, flags, fd, \\\n                                     off)                                  \\\n  { return REAL(mmap)(addr, sz, prot, flags, fd, off); }\n#endif\n\n#ifndef COMMON_INTERCEPTOR_COPY_STRING\n#define COMMON_INTERCEPTOR_COPY_STRING(ctx, to, from, size) {}\n#endif\n\n#ifndef COMMON_INTERCEPTOR_STRNDUP_IMPL\n#define COMMON_INTERCEPTOR_STRNDUP_IMPL(ctx, s, size)                         \\\n  COMMON_INTERCEPTOR_ENTER(ctx, strndup, s, size);                            \\\n  uptr copy_length = internal_strnlen(s, size);                               \\\n  char *new_mem = (char *)WRAP(malloc)(copy_length + 1);                      \\\n  if (common_flags()->intercept_strndup) {                                    \\\n    COMMON_INTERCEPTOR_READ_STRING(ctx, s, Min(size, copy_length + 1));       \\\n  }                                                                           \\\n  COMMON_INTERCEPTOR_COPY_STRING(ctx, new_mem, s, copy_length);               \\\n  internal_memcpy(new_mem, s, copy_length);                                   \\\n  new_mem[copy_length] = '\\0';                         
                       \\\n  return new_mem;\n#endif\n\n#ifndef COMMON_INTERCEPTOR_STRERROR\n#define COMMON_INTERCEPTOR_STRERROR() {}\n#endif\n\nstruct FileMetadata {\n  // For open_memstream().\n  char **addr;\n  SIZE_T *size;\n};\n\nstruct CommonInterceptorMetadata {\n  enum {\n    CIMT_INVALID = 0,\n    CIMT_FILE\n  } type;\n  union {\n    FileMetadata file;\n  };\n};\n\n#if SI_POSIX\ntypedef AddrHashMap<CommonInterceptorMetadata, 31051> MetadataHashMap;\n\nstatic MetadataHashMap *interceptor_metadata_map;\n\nUNUSED static void SetInterceptorMetadata(__sanitizer_FILE *addr,\n                                          const FileMetadata &file) {\n  MetadataHashMap::Handle h(interceptor_metadata_map, (uptr)addr);\n  CHECK(h.created());\n  h->type = CommonInterceptorMetadata::CIMT_FILE;\n  h->file = file;\n}\n\nUNUSED static const FileMetadata *GetInterceptorMetadata(\n    __sanitizer_FILE *addr) {\n  MetadataHashMap::Handle h(interceptor_metadata_map, (uptr)addr,\n                            /* remove */ false,\n                            /* create */ false);\n  if (addr && h.exists()) {\n    CHECK(!h.created());\n    CHECK(h->type == CommonInterceptorMetadata::CIMT_FILE);\n    return &h->file;\n  } else {\n    return 0;\n  }\n}\n\nUNUSED static void DeleteInterceptorMetadata(void *addr) {\n  MetadataHashMap::Handle h(interceptor_metadata_map, (uptr)addr, true);\n  CHECK(h.exists());\n}\n#endif  // SI_POSIX\n\n#if SANITIZER_INTERCEPT_STRLEN\nINTERCEPTOR(SIZE_T, strlen, const char *s) {\n  // Sometimes strlen is called prior to InitializeCommonInterceptors,\n  // in which case the REAL(strlen) typically used in\n  // COMMON_INTERCEPTOR_ENTER will fail.  
We use internal_strlen here\n  // to handle that.\n  if (COMMON_INTERCEPTOR_NOTHING_IS_INITIALIZED)\n    return internal_strlen(s);\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, strlen, s);\n  SIZE_T result = REAL(strlen)(s);\n  if (common_flags()->intercept_strlen)\n    COMMON_INTERCEPTOR_READ_RANGE(ctx, s, result + 1);\n  return result;\n}\n#define INIT_STRLEN COMMON_INTERCEPT_FUNCTION(strlen)\n#else\n#define INIT_STRLEN\n#endif\n\n#if SANITIZER_INTERCEPT_STRNLEN\nINTERCEPTOR(SIZE_T, strnlen, const char *s, SIZE_T maxlen) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, strnlen, s, maxlen);\n  SIZE_T length = REAL(strnlen)(s, maxlen);\n  if (common_flags()->intercept_strlen)\n    COMMON_INTERCEPTOR_READ_RANGE(ctx, s, Min(length + 1, maxlen));\n  return length;\n}\n#define INIT_STRNLEN COMMON_INTERCEPT_FUNCTION(strnlen)\n#else\n#define INIT_STRNLEN\n#endif\n\n#if SANITIZER_INTERCEPT_STRNDUP\nINTERCEPTOR(char*, strndup, const char *s, uptr size) {\n  void *ctx;\n  COMMON_INTERCEPTOR_STRNDUP_IMPL(ctx, s, size);\n}\n#define INIT_STRNDUP COMMON_INTERCEPT_FUNCTION(strndup)\n#else\n#define INIT_STRNDUP\n#endif // SANITIZER_INTERCEPT_STRNDUP\n\n#if SANITIZER_INTERCEPT___STRNDUP\nINTERCEPTOR(char*, __strndup, const char *s, uptr size) {\n  void *ctx;\n  COMMON_INTERCEPTOR_STRNDUP_IMPL(ctx, s, size);\n}\n#define INIT___STRNDUP COMMON_INTERCEPT_FUNCTION(__strndup)\n#else\n#define INIT___STRNDUP\n#endif // SANITIZER_INTERCEPT___STRNDUP\n\n#if SANITIZER_INTERCEPT_TEXTDOMAIN\nINTERCEPTOR(char*, textdomain, const char *domainname) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, textdomain, domainname);\n  if (domainname) COMMON_INTERCEPTOR_READ_STRING(ctx, domainname, 0);\n  char *domain = REAL(textdomain)(domainname);\n  if (domain) {\n    COMMON_INTERCEPTOR_INITIALIZE_RANGE(domain, internal_strlen(domain) + 1);\n  }\n  return domain;\n}\n#define INIT_TEXTDOMAIN COMMON_INTERCEPT_FUNCTION(textdomain)\n#else\n#define INIT_TEXTDOMAIN\n#endif\n\n#if 
SANITIZER_INTERCEPT_STRCMP\nstatic inline int CharCmpX(unsigned char c1, unsigned char c2) {\n  return (c1 == c2) ? 0 : (c1 < c2) ? -1 : 1;\n}\n\nDECLARE_WEAK_INTERCEPTOR_HOOK(__sanitizer_weak_hook_strcmp, uptr called_pc,\n                              const char *s1, const char *s2, int result)\n\nINTERCEPTOR(int, strcmp, const char *s1, const char *s2) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, strcmp, s1, s2);\n  unsigned char c1, c2;\n  uptr i;\n  for (i = 0;; i++) {\n    c1 = (unsigned char)s1[i];\n    c2 = (unsigned char)s2[i];\n    if (c1 != c2 || c1 == '\\0') break;\n  }\n  if (common_flags()->intercept_strcmp) {\n    COMMON_INTERCEPTOR_READ_STRING(ctx, s1, i + 1);\n    COMMON_INTERCEPTOR_READ_STRING(ctx, s2, i + 1);\n  }\n  int result = CharCmpX(c1, c2);\n  CALL_WEAK_INTERCEPTOR_HOOK(__sanitizer_weak_hook_strcmp, GET_CALLER_PC(), s1,\n                             s2, result);\n  return result;\n}\n\nDECLARE_WEAK_INTERCEPTOR_HOOK(__sanitizer_weak_hook_strncmp, uptr called_pc,\n                              const char *s1, const char *s2, uptr n,\n                              int result)\n\nINTERCEPTOR(int, strncmp, const char *s1, const char *s2, uptr size) {\n  if (COMMON_INTERCEPTOR_NOTHING_IS_INITIALIZED)\n    return internal_strncmp(s1, s2, size);\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, strncmp, s1, s2, size);\n  unsigned char c1 = 0, c2 = 0;\n  uptr i;\n  for (i = 0; i < size; i++) {\n    c1 = (unsigned char)s1[i];\n    c2 = (unsigned char)s2[i];\n    if (c1 != c2 || c1 == '\\0') break;\n  }\n  uptr i1 = i;\n  uptr i2 = i;\n  if (common_flags()->strict_string_checks) {\n    for (; i1 < size && s1[i1]; i1++) {}\n    for (; i2 < size && s2[i2]; i2++) {}\n  }\n  COMMON_INTERCEPTOR_READ_RANGE((ctx), (s1), Min(i1 + 1, size));\n  COMMON_INTERCEPTOR_READ_RANGE((ctx), (s2), Min(i2 + 1, size));\n  int result = CharCmpX(c1, c2);\n  CALL_WEAK_INTERCEPTOR_HOOK(__sanitizer_weak_hook_strncmp, GET_CALLER_PC(), s1,\n                             s2, size, 
result);\n  return result;\n}\n\n#define INIT_STRCMP COMMON_INTERCEPT_FUNCTION(strcmp)\n#define INIT_STRNCMP COMMON_INTERCEPT_FUNCTION(strncmp)\n#else\n#define INIT_STRCMP\n#define INIT_STRNCMP\n#endif\n\n#if SANITIZER_INTERCEPT_STRCASECMP\nstatic inline int CharCaseCmp(unsigned char c1, unsigned char c2) {\n  int c1_low = ToLower(c1);\n  int c2_low = ToLower(c2);\n  return c1_low - c2_low;\n}\n\nDECLARE_WEAK_INTERCEPTOR_HOOK(__sanitizer_weak_hook_strcasecmp, uptr called_pc,\n                              const char *s1, const char *s2, int result)\n\nINTERCEPTOR(int, strcasecmp, const char *s1, const char *s2) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, strcasecmp, s1, s2);\n  unsigned char c1 = 0, c2 = 0;\n  uptr i;\n  for (i = 0;; i++) {\n    c1 = (unsigned char)s1[i];\n    c2 = (unsigned char)s2[i];\n    if (CharCaseCmp(c1, c2) != 0 || c1 == '\\0') break;\n  }\n  COMMON_INTERCEPTOR_READ_STRING(ctx, s1, i + 1);\n  COMMON_INTERCEPTOR_READ_STRING(ctx, s2, i + 1);\n  int result = CharCaseCmp(c1, c2);\n  CALL_WEAK_INTERCEPTOR_HOOK(__sanitizer_weak_hook_strcasecmp, GET_CALLER_PC(),\n                             s1, s2, result);\n  return result;\n}\n\nDECLARE_WEAK_INTERCEPTOR_HOOK(__sanitizer_weak_hook_strncasecmp, uptr called_pc,\n                              const char *s1, const char *s2, uptr size,\n                              int result)\n\nINTERCEPTOR(int, strncasecmp, const char *s1, const char *s2, SIZE_T size) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, strncasecmp, s1, s2, size);\n  unsigned char c1 = 0, c2 = 0;\n  uptr i;\n  for (i = 0; i < size; i++) {\n    c1 = (unsigned char)s1[i];\n    c2 = (unsigned char)s2[i];\n    if (CharCaseCmp(c1, c2) != 0 || c1 == '\\0') break;\n  }\n  uptr i1 = i;\n  uptr i2 = i;\n  if (common_flags()->strict_string_checks) {\n    for (; i1 < size && s1[i1]; i1++) {}\n    for (; i2 < size && s2[i2]; i2++) {}\n  }\n  COMMON_INTERCEPTOR_READ_RANGE((ctx), (s1), Min(i1 + 1, size));\n  
COMMON_INTERCEPTOR_READ_RANGE((ctx), (s2), Min(i2 + 1, size));\n  int result = CharCaseCmp(c1, c2);\n  CALL_WEAK_INTERCEPTOR_HOOK(__sanitizer_weak_hook_strncasecmp, GET_CALLER_PC(),\n                             s1, s2, size, result);\n  return result;\n}\n\n#define INIT_STRCASECMP COMMON_INTERCEPT_FUNCTION(strcasecmp)\n#define INIT_STRNCASECMP COMMON_INTERCEPT_FUNCTION(strncasecmp)\n#else\n#define INIT_STRCASECMP\n#define INIT_STRNCASECMP\n#endif\n\n#if SANITIZER_INTERCEPT_STRSTR || SANITIZER_INTERCEPT_STRCASESTR\nstatic inline void StrstrCheck(void *ctx, char *r, const char *s1,\n                               const char *s2) {\n    uptr len1 = internal_strlen(s1);\n    uptr len2 = internal_strlen(s2);\n    COMMON_INTERCEPTOR_READ_STRING(ctx, s1, r ? r - s1 + len2 : len1 + 1);\n    COMMON_INTERCEPTOR_READ_RANGE(ctx, s2, len2 + 1);\n}\n#endif\n\n#if SANITIZER_INTERCEPT_STRSTR\n\nDECLARE_WEAK_INTERCEPTOR_HOOK(__sanitizer_weak_hook_strstr, uptr called_pc,\n                              const char *s1, const char *s2, char *result)\n\nINTERCEPTOR(char*, strstr, const char *s1, const char *s2) {\n  if (COMMON_INTERCEPTOR_NOTHING_IS_INITIALIZED)\n    return internal_strstr(s1, s2);\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, strstr, s1, s2);\n  char *r = REAL(strstr)(s1, s2);\n  if (common_flags()->intercept_strstr)\n    StrstrCheck(ctx, r, s1, s2);\n  CALL_WEAK_INTERCEPTOR_HOOK(__sanitizer_weak_hook_strstr, GET_CALLER_PC(), s1,\n                             s2, r);\n  return r;\n}\n\n#define INIT_STRSTR COMMON_INTERCEPT_FUNCTION(strstr);\n#else\n#define INIT_STRSTR\n#endif\n\n#if SANITIZER_INTERCEPT_STRCASESTR\n\nDECLARE_WEAK_INTERCEPTOR_HOOK(__sanitizer_weak_hook_strcasestr, uptr called_pc,\n                              const char *s1, const char *s2, char *result)\n\nINTERCEPTOR(char*, strcasestr, const char *s1, const char *s2) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, strcasestr, s1, s2);\n  char *r = REAL(strcasestr)(s1, s2);\n  if 
(common_flags()->intercept_strstr)\n    StrstrCheck(ctx, r, s1, s2);\n  CALL_WEAK_INTERCEPTOR_HOOK(__sanitizer_weak_hook_strcasestr, GET_CALLER_PC(),\n                             s1, s2, r);\n  return r;\n}\n\n#define INIT_STRCASESTR COMMON_INTERCEPT_FUNCTION(strcasestr);\n#else\n#define INIT_STRCASESTR\n#endif\n\n#if SANITIZER_INTERCEPT_STRTOK\n\nINTERCEPTOR(char*, strtok, char *str, const char *delimiters) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, strtok, str, delimiters);\n  if (!common_flags()->intercept_strtok) {\n    return REAL(strtok)(str, delimiters);\n  }\n  if (common_flags()->strict_string_checks) {\n    // If strict_string_checks is enabled, we check the whole first argument\n    // string on the first call (strtok saves this string in a static buffer\n    // for subsequent calls). We do not need to check strtok's result.\n    // As the delimiters can change, we check them every call.\n    if (str != nullptr) {\n      COMMON_INTERCEPTOR_READ_RANGE(ctx, str, internal_strlen(str) + 1);\n    }\n    COMMON_INTERCEPTOR_READ_RANGE(ctx, delimiters,\n                                  internal_strlen(delimiters) + 1);\n    return REAL(strtok)(str, delimiters);\n  } else {\n    // However, when strict_string_checks is disabled we cannot check the\n    // whole string on the first call. Instead, we check the result string\n    // which is guaranteed to be a NULL-terminated substring of the first\n    // argument. 
We also conservatively check one character of str and the\n    // delimiters.\n    if (str != nullptr) {\n      COMMON_INTERCEPTOR_READ_STRING(ctx, str, 1);\n    }\n    COMMON_INTERCEPTOR_READ_RANGE(ctx, delimiters, 1);\n    char *result = REAL(strtok)(str, delimiters);\n    if (result != nullptr) {\n      COMMON_INTERCEPTOR_READ_RANGE(ctx, result, internal_strlen(result) + 1);\n    } else if (str != nullptr) {\n      // No delimiter were found, it's safe to assume that the entire str was\n      // scanned.\n      COMMON_INTERCEPTOR_READ_RANGE(ctx, str, internal_strlen(str) + 1);\n    }\n    return result;\n  }\n}\n\n#define INIT_STRTOK COMMON_INTERCEPT_FUNCTION(strtok)\n#else\n#define INIT_STRTOK\n#endif\n\n#if SANITIZER_INTERCEPT_MEMMEM\nDECLARE_WEAK_INTERCEPTOR_HOOK(__sanitizer_weak_hook_memmem, uptr called_pc,\n                              const void *s1, SIZE_T len1, const void *s2,\n                              SIZE_T len2, void *result)\n\nINTERCEPTOR(void*, memmem, const void *s1, SIZE_T len1, const void *s2,\n            SIZE_T len2) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, memmem, s1, len1, s2, len2);\n  void *r = REAL(memmem)(s1, len1, s2, len2);\n  if (common_flags()->intercept_memmem) {\n    COMMON_INTERCEPTOR_READ_RANGE(ctx, s1, len1);\n    COMMON_INTERCEPTOR_READ_RANGE(ctx, s2, len2);\n  }\n  CALL_WEAK_INTERCEPTOR_HOOK(__sanitizer_weak_hook_memmem, GET_CALLER_PC(),\n                             s1, len1, s2, len2, r);\n  return r;\n}\n\n#define INIT_MEMMEM COMMON_INTERCEPT_FUNCTION(memmem);\n#else\n#define INIT_MEMMEM\n#endif  // SANITIZER_INTERCEPT_MEMMEM\n\n#if SANITIZER_INTERCEPT_STRCHR\nINTERCEPTOR(char*, strchr, const char *s, int c) {\n  void *ctx;\n  if (COMMON_INTERCEPTOR_NOTHING_IS_INITIALIZED)\n    return internal_strchr(s, c);\n  COMMON_INTERCEPTOR_ENTER(ctx, strchr, s, c);\n  char *result = REAL(strchr)(s, c);\n  if (common_flags()->intercept_strchr) {\n    // Keep strlen as macro argument, as macro may ignore it.\n    
COMMON_INTERCEPTOR_READ_STRING(ctx, s,\n      (result ? result - s : internal_strlen(s)) + 1);\n  }\n  return result;\n}\n#define INIT_STRCHR COMMON_INTERCEPT_FUNCTION(strchr)\n#else\n#define INIT_STRCHR\n#endif\n\n#if SANITIZER_INTERCEPT_STRCHRNUL\nINTERCEPTOR(char*, strchrnul, const char *s, int c) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, strchrnul, s, c);\n  char *result = REAL(strchrnul)(s, c);\n  uptr len = result - s + 1;\n  if (common_flags()->intercept_strchr)\n    COMMON_INTERCEPTOR_READ_STRING(ctx, s, len);\n  return result;\n}\n#define INIT_STRCHRNUL COMMON_INTERCEPT_FUNCTION(strchrnul)\n#else\n#define INIT_STRCHRNUL\n#endif\n\n#if SANITIZER_INTERCEPT_STRRCHR\nINTERCEPTOR(char*, strrchr, const char *s, int c) {\n  void *ctx;\n  if (COMMON_INTERCEPTOR_NOTHING_IS_INITIALIZED)\n    return internal_strrchr(s, c);\n  COMMON_INTERCEPTOR_ENTER(ctx, strrchr, s, c);\n  if (common_flags()->intercept_strchr)\n    COMMON_INTERCEPTOR_READ_RANGE(ctx, s, internal_strlen(s) + 1);\n  return REAL(strrchr)(s, c);\n}\n#define INIT_STRRCHR COMMON_INTERCEPT_FUNCTION(strrchr)\n#else\n#define INIT_STRRCHR\n#endif\n\n#if SANITIZER_INTERCEPT_STRSPN\nINTERCEPTOR(SIZE_T, strspn, const char *s1, const char *s2) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, strspn, s1, s2);\n  SIZE_T r = REAL(strspn)(s1, s2);\n  if (common_flags()->intercept_strspn) {\n    COMMON_INTERCEPTOR_READ_RANGE(ctx, s2, internal_strlen(s2) + 1);\n    COMMON_INTERCEPTOR_READ_STRING(ctx, s1, r + 1);\n  }\n  return r;\n}\n\nINTERCEPTOR(SIZE_T, strcspn, const char *s1, const char *s2) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, strcspn, s1, s2);\n  SIZE_T r = REAL(strcspn)(s1, s2);\n  if (common_flags()->intercept_strspn) {\n    COMMON_INTERCEPTOR_READ_RANGE(ctx, s2, internal_strlen(s2) + 1);\n    COMMON_INTERCEPTOR_READ_STRING(ctx, s1, r + 1);\n  }\n  return r;\n}\n\n#define INIT_STRSPN \\\n  COMMON_INTERCEPT_FUNCTION(strspn); \\\n  COMMON_INTERCEPT_FUNCTION(strcspn);\n#else\n#define 
INIT_STRSPN\n#endif\n\n#if SANITIZER_INTERCEPT_STRPBRK\nINTERCEPTOR(char *, strpbrk, const char *s1, const char *s2) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, strpbrk, s1, s2);\n  char *r = REAL(strpbrk)(s1, s2);\n  if (common_flags()->intercept_strpbrk) {\n    COMMON_INTERCEPTOR_READ_RANGE(ctx, s2, internal_strlen(s2) + 1);\n    COMMON_INTERCEPTOR_READ_STRING(ctx, s1,\n        r ? r - s1 + 1 : internal_strlen(s1) + 1);\n  }\n  return r;\n}\n\n#define INIT_STRPBRK COMMON_INTERCEPT_FUNCTION(strpbrk);\n#else\n#define INIT_STRPBRK\n#endif\n\n#if SANITIZER_INTERCEPT_MEMSET\nINTERCEPTOR(void *, memset, void *dst, int v, uptr size) {\n  void *ctx;\n  COMMON_INTERCEPTOR_MEMSET_IMPL(ctx, dst, v, size);\n}\n\n#define INIT_MEMSET COMMON_INTERCEPT_FUNCTION(memset)\n#else\n#define INIT_MEMSET\n#endif\n\n#if SANITIZER_INTERCEPT_MEMMOVE\nINTERCEPTOR(void *, memmove, void *dst, const void *src, uptr size) {\n  void *ctx;\n  COMMON_INTERCEPTOR_MEMMOVE_IMPL(ctx, dst, src, size);\n}\n\n#define INIT_MEMMOVE COMMON_INTERCEPT_FUNCTION(memmove)\n#else\n#define INIT_MEMMOVE\n#endif\n\n#if SANITIZER_INTERCEPT_MEMCPY\nINTERCEPTOR(void *, memcpy, void *dst, const void *src, uptr size) {\n  // On OS X, calling internal_memcpy here will cause memory corruptions,\n  // because memcpy and memmove are actually aliases of the same\n  // implementation.  
We need to use internal_memmove here.\n  // N.B.: If we switch this to internal_ we'll have to use internal_memmove\n  // due to memcpy being an alias of memmove on OS X.\n  void *ctx;\n#if PLATFORM_HAS_DIFFERENT_MEMCPY_AND_MEMMOVE\n    COMMON_INTERCEPTOR_MEMCPY_IMPL(ctx, dst, src, size);\n#else\n    COMMON_INTERCEPTOR_MEMMOVE_IMPL(ctx, dst, src, size);\n#endif\n}\n\n#define INIT_MEMCPY                                  \\\n  do {                                               \\\n    if (PLATFORM_HAS_DIFFERENT_MEMCPY_AND_MEMMOVE) { \\\n      COMMON_INTERCEPT_FUNCTION(memcpy);             \\\n    } else {                                         \\\n      ASSIGN_REAL(memcpy, memmove);                  \\\n    }                                                \\\n    CHECK(REAL(memcpy));                             \\\n  } while (false)\n\n#else\n#define INIT_MEMCPY\n#endif\n\n#if SANITIZER_INTERCEPT_MEMCMP\nDECLARE_WEAK_INTERCEPTOR_HOOK(__sanitizer_weak_hook_memcmp, uptr called_pc,\n                              const void *s1, const void *s2, uptr n,\n                              int result)\n\n// Common code for `memcmp` and `bcmp`.\nint MemcmpInterceptorCommon(void *ctx,\n                            int (*real_fn)(const void *, const void *, uptr),\n                            const void *a1, const void *a2, uptr size) {\n  if (common_flags()->intercept_memcmp) {\n    if (common_flags()->strict_memcmp) {\n      // Check the entire regions even if the first bytes of the buffers are\n      // different.\n      COMMON_INTERCEPTOR_READ_RANGE(ctx, a1, size);\n      COMMON_INTERCEPTOR_READ_RANGE(ctx, a2, size);\n      // Fallthrough to REAL(memcmp) below.\n    } else {\n      unsigned char c1 = 0, c2 = 0;\n      const unsigned char *s1 = (const unsigned char*)a1;\n      const unsigned char *s2 = (const unsigned char*)a2;\n      uptr i;\n      for (i = 0; i < size; i++) {\n        c1 = s1[i];\n        c2 = s2[i];\n        if (c1 != c2) break;\n      }\n      
COMMON_INTERCEPTOR_READ_RANGE(ctx, s1, Min(i + 1, size));\n      COMMON_INTERCEPTOR_READ_RANGE(ctx, s2, Min(i + 1, size));\n      int r = CharCmpX(c1, c2);\n      CALL_WEAK_INTERCEPTOR_HOOK(__sanitizer_weak_hook_memcmp, GET_CALLER_PC(),\n                                 a1, a2, size, r);\n      return r;\n    }\n  }\n  int result = real_fn(a1, a2, size);\n  CALL_WEAK_INTERCEPTOR_HOOK(__sanitizer_weak_hook_memcmp, GET_CALLER_PC(), a1,\n                             a2, size, result);\n  return result;\n}\n\nINTERCEPTOR(int, memcmp, const void *a1, const void *a2, uptr size) {\n  if (COMMON_INTERCEPTOR_NOTHING_IS_INITIALIZED)\n    return internal_memcmp(a1, a2, size);\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, memcmp, a1, a2, size);\n  return MemcmpInterceptorCommon(ctx, REAL(memcmp), a1, a2, size);\n}\n\n#define INIT_MEMCMP COMMON_INTERCEPT_FUNCTION(memcmp)\n#else\n#define INIT_MEMCMP\n#endif\n\n#if SANITIZER_INTERCEPT_BCMP\nINTERCEPTOR(int, bcmp, const void *a1, const void *a2, uptr size) {\n  if (COMMON_INTERCEPTOR_NOTHING_IS_INITIALIZED)\n    return internal_memcmp(a1, a2, size);\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, bcmp, a1, a2, size);\n  return MemcmpInterceptorCommon(ctx, REAL(bcmp), a1, a2, size);\n}\n\n#define INIT_BCMP COMMON_INTERCEPT_FUNCTION(bcmp)\n#else\n#define INIT_BCMP\n#endif\n\n#if SANITIZER_INTERCEPT_MEMCHR\nINTERCEPTOR(void*, memchr, const void *s, int c, SIZE_T n) {\n  if (COMMON_INTERCEPTOR_NOTHING_IS_INITIALIZED)\n    return internal_memchr(s, c, n);\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, memchr, s, c, n);\n#if SANITIZER_WINDOWS\n  void *res;\n  if (REAL(memchr)) {\n    res = REAL(memchr)(s, c, n);\n  } else {\n    res = internal_memchr(s, c, n);\n  }\n#else\n  void *res = REAL(memchr)(s, c, n);\n#endif\n  uptr len = res ? 
(char *)res - (const char *)s + 1 : n;\n  COMMON_INTERCEPTOR_READ_RANGE(ctx, s, len);\n  return res;\n}\n\n#define INIT_MEMCHR COMMON_INTERCEPT_FUNCTION(memchr)\n#else\n#define INIT_MEMCHR\n#endif\n\n#if SANITIZER_INTERCEPT_MEMRCHR\nINTERCEPTOR(void*, memrchr, const void *s, int c, SIZE_T n) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, memrchr, s, c, n);\n  COMMON_INTERCEPTOR_READ_RANGE(ctx, s, n);\n  return REAL(memrchr)(s, c, n);\n}\n\n#define INIT_MEMRCHR COMMON_INTERCEPT_FUNCTION(memrchr)\n#else\n#define INIT_MEMRCHR\n#endif\n\n#if SANITIZER_INTERCEPT_FREXP\nINTERCEPTOR(double, frexp, double x, int *exp) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, frexp, x, exp);\n  // Assuming frexp() always writes to |exp|.\n  COMMON_INTERCEPTOR_WRITE_RANGE(ctx, exp, sizeof(*exp));\n  double res = REAL(frexp)(x, exp);\n  COMMON_INTERCEPTOR_INITIALIZE_RANGE(exp, sizeof(*exp));\n  return res;\n}\n\n#define INIT_FREXP COMMON_INTERCEPT_FUNCTION(frexp);\n#else\n#define INIT_FREXP\n#endif  // SANITIZER_INTERCEPT_FREXP\n\n#if SANITIZER_INTERCEPT_FREXPF_FREXPL\nINTERCEPTOR(float, frexpf, float x, int *exp) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, frexpf, x, exp);\n  COMMON_INTERCEPTOR_WRITE_RANGE(ctx, exp, sizeof(*exp));\n  float res = REAL(frexpf)(x, exp);\n  COMMON_INTERCEPTOR_INITIALIZE_RANGE(exp, sizeof(*exp));\n  return res;\n}\n\nINTERCEPTOR(long double, frexpl, long double x, int *exp) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, frexpl, x, exp);\n  COMMON_INTERCEPTOR_WRITE_RANGE(ctx, exp, sizeof(*exp));\n  long double res = REAL(frexpl)(x, exp);\n  COMMON_INTERCEPTOR_INITIALIZE_RANGE(exp, sizeof(*exp));\n  return res;\n}\n\n#define INIT_FREXPF_FREXPL           \\\n  COMMON_INTERCEPT_FUNCTION(frexpf); \\\n  COMMON_INTERCEPT_FUNCTION_LDBL(frexpl)\n#else\n#define INIT_FREXPF_FREXPL\n#endif  // SANITIZER_INTERCEPT_FREXPF_FREXPL\n\n#if SI_POSIX\nstatic void write_iovec(void *ctx, struct __sanitizer_iovec *iovec,\n                        SIZE_T iovlen, SIZE_T 
maxlen) {\n  for (SIZE_T i = 0; i < iovlen && maxlen; ++i) {\n    SSIZE_T sz = Min(iovec[i].iov_len, maxlen);\n    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, iovec[i].iov_base, sz);\n    maxlen -= sz;\n  }\n}\n\nstatic void read_iovec(void *ctx, struct __sanitizer_iovec *iovec,\n                       SIZE_T iovlen, SIZE_T maxlen) {\n  COMMON_INTERCEPTOR_READ_RANGE(ctx, iovec, sizeof(*iovec) * iovlen);\n  for (SIZE_T i = 0; i < iovlen && maxlen; ++i) {\n    SSIZE_T sz = Min(iovec[i].iov_len, maxlen);\n    COMMON_INTERCEPTOR_READ_RANGE(ctx, iovec[i].iov_base, sz);\n    maxlen -= sz;\n  }\n}\n#endif\n\n#if SANITIZER_INTERCEPT_READ\nINTERCEPTOR(SSIZE_T, read, int fd, void *ptr, SIZE_T count) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, read, fd, ptr, count);\n  COMMON_INTERCEPTOR_FD_ACCESS(ctx, fd);\n  // FIXME: under ASan the call below may write to freed memory and corrupt\n  // its metadata. See\n  // https://github.com/google/sanitizers/issues/321.\n  SSIZE_T res = REAL(read)(fd, ptr, count);\n  if (res > 0) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ptr, res);\n  if (res >= 0 && fd >= 0) COMMON_INTERCEPTOR_FD_ACQUIRE(ctx, fd);\n  return res;\n}\n#define INIT_READ COMMON_INTERCEPT_FUNCTION(read)\n#else\n#define INIT_READ\n#endif\n\n#if SANITIZER_INTERCEPT_FREAD\nINTERCEPTOR(SIZE_T, fread, void *ptr, SIZE_T size, SIZE_T nmemb, void *file) {\n  // libc file streams can call user-supplied functions, see fopencookie.\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, fread, ptr, size, nmemb, file);\n  // FIXME: under ASan the call below may write to freed memory and corrupt\n  // its metadata. 
See\n  // https://github.com/google/sanitizers/issues/321.\n  SIZE_T res = REAL(fread)(ptr, size, nmemb, file);\n  if (res > 0) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ptr, res * size);\n  return res;\n}\n#define INIT_FREAD COMMON_INTERCEPT_FUNCTION(fread)\n#else\n#define INIT_FREAD\n#endif\n\n#if SANITIZER_INTERCEPT_PREAD\nINTERCEPTOR(SSIZE_T, pread, int fd, void *ptr, SIZE_T count, OFF_T offset) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, pread, fd, ptr, count, offset);\n  COMMON_INTERCEPTOR_FD_ACCESS(ctx, fd);\n  // FIXME: under ASan the call below may write to freed memory and corrupt\n  // its metadata. See\n  // https://github.com/google/sanitizers/issues/321.\n  SSIZE_T res = REAL(pread)(fd, ptr, count, offset);\n  if (res > 0) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ptr, res);\n  if (res >= 0 && fd >= 0) COMMON_INTERCEPTOR_FD_ACQUIRE(ctx, fd);\n  return res;\n}\n#define INIT_PREAD COMMON_INTERCEPT_FUNCTION(pread)\n#else\n#define INIT_PREAD\n#endif\n\n#if SANITIZER_INTERCEPT_PREAD64\nINTERCEPTOR(SSIZE_T, pread64, int fd, void *ptr, SIZE_T count, OFF64_T offset) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, pread64, fd, ptr, count, offset);\n  COMMON_INTERCEPTOR_FD_ACCESS(ctx, fd);\n  // FIXME: under ASan the call below may write to freed memory and corrupt\n  // its metadata. 
See\n  // https://github.com/google/sanitizers/issues/321.\n  SSIZE_T res = REAL(pread64)(fd, ptr, count, offset);\n  if (res > 0) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ptr, res);\n  if (res >= 0 && fd >= 0) COMMON_INTERCEPTOR_FD_ACQUIRE(ctx, fd);\n  return res;\n}\n#define INIT_PREAD64 COMMON_INTERCEPT_FUNCTION(pread64)\n#else\n#define INIT_PREAD64\n#endif\n\n#if SANITIZER_INTERCEPT_READV\nINTERCEPTOR_WITH_SUFFIX(SSIZE_T, readv, int fd, __sanitizer_iovec *iov,\n                        int iovcnt) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, readv, fd, iov, iovcnt);\n  COMMON_INTERCEPTOR_FD_ACCESS(ctx, fd);\n  SSIZE_T res = REAL(readv)(fd, iov, iovcnt);\n  if (res > 0) write_iovec(ctx, iov, iovcnt, res);\n  if (res >= 0 && fd >= 0) COMMON_INTERCEPTOR_FD_ACQUIRE(ctx, fd);\n  return res;\n}\n#define INIT_READV COMMON_INTERCEPT_FUNCTION(readv)\n#else\n#define INIT_READV\n#endif\n\n#if SANITIZER_INTERCEPT_PREADV\nINTERCEPTOR(SSIZE_T, preadv, int fd, __sanitizer_iovec *iov, int iovcnt,\n            OFF_T offset) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, preadv, fd, iov, iovcnt, offset);\n  COMMON_INTERCEPTOR_FD_ACCESS(ctx, fd);\n  SSIZE_T res = REAL(preadv)(fd, iov, iovcnt, offset);\n  if (res > 0) write_iovec(ctx, iov, iovcnt, res);\n  if (res >= 0 && fd >= 0) COMMON_INTERCEPTOR_FD_ACQUIRE(ctx, fd);\n  return res;\n}\n#define INIT_PREADV COMMON_INTERCEPT_FUNCTION(preadv)\n#else\n#define INIT_PREADV\n#endif\n\n#if SANITIZER_INTERCEPT_PREADV64\nINTERCEPTOR(SSIZE_T, preadv64, int fd, __sanitizer_iovec *iov, int iovcnt,\n            OFF64_T offset) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, preadv64, fd, iov, iovcnt, offset);\n  COMMON_INTERCEPTOR_FD_ACCESS(ctx, fd);\n  SSIZE_T res = REAL(preadv64)(fd, iov, iovcnt, offset);\n  if (res > 0) write_iovec(ctx, iov, iovcnt, res);\n  if (res >= 0 && fd >= 0) COMMON_INTERCEPTOR_FD_ACQUIRE(ctx, fd);\n  return res;\n}\n#define INIT_PREADV64 COMMON_INTERCEPT_FUNCTION(preadv64)\n#else\n#define 
INIT_PREADV64\n#endif\n\n#if SANITIZER_INTERCEPT_WRITE\nINTERCEPTOR(SSIZE_T, write, int fd, void *ptr, SIZE_T count) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, write, fd, ptr, count);\n  COMMON_INTERCEPTOR_FD_ACCESS(ctx, fd);\n  if (fd >= 0) COMMON_INTERCEPTOR_FD_RELEASE(ctx, fd);\n  SSIZE_T res = REAL(write)(fd, ptr, count);\n  // FIXME: this check should be _before_ the call to REAL(write), not after\n  if (res > 0) COMMON_INTERCEPTOR_READ_RANGE(ctx, ptr, res);\n  return res;\n}\n#define INIT_WRITE COMMON_INTERCEPT_FUNCTION(write)\n#else\n#define INIT_WRITE\n#endif\n\n#if SANITIZER_INTERCEPT_FWRITE\nINTERCEPTOR(SIZE_T, fwrite, const void *p, uptr size, uptr nmemb, void *file) {\n  // libc file streams can call user-supplied functions, see fopencookie.\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, fwrite, p, size, nmemb, file);\n  SIZE_T res = REAL(fwrite)(p, size, nmemb, file);\n  if (res > 0) COMMON_INTERCEPTOR_READ_RANGE(ctx, p, res * size);\n  return res;\n}\n#define INIT_FWRITE COMMON_INTERCEPT_FUNCTION(fwrite)\n#else\n#define INIT_FWRITE\n#endif\n\n#if SANITIZER_INTERCEPT_PWRITE\nINTERCEPTOR(SSIZE_T, pwrite, int fd, void *ptr, SIZE_T count, OFF_T offset) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, pwrite, fd, ptr, count, offset);\n  COMMON_INTERCEPTOR_FD_ACCESS(ctx, fd);\n  if (fd >= 0) COMMON_INTERCEPTOR_FD_RELEASE(ctx, fd);\n  SSIZE_T res = REAL(pwrite)(fd, ptr, count, offset);\n  if (res > 0) COMMON_INTERCEPTOR_READ_RANGE(ctx, ptr, res);\n  return res;\n}\n#define INIT_PWRITE COMMON_INTERCEPT_FUNCTION(pwrite)\n#else\n#define INIT_PWRITE\n#endif\n\n#if SANITIZER_INTERCEPT_PWRITE64\nINTERCEPTOR(SSIZE_T, pwrite64, int fd, void *ptr, OFF64_T count,\n            OFF64_T offset) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, pwrite64, fd, ptr, count, offset);\n  COMMON_INTERCEPTOR_FD_ACCESS(ctx, fd);\n  if (fd >= 0) COMMON_INTERCEPTOR_FD_RELEASE(ctx, fd);\n  SSIZE_T res = REAL(pwrite64)(fd, ptr, count, offset);\n  if (res > 0) 
COMMON_INTERCEPTOR_READ_RANGE(ctx, ptr, res);\n  return res;\n}\n#define INIT_PWRITE64 COMMON_INTERCEPT_FUNCTION(pwrite64)\n#else\n#define INIT_PWRITE64\n#endif\n\n#if SANITIZER_INTERCEPT_WRITEV\nINTERCEPTOR_WITH_SUFFIX(SSIZE_T, writev, int fd, __sanitizer_iovec *iov,\n                        int iovcnt) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, writev, fd, iov, iovcnt);\n  COMMON_INTERCEPTOR_FD_ACCESS(ctx, fd);\n  if (fd >= 0) COMMON_INTERCEPTOR_FD_RELEASE(ctx, fd);\n  SSIZE_T res = REAL(writev)(fd, iov, iovcnt);\n  if (res > 0) read_iovec(ctx, iov, iovcnt, res);\n  return res;\n}\n#define INIT_WRITEV COMMON_INTERCEPT_FUNCTION(writev)\n#else\n#define INIT_WRITEV\n#endif\n\n#if SANITIZER_INTERCEPT_PWRITEV\nINTERCEPTOR(SSIZE_T, pwritev, int fd, __sanitizer_iovec *iov, int iovcnt,\n            OFF_T offset) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, pwritev, fd, iov, iovcnt, offset);\n  COMMON_INTERCEPTOR_FD_ACCESS(ctx, fd);\n  if (fd >= 0) COMMON_INTERCEPTOR_FD_RELEASE(ctx, fd);\n  SSIZE_T res = REAL(pwritev)(fd, iov, iovcnt, offset);\n  if (res > 0) read_iovec(ctx, iov, iovcnt, res);\n  return res;\n}\n#define INIT_PWRITEV COMMON_INTERCEPT_FUNCTION(pwritev)\n#else\n#define INIT_PWRITEV\n#endif\n\n#if SANITIZER_INTERCEPT_PWRITEV64\nINTERCEPTOR(SSIZE_T, pwritev64, int fd, __sanitizer_iovec *iov, int iovcnt,\n            OFF64_T offset) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, pwritev64, fd, iov, iovcnt, offset);\n  COMMON_INTERCEPTOR_FD_ACCESS(ctx, fd);\n  if (fd >= 0) COMMON_INTERCEPTOR_FD_RELEASE(ctx, fd);\n  SSIZE_T res = REAL(pwritev64)(fd, iov, iovcnt, offset);\n  if (res > 0) read_iovec(ctx, iov, iovcnt, res);\n  return res;\n}\n#define INIT_PWRITEV64 COMMON_INTERCEPT_FUNCTION(pwritev64)\n#else\n#define INIT_PWRITEV64\n#endif\n\n#if SANITIZER_INTERCEPT_FGETS\nINTERCEPTOR(char *, fgets, char *s, SIZE_T size, void *file) {\n  // libc file streams can call user-supplied functions, see fopencookie.\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, 
fgets, s, size, file);\n  // FIXME: under ASan the call below may write to freed memory and corrupt\n  // its metadata. See\n  // https://github.com/google/sanitizers/issues/321.\n  char *res = REAL(fgets)(s, size, file);\n  if (res)\n    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, s, internal_strlen(s) + 1);\n  return res;\n}\n#define INIT_FGETS COMMON_INTERCEPT_FUNCTION(fgets)\n#else\n#define INIT_FGETS\n#endif\n\n#if SANITIZER_INTERCEPT_FPUTS\nINTERCEPTOR_WITH_SUFFIX(int, fputs, char *s, void *file) {\n  // libc file streams can call user-supplied functions, see fopencookie.\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, fputs, s, file);\n  if (!SANITIZER_MAC || s) {  // `fputs(NULL, file)` is supported on Darwin.\n    COMMON_INTERCEPTOR_READ_RANGE(ctx, s, internal_strlen(s) + 1);\n  }\n  return REAL(fputs)(s, file);\n}\n#define INIT_FPUTS COMMON_INTERCEPT_FUNCTION(fputs)\n#else\n#define INIT_FPUTS\n#endif\n\n#if SANITIZER_INTERCEPT_PUTS\nINTERCEPTOR(int, puts, char *s) {\n  // libc file streams can call user-supplied functions, see fopencookie.\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, puts, s);\n  if (!SANITIZER_MAC || s) {  // `puts(NULL)` is supported on Darwin.\n    COMMON_INTERCEPTOR_READ_RANGE(ctx, s, internal_strlen(s) + 1);\n  }\n  return REAL(puts)(s);\n}\n#define INIT_PUTS COMMON_INTERCEPT_FUNCTION(puts)\n#else\n#define INIT_PUTS\n#endif\n\n#if SANITIZER_INTERCEPT_PRCTL\nINTERCEPTOR(int, prctl, int option, unsigned long arg2, unsigned long arg3,\n            unsigned long arg4, unsigned long arg5) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, prctl, option, arg2, arg3, arg4, arg5);\n  static const int PR_SET_NAME = 15;\n  int res = REAL(prctl(option, arg2, arg3, arg4, arg5));\n  if (option == PR_SET_NAME) {\n    char buff[16];\n    internal_strncpy(buff, (char *)arg2, 15);\n    buff[15] = 0;\n    COMMON_INTERCEPTOR_SET_THREAD_NAME(ctx, buff);\n  }\n  return res;\n}\n#define INIT_PRCTL COMMON_INTERCEPT_FUNCTION(prctl)\n#else\n#define INIT_PRCTL\n#endif 
 // SANITIZER_INTERCEPT_PRCTL\n\n#if SANITIZER_INTERCEPT_TIME\nINTERCEPTOR(unsigned long, time, unsigned long *t) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, time, t);\n  unsigned long local_t;\n  unsigned long res = REAL(time)(&local_t);\n  if (t && res != (unsigned long)-1) {\n    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, t, sizeof(*t));\n    *t = local_t;\n  }\n  return res;\n}\n#define INIT_TIME COMMON_INTERCEPT_FUNCTION(time);\n#else\n#define INIT_TIME\n#endif  // SANITIZER_INTERCEPT_TIME\n\n#if SANITIZER_INTERCEPT_LOCALTIME_AND_FRIENDS\nstatic void unpoison_tm(void *ctx, __sanitizer_tm *tm) {\n  COMMON_INTERCEPTOR_WRITE_RANGE(ctx, tm, sizeof(*tm));\n#if !SANITIZER_SOLARIS\n  if (tm->tm_zone) {\n    // Can not use COMMON_INTERCEPTOR_WRITE_RANGE here, because tm->tm_zone\n    // can point to shared memory and tsan would report a data race.\n    COMMON_INTERCEPTOR_INITIALIZE_RANGE(tm->tm_zone,\n                                        internal_strlen(tm->tm_zone) + 1);\n  }\n#endif\n}\nINTERCEPTOR(__sanitizer_tm *, localtime, unsigned long *timep) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, localtime, timep);\n  __sanitizer_tm *res = REAL(localtime)(timep);\n  if (res) {\n    COMMON_INTERCEPTOR_READ_RANGE(ctx, timep, sizeof(*timep));\n    unpoison_tm(ctx, res);\n  }\n  return res;\n}\nINTERCEPTOR(__sanitizer_tm *, localtime_r, unsigned long *timep, void *result) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, localtime_r, timep, result);\n  __sanitizer_tm *res = REAL(localtime_r)(timep, result);\n  if (res) {\n    COMMON_INTERCEPTOR_READ_RANGE(ctx, timep, sizeof(*timep));\n    unpoison_tm(ctx, res);\n  }\n  return res;\n}\nINTERCEPTOR(__sanitizer_tm *, gmtime, unsigned long *timep) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, gmtime, timep);\n  __sanitizer_tm *res = REAL(gmtime)(timep);\n  if (res) {\n    COMMON_INTERCEPTOR_READ_RANGE(ctx, timep, sizeof(*timep));\n    unpoison_tm(ctx, res);\n  }\n  return res;\n}\nINTERCEPTOR(__sanitizer_tm *, gmtime_r, 
unsigned long *timep, void *result) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, gmtime_r, timep, result);\n  __sanitizer_tm *res = REAL(gmtime_r)(timep, result);\n  if (res) {\n    COMMON_INTERCEPTOR_READ_RANGE(ctx, timep, sizeof(*timep));\n    unpoison_tm(ctx, res);\n  }\n  return res;\n}\nINTERCEPTOR(char *, ctime, unsigned long *timep) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, ctime, timep);\n  // FIXME: under ASan the call below may write to freed memory and corrupt\n  // its metadata. See\n  // https://github.com/google/sanitizers/issues/321.\n  char *res = REAL(ctime)(timep);\n  if (res) {\n    COMMON_INTERCEPTOR_READ_RANGE(ctx, timep, sizeof(*timep));\n    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, res, internal_strlen(res) + 1);\n  }\n  return res;\n}\nINTERCEPTOR(char *, ctime_r, unsigned long *timep, char *result) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, ctime_r, timep, result);\n  // FIXME: under ASan the call below may write to freed memory and corrupt\n  // its metadata. See\n  // https://github.com/google/sanitizers/issues/321.\n  char *res = REAL(ctime_r)(timep, result);\n  if (res) {\n    COMMON_INTERCEPTOR_READ_RANGE(ctx, timep, sizeof(*timep));\n    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, res, internal_strlen(res) + 1);\n  }\n  return res;\n}\nINTERCEPTOR(char *, asctime, __sanitizer_tm *tm) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, asctime, tm);\n  // FIXME: under ASan the call below may write to freed memory and corrupt\n  // its metadata. See\n  // https://github.com/google/sanitizers/issues/321.\n  char *res = REAL(asctime)(tm);\n  if (res) {\n    COMMON_INTERCEPTOR_READ_RANGE(ctx, tm, sizeof(*tm));\n    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, res, internal_strlen(res) + 1);\n  }\n  return res;\n}\nINTERCEPTOR(char *, asctime_r, __sanitizer_tm *tm, char *result) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, asctime_r, tm, result);\n  // FIXME: under ASan the call below may write to freed memory and corrupt\n  // its metadata. 
See\n  // https://github.com/google/sanitizers/issues/321.\n  char *res = REAL(asctime_r)(tm, result);\n  if (res) {\n    COMMON_INTERCEPTOR_READ_RANGE(ctx, tm, sizeof(*tm));\n    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, res, internal_strlen(res) + 1);\n  }\n  return res;\n}\nINTERCEPTOR(long, mktime, __sanitizer_tm *tm) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, mktime, tm);\n  COMMON_INTERCEPTOR_READ_RANGE(ctx, &tm->tm_sec, sizeof(tm->tm_sec));\n  COMMON_INTERCEPTOR_READ_RANGE(ctx, &tm->tm_min, sizeof(tm->tm_min));\n  COMMON_INTERCEPTOR_READ_RANGE(ctx, &tm->tm_hour, sizeof(tm->tm_hour));\n  COMMON_INTERCEPTOR_READ_RANGE(ctx, &tm->tm_mday, sizeof(tm->tm_mday));\n  COMMON_INTERCEPTOR_READ_RANGE(ctx, &tm->tm_mon, sizeof(tm->tm_mon));\n  COMMON_INTERCEPTOR_READ_RANGE(ctx, &tm->tm_year, sizeof(tm->tm_year));\n  COMMON_INTERCEPTOR_READ_RANGE(ctx, &tm->tm_isdst, sizeof(tm->tm_isdst));\n  long res = REAL(mktime)(tm);\n  if (res != -1) unpoison_tm(ctx, tm);\n  return res;\n}\n#define INIT_LOCALTIME_AND_FRIENDS        \\\n  COMMON_INTERCEPT_FUNCTION(localtime);   \\\n  COMMON_INTERCEPT_FUNCTION(localtime_r); \\\n  COMMON_INTERCEPT_FUNCTION(gmtime);      \\\n  COMMON_INTERCEPT_FUNCTION(gmtime_r);    \\\n  COMMON_INTERCEPT_FUNCTION(ctime);       \\\n  COMMON_INTERCEPT_FUNCTION(ctime_r);     \\\n  COMMON_INTERCEPT_FUNCTION(asctime);     \\\n  COMMON_INTERCEPT_FUNCTION(asctime_r);   \\\n  COMMON_INTERCEPT_FUNCTION(mktime);\n#else\n#define INIT_LOCALTIME_AND_FRIENDS\n#endif  // SANITIZER_INTERCEPT_LOCALTIME_AND_FRIENDS\n\n#if SANITIZER_INTERCEPT_STRPTIME\nINTERCEPTOR(char *, strptime, char *s, char *format, __sanitizer_tm *tm) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, strptime, s, format, tm);\n  if (format)\n    COMMON_INTERCEPTOR_READ_RANGE(ctx, format, internal_strlen(format) + 1);\n  // FIXME: under ASan the call below may write to freed memory and corrupt\n  // its metadata. 
See\n  // https://github.com/google/sanitizers/issues/321.\n  char *res = REAL(strptime)(s, format, tm);\n  COMMON_INTERCEPTOR_READ_STRING(ctx, s, res ? res - s : 0);\n  if (res && tm) {\n    // Do not call unpoison_tm here, because strptime does not, in fact,\n    // initialize the entire struct tm. For example, tm_zone pointer is left\n    // uninitialized.\n    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, tm, sizeof(*tm));\n  }\n  return res;\n}\n#define INIT_STRPTIME COMMON_INTERCEPT_FUNCTION(strptime);\n#else\n#define INIT_STRPTIME\n#endif\n\n#if SANITIZER_INTERCEPT_SCANF || SANITIZER_INTERCEPT_PRINTF\n#include \"sanitizer_common_interceptors_format.inc\"\n\n#define FORMAT_INTERCEPTOR_IMPL(name, vname, ...)                              \\\n  {                                                                            \\\n    void *ctx;                                                                 \\\n    va_list ap;                                                                \\\n    va_start(ap, format);                                                      \\\n    COMMON_INTERCEPTOR_ENTER(ctx, vname, __VA_ARGS__, ap);                     \\\n    int res = WRAP(vname)(__VA_ARGS__, ap);                                    \\\n    va_end(ap);                                                                \\\n    return res;                                                                \\\n  }\n\n#endif\n\n#if SANITIZER_INTERCEPT_SCANF\n\n#define VSCANF_INTERCEPTOR_IMPL(vname, allowGnuMalloc, ...)                    
\\\n  {                                                                            \\\n    void *ctx;                                                                 \\\n    COMMON_INTERCEPTOR_ENTER(ctx, vname, __VA_ARGS__);                         \\\n    va_list aq;                                                                \\\n    va_copy(aq, ap);                                                           \\\n    int res = REAL(vname)(__VA_ARGS__);                                        \\\n    if (res > 0)                                                               \\\n      scanf_common(ctx, res, allowGnuMalloc, format, aq);                      \\\n    va_end(aq);                                                                \\\n    return res;                                                                \\\n  }\n\nINTERCEPTOR(int, vscanf, const char *format, va_list ap)\nVSCANF_INTERCEPTOR_IMPL(vscanf, true, format, ap)\n\nINTERCEPTOR(int, vsscanf, const char *str, const char *format, va_list ap)\nVSCANF_INTERCEPTOR_IMPL(vsscanf, true, str, format, ap)\n\nINTERCEPTOR(int, vfscanf, void *stream, const char *format, va_list ap)\nVSCANF_INTERCEPTOR_IMPL(vfscanf, true, stream, format, ap)\n\n#if SANITIZER_INTERCEPT_ISOC99_SCANF\nINTERCEPTOR(int, __isoc99_vscanf, const char *format, va_list ap)\nVSCANF_INTERCEPTOR_IMPL(__isoc99_vscanf, false, format, ap)\n\nINTERCEPTOR(int, __isoc99_vsscanf, const char *str, const char *format,\n            va_list ap)\nVSCANF_INTERCEPTOR_IMPL(__isoc99_vsscanf, false, str, format, ap)\n\nINTERCEPTOR(int, __isoc99_vfscanf, void *stream, const char *format, va_list ap)\nVSCANF_INTERCEPTOR_IMPL(__isoc99_vfscanf, false, stream, format, ap)\n#endif  // SANITIZER_INTERCEPT_ISOC99_SCANF\n\nINTERCEPTOR(int, scanf, const char *format, ...)\nFORMAT_INTERCEPTOR_IMPL(scanf, vscanf, format)\n\nINTERCEPTOR(int, fscanf, void *stream, const char *format, ...)\nFORMAT_INTERCEPTOR_IMPL(fscanf, vfscanf, stream, format)\n\nINTERCEPTOR(int, 
sscanf, const char *str, const char *format, ...)\nFORMAT_INTERCEPTOR_IMPL(sscanf, vsscanf, str, format)\n\n#if SANITIZER_INTERCEPT_ISOC99_SCANF\nINTERCEPTOR(int, __isoc99_scanf, const char *format, ...)\nFORMAT_INTERCEPTOR_IMPL(__isoc99_scanf, __isoc99_vscanf, format)\n\nINTERCEPTOR(int, __isoc99_fscanf, void *stream, const char *format, ...)\nFORMAT_INTERCEPTOR_IMPL(__isoc99_fscanf, __isoc99_vfscanf, stream, format)\n\nINTERCEPTOR(int, __isoc99_sscanf, const char *str, const char *format, ...)\nFORMAT_INTERCEPTOR_IMPL(__isoc99_sscanf, __isoc99_vsscanf, str, format)\n#endif\n\n#endif\n\n#if SANITIZER_INTERCEPT_SCANF\n#define INIT_SCANF                    \\\n  COMMON_INTERCEPT_FUNCTION_LDBL(scanf);   \\\n  COMMON_INTERCEPT_FUNCTION_LDBL(sscanf);  \\\n  COMMON_INTERCEPT_FUNCTION_LDBL(fscanf);  \\\n  COMMON_INTERCEPT_FUNCTION_LDBL(vscanf);  \\\n  COMMON_INTERCEPT_FUNCTION_LDBL(vsscanf); \\\n  COMMON_INTERCEPT_FUNCTION_LDBL(vfscanf);\n#else\n#define INIT_SCANF\n#endif\n\n#if SANITIZER_INTERCEPT_ISOC99_SCANF\n#define INIT_ISOC99_SCANF                      \\\n  COMMON_INTERCEPT_FUNCTION(__isoc99_scanf);   \\\n  COMMON_INTERCEPT_FUNCTION(__isoc99_sscanf);  \\\n  COMMON_INTERCEPT_FUNCTION(__isoc99_fscanf);  \\\n  COMMON_INTERCEPT_FUNCTION(__isoc99_vscanf);  \\\n  COMMON_INTERCEPT_FUNCTION(__isoc99_vsscanf); \\\n  COMMON_INTERCEPT_FUNCTION(__isoc99_vfscanf);\n#else\n#define INIT_ISOC99_SCANF\n#endif\n\n#if SANITIZER_INTERCEPT_PRINTF\n\n#define VPRINTF_INTERCEPTOR_ENTER(vname, ...)                                  \\\n  void *ctx;                                                                   \\\n  COMMON_INTERCEPTOR_ENTER(ctx, vname, __VA_ARGS__);                           \\\n  va_list aq;                                                                  \\\n  va_copy(aq, ap);\n\n#define VPRINTF_INTERCEPTOR_RETURN()                                           \\\n  va_end(aq);\n\n#define VPRINTF_INTERCEPTOR_IMPL(vname, ...)                                   
\\\n  {                                                                            \\\n    VPRINTF_INTERCEPTOR_ENTER(vname, __VA_ARGS__);                             \\\n    if (common_flags()->check_printf)                                          \\\n      printf_common(ctx, format, aq);                                          \\\n    int res = REAL(vname)(__VA_ARGS__);                                        \\\n    VPRINTF_INTERCEPTOR_RETURN();                                              \\\n    return res;                                                                \\\n  }\n\n// FIXME: under ASan the REAL() call below may write to freed memory and\n// corrupt its metadata. See\n// https://github.com/google/sanitizers/issues/321.\n#define VSPRINTF_INTERCEPTOR_IMPL(vname, str, ...)                             \\\n  {                                                                            \\\n    VPRINTF_INTERCEPTOR_ENTER(vname, str, __VA_ARGS__)                         \\\n    if (common_flags()->check_printf) {                                        \\\n      printf_common(ctx, format, aq);                                          \\\n    }                                                                          \\\n    int res = REAL(vname)(str, __VA_ARGS__);                                   \\\n    if (res >= 0) {                                                            \\\n      COMMON_INTERCEPTOR_WRITE_RANGE(ctx, str, res + 1);                       \\\n    }                                                                          \\\n    VPRINTF_INTERCEPTOR_RETURN();                                              \\\n    return res;                                                                \\\n  }\n\n// FIXME: under ASan the REAL() call below may write to freed memory and\n// corrupt its metadata. See\n// https://github.com/google/sanitizers/issues/321.\n#define VSNPRINTF_INTERCEPTOR_IMPL(vname, str, size, ...)                      
\\\n  {                                                                            \\\n    VPRINTF_INTERCEPTOR_ENTER(vname, str, size, __VA_ARGS__)                   \\\n    if (common_flags()->check_printf) {                                        \\\n      printf_common(ctx, format, aq);                                          \\\n    }                                                                          \\\n    int res = REAL(vname)(str, size, __VA_ARGS__);                             \\\n    if (res >= 0) {                                                            \\\n      COMMON_INTERCEPTOR_WRITE_RANGE(ctx, str, Min(size, (SIZE_T)(res + 1)));  \\\n    }                                                                          \\\n    VPRINTF_INTERCEPTOR_RETURN();                                              \\\n    return res;                                                                \\\n  }\n\n// FIXME: under ASan the REAL() call below may write to freed memory and\n// corrupt its metadata. See\n// https://github.com/google/sanitizers/issues/321.\n#define VASPRINTF_INTERCEPTOR_IMPL(vname, strp, ...)                           
\\\n  {                                                                            \\\n    VPRINTF_INTERCEPTOR_ENTER(vname, strp, __VA_ARGS__)                        \\\n    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, strp, sizeof(char *));                 \\\n    if (common_flags()->check_printf) {                                        \\\n      printf_common(ctx, format, aq);                                          \\\n    }                                                                          \\\n    int res = REAL(vname)(strp, __VA_ARGS__);                                  \\\n    if (res >= 0) {                                                            \\\n      COMMON_INTERCEPTOR_WRITE_RANGE(ctx, *strp, res + 1);                     \\\n    }                                                                          \\\n    VPRINTF_INTERCEPTOR_RETURN();                                              \\\n    return res;                                                                \\\n  }\n\nINTERCEPTOR(int, vprintf, const char *format, va_list ap)\nVPRINTF_INTERCEPTOR_IMPL(vprintf, format, ap)\n\nINTERCEPTOR(int, vfprintf, __sanitizer_FILE *stream, const char *format,\n            va_list ap)\nVPRINTF_INTERCEPTOR_IMPL(vfprintf, stream, format, ap)\n\nINTERCEPTOR(int, vsnprintf, char *str, SIZE_T size, const char *format,\n            va_list ap)\nVSNPRINTF_INTERCEPTOR_IMPL(vsnprintf, str, size, format, ap)\n\n#if SANITIZER_INTERCEPT___PRINTF_CHK\nINTERCEPTOR(int, __vsnprintf_chk, char *str, SIZE_T size, int flag,\n            SIZE_T size_to, const char *format, va_list ap)\nVSNPRINTF_INTERCEPTOR_IMPL(vsnprintf, str, size, format, ap)\n#endif\n\n#if SANITIZER_INTERCEPT_PRINTF_L\nINTERCEPTOR(int, vsnprintf_l, char *str, SIZE_T size, void *loc,\n            const char *format, va_list ap)\nVSNPRINTF_INTERCEPTOR_IMPL(vsnprintf_l, str, size, loc, format, ap)\n\nINTERCEPTOR(int, snprintf_l, char *str, SIZE_T size, void *loc,\n            const char *format, 
...)\nFORMAT_INTERCEPTOR_IMPL(snprintf_l, vsnprintf_l, str, size, loc, format)\n#endif  // SANITIZER_INTERCEPT_PRINTF_L\n\nINTERCEPTOR(int, vsprintf, char *str, const char *format, va_list ap)\nVSPRINTF_INTERCEPTOR_IMPL(vsprintf, str, format, ap)\n\n#if SANITIZER_INTERCEPT___PRINTF_CHK\nINTERCEPTOR(int, __vsprintf_chk, char *str, int flag, SIZE_T size_to,\n            const char *format, va_list ap)\nVSPRINTF_INTERCEPTOR_IMPL(vsprintf, str, format, ap)\n#endif\n\nINTERCEPTOR(int, vasprintf, char **strp, const char *format, va_list ap)\nVASPRINTF_INTERCEPTOR_IMPL(vasprintf, strp, format, ap)\n\n#if SANITIZER_INTERCEPT_ISOC99_PRINTF\nINTERCEPTOR(int, __isoc99_vprintf, const char *format, va_list ap)\nVPRINTF_INTERCEPTOR_IMPL(__isoc99_vprintf, format, ap)\n\nINTERCEPTOR(int, __isoc99_vfprintf, __sanitizer_FILE *stream,\n            const char *format, va_list ap)\nVPRINTF_INTERCEPTOR_IMPL(__isoc99_vfprintf, stream, format, ap)\n\nINTERCEPTOR(int, __isoc99_vsnprintf, char *str, SIZE_T size, const char *format,\n            va_list ap)\nVSNPRINTF_INTERCEPTOR_IMPL(__isoc99_vsnprintf, str, size, format, ap)\n\nINTERCEPTOR(int, __isoc99_vsprintf, char *str, const char *format,\n            va_list ap)\nVSPRINTF_INTERCEPTOR_IMPL(__isoc99_vsprintf, str, format,\n                          ap)\n\n#endif  // SANITIZER_INTERCEPT_ISOC99_PRINTF\n\nINTERCEPTOR(int, printf, const char *format, ...)\nFORMAT_INTERCEPTOR_IMPL(printf, vprintf, format)\n\nINTERCEPTOR(int, fprintf, __sanitizer_FILE *stream, const char *format, ...)\nFORMAT_INTERCEPTOR_IMPL(fprintf, vfprintf, stream, format)\n\n#if SANITIZER_INTERCEPT___PRINTF_CHK\nINTERCEPTOR(int, __fprintf_chk, __sanitizer_FILE *stream, SIZE_T size,\n            const char *format, ...)\nFORMAT_INTERCEPTOR_IMPL(__fprintf_chk, vfprintf, stream, format)\n#endif\n\nINTERCEPTOR(int, sprintf, char *str, const char *format, ...)\nFORMAT_INTERCEPTOR_IMPL(sprintf, vsprintf, str, format)\n\n#if SANITIZER_INTERCEPT___PRINTF_CHK\nINTERCEPTOR(int, 
__sprintf_chk, char *str, int flag, SIZE_T size_to,\n            const char *format, ...)\nFORMAT_INTERCEPTOR_IMPL(__sprintf_chk, vsprintf, str, format)\n#endif\n\nINTERCEPTOR(int, snprintf, char *str, SIZE_T size, const char *format, ...)\nFORMAT_INTERCEPTOR_IMPL(snprintf, vsnprintf, str, size, format)\n\n#if SANITIZER_INTERCEPT___PRINTF_CHK\nINTERCEPTOR(int, __snprintf_chk, char *str, SIZE_T size, int flag,\n            SIZE_T size_to, const char *format, ...)\nFORMAT_INTERCEPTOR_IMPL(__snprintf_chk, vsnprintf, str, size, format)\n#endif\n\nINTERCEPTOR(int, asprintf, char **strp, const char *format, ...)\nFORMAT_INTERCEPTOR_IMPL(asprintf, vasprintf, strp, format)\n\n#if SANITIZER_INTERCEPT_ISOC99_PRINTF\nINTERCEPTOR(int, __isoc99_printf, const char *format, ...)\nFORMAT_INTERCEPTOR_IMPL(__isoc99_printf, __isoc99_vprintf, format)\n\nINTERCEPTOR(int, __isoc99_fprintf, __sanitizer_FILE *stream, const char *format,\n            ...)\nFORMAT_INTERCEPTOR_IMPL(__isoc99_fprintf, __isoc99_vfprintf, stream, format)\n\nINTERCEPTOR(int, __isoc99_sprintf, char *str, const char *format, ...)\nFORMAT_INTERCEPTOR_IMPL(__isoc99_sprintf, __isoc99_vsprintf, str, format)\n\nINTERCEPTOR(int, __isoc99_snprintf, char *str, SIZE_T size,\n            const char *format, ...)\nFORMAT_INTERCEPTOR_IMPL(__isoc99_snprintf, __isoc99_vsnprintf, str, size,\n                        format)\n\n#endif  // SANITIZER_INTERCEPT_ISOC99_PRINTF\n\n#endif  // SANITIZER_INTERCEPT_PRINTF\n\n#if SANITIZER_INTERCEPT_PRINTF\n#define INIT_PRINTF                     \\\n  COMMON_INTERCEPT_FUNCTION_LDBL(printf);    \\\n  COMMON_INTERCEPT_FUNCTION_LDBL(sprintf);   \\\n  COMMON_INTERCEPT_FUNCTION_LDBL(snprintf);  \\\n  COMMON_INTERCEPT_FUNCTION_LDBL(asprintf);  \\\n  COMMON_INTERCEPT_FUNCTION_LDBL(fprintf);   \\\n  COMMON_INTERCEPT_FUNCTION_LDBL(vprintf);   \\\n  COMMON_INTERCEPT_FUNCTION_LDBL(vsprintf);  \\\n  COMMON_INTERCEPT_FUNCTION_LDBL(vsnprintf); \\\n  COMMON_INTERCEPT_FUNCTION_LDBL(vasprintf); \\\n  
COMMON_INTERCEPT_FUNCTION_LDBL(vfprintf);\n#else\n#define INIT_PRINTF\n#endif\n\n#if SANITIZER_INTERCEPT___PRINTF_CHK\n#define INIT___PRINTF_CHK                     \\\n  COMMON_INTERCEPT_FUNCTION(__sprintf_chk);   \\\n  COMMON_INTERCEPT_FUNCTION(__snprintf_chk);  \\\n  COMMON_INTERCEPT_FUNCTION(__vsprintf_chk);  \\\n  COMMON_INTERCEPT_FUNCTION(__vsnprintf_chk); \\\n  COMMON_INTERCEPT_FUNCTION(__fprintf_chk);\n#else\n#define INIT___PRINTF_CHK\n#endif\n\n#if SANITIZER_INTERCEPT_PRINTF_L\n#define INIT_PRINTF_L                     \\\n  COMMON_INTERCEPT_FUNCTION(snprintf_l);  \\\n  COMMON_INTERCEPT_FUNCTION(vsnprintf_l);\n#else\n#define INIT_PRINTF_L\n#endif\n\n#if SANITIZER_INTERCEPT_ISOC99_PRINTF\n#define INIT_ISOC99_PRINTF                       \\\n  COMMON_INTERCEPT_FUNCTION(__isoc99_printf);    \\\n  COMMON_INTERCEPT_FUNCTION(__isoc99_sprintf);   \\\n  COMMON_INTERCEPT_FUNCTION(__isoc99_snprintf);  \\\n  COMMON_INTERCEPT_FUNCTION(__isoc99_fprintf);   \\\n  COMMON_INTERCEPT_FUNCTION(__isoc99_vprintf);   \\\n  COMMON_INTERCEPT_FUNCTION(__isoc99_vsprintf);  \\\n  COMMON_INTERCEPT_FUNCTION(__isoc99_vsnprintf); \\\n  COMMON_INTERCEPT_FUNCTION(__isoc99_vfprintf);\n#else\n#define INIT_ISOC99_PRINTF\n#endif\n\n#if SANITIZER_INTERCEPT_IOCTL\n#include \"sanitizer_common_interceptors_ioctl.inc\"\n#include \"sanitizer_interceptors_ioctl_netbsd.inc\"\nINTERCEPTOR(int, ioctl, int d, unsigned long request, ...) {\n  // We need a frame pointer, because we call into ioctl_common_[pre|post] which\n  // can trigger a report and we need to be able to unwind through this\n  // function.  
On Mac in debug mode we might not have a frame pointer, because\n  // ioctl_common_[pre|post] doesn't get inlined here.\n  ENABLE_FRAME_POINTER;\n\n  void *ctx;\n  va_list ap;\n  va_start(ap, request);\n  void *arg = va_arg(ap, void *);\n  va_end(ap);\n  COMMON_INTERCEPTOR_ENTER(ctx, ioctl, d, request, arg);\n\n  CHECK(ioctl_initialized);\n\n  // Note: TSan does not use common flags, and they are zero-initialized.\n  // This effectively disables ioctl handling in TSan.\n  if (!common_flags()->handle_ioctl) return REAL(ioctl)(d, request, arg);\n\n  // Although request is unsigned long, the rest of the interceptor uses it\n  // as just \"unsigned\" to save space, because we know that all values fit in\n  // \"unsigned\" - they are compile-time constants.\n\n  const ioctl_desc *desc = ioctl_lookup(request);\n  ioctl_desc decoded_desc;\n  if (!desc) {\n    VPrintf(2, \"Decoding unknown ioctl 0x%lx\\n\", request);\n    if (!ioctl_decode(request, &decoded_desc))\n      Printf(\"WARNING: failed decoding unknown ioctl 0x%lx\\n\", request);\n    else\n      desc = &decoded_desc;\n  }\n\n  if (desc) ioctl_common_pre(ctx, desc, d, request, arg);\n  int res = REAL(ioctl)(d, request, arg);\n  // FIXME: some ioctls have different return values for success and failure.\n  if (desc && res != -1) ioctl_common_post(ctx, desc, res, d, request, arg);\n  return res;\n}\n#define INIT_IOCTL \\\n  ioctl_init();    \\\n  COMMON_INTERCEPT_FUNCTION(ioctl);\n#else\n#define INIT_IOCTL\n#endif\n\n#if SANITIZER_POSIX\nUNUSED static void unpoison_passwd(void *ctx, __sanitizer_passwd *pwd) {\n  if (pwd) {\n    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, pwd, sizeof(*pwd));\n    if (pwd->pw_name)\n      COMMON_INTERCEPTOR_WRITE_RANGE(ctx, pwd->pw_name,\n                                     internal_strlen(pwd->pw_name) + 1);\n    if (pwd->pw_passwd)\n      COMMON_INTERCEPTOR_WRITE_RANGE(ctx, pwd->pw_passwd,\n                                     internal_strlen(pwd->pw_passwd) + 1);\n#if 
!SANITIZER_ANDROID\n    if (pwd->pw_gecos)\n      COMMON_INTERCEPTOR_WRITE_RANGE(ctx, pwd->pw_gecos,\n                                     internal_strlen(pwd->pw_gecos) + 1);\n#endif\n#if SANITIZER_MAC || SANITIZER_FREEBSD || SANITIZER_NETBSD\n    if (pwd->pw_class)\n      COMMON_INTERCEPTOR_WRITE_RANGE(ctx, pwd->pw_class,\n                                     internal_strlen(pwd->pw_class) + 1);\n#endif\n    if (pwd->pw_dir)\n      COMMON_INTERCEPTOR_WRITE_RANGE(ctx, pwd->pw_dir,\n                                     internal_strlen(pwd->pw_dir) + 1);\n    if (pwd->pw_shell)\n      COMMON_INTERCEPTOR_WRITE_RANGE(ctx, pwd->pw_shell,\n                                     internal_strlen(pwd->pw_shell) + 1);\n  }\n}\n\nUNUSED static void unpoison_group(void *ctx, __sanitizer_group *grp) {\n  if (grp) {\n    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, grp, sizeof(*grp));\n    if (grp->gr_name)\n      COMMON_INTERCEPTOR_WRITE_RANGE(ctx, grp->gr_name,\n                                     internal_strlen(grp->gr_name) + 1);\n    if (grp->gr_passwd)\n      COMMON_INTERCEPTOR_WRITE_RANGE(ctx, grp->gr_passwd,\n                                     internal_strlen(grp->gr_passwd) + 1);\n    char **p = grp->gr_mem;\n    for (; *p; ++p) {\n      COMMON_INTERCEPTOR_WRITE_RANGE(ctx, *p, internal_strlen(*p) + 1);\n    }\n    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, grp->gr_mem,\n                                   (p - grp->gr_mem + 1) * sizeof(*p));\n  }\n}\n#endif  // SANITIZER_POSIX\n\n#if SANITIZER_INTERCEPT_GETPWNAM_AND_FRIENDS\nINTERCEPTOR(__sanitizer_passwd *, getpwnam, const char *name) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, getpwnam, name);\n  if (name)\n    COMMON_INTERCEPTOR_READ_RANGE(ctx, name, internal_strlen(name) + 1);\n  __sanitizer_passwd *res = REAL(getpwnam)(name);\n  unpoison_passwd(ctx, res);\n  return res;\n}\nINTERCEPTOR(__sanitizer_passwd *, getpwuid, u32 uid) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, getpwuid, uid);\n  __sanitizer_passwd *res = 
REAL(getpwuid)(uid);\n  unpoison_passwd(ctx, res);\n  return res;\n}\nINTERCEPTOR(__sanitizer_group *, getgrnam, const char *name) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, getgrnam, name);\n  COMMON_INTERCEPTOR_READ_RANGE(ctx, name, internal_strlen(name) + 1);\n  __sanitizer_group *res = REAL(getgrnam)(name);\n  unpoison_group(ctx, res);\n  return res;\n}\nINTERCEPTOR(__sanitizer_group *, getgrgid, u32 gid) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, getgrgid, gid);\n  __sanitizer_group *res = REAL(getgrgid)(gid);\n  unpoison_group(ctx, res);\n  return res;\n}\n#define INIT_GETPWNAM_AND_FRIENDS      \\\n  COMMON_INTERCEPT_FUNCTION(getpwnam); \\\n  COMMON_INTERCEPT_FUNCTION(getpwuid); \\\n  COMMON_INTERCEPT_FUNCTION(getgrnam); \\\n  COMMON_INTERCEPT_FUNCTION(getgrgid);\n#else\n#define INIT_GETPWNAM_AND_FRIENDS\n#endif\n\n#if SANITIZER_INTERCEPT_GETPWNAM_R_AND_FRIENDS\nINTERCEPTOR(int, getpwnam_r, const char *name, __sanitizer_passwd *pwd,\n            char *buf, SIZE_T buflen, __sanitizer_passwd **result) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, getpwnam_r, name, pwd, buf, buflen, result);\n  COMMON_INTERCEPTOR_READ_RANGE(ctx, name, internal_strlen(name) + 1);\n  // FIXME: under ASan the call below may write to freed memory and corrupt\n  // its metadata. See\n  // https://github.com/google/sanitizers/issues/321.\n  int res = REAL(getpwnam_r)(name, pwd, buf, buflen, result);\n  if (!res && result)\n    unpoison_passwd(ctx, *result);\n  if (result) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, result, sizeof(*result));\n  return res;\n}\nINTERCEPTOR(int, getpwuid_r, u32 uid, __sanitizer_passwd *pwd, char *buf,\n            SIZE_T buflen, __sanitizer_passwd **result) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, getpwuid_r, uid, pwd, buf, buflen, result);\n  // FIXME: under ASan the call below may write to freed memory and corrupt\n  // its metadata. 
See\n  // https://github.com/google/sanitizers/issues/321.\n  int res = REAL(getpwuid_r)(uid, pwd, buf, buflen, result);\n  if (!res && result)\n    unpoison_passwd(ctx, *result);\n  if (result) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, result, sizeof(*result));\n  return res;\n}\nINTERCEPTOR(int, getgrnam_r, const char *name, __sanitizer_group *grp,\n            char *buf, SIZE_T buflen, __sanitizer_group **result) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, getgrnam_r, name, grp, buf, buflen, result);\n  COMMON_INTERCEPTOR_READ_RANGE(ctx, name, internal_strlen(name) + 1);\n  // FIXME: under ASan the call below may write to freed memory and corrupt\n  // its metadata. See\n  // https://github.com/google/sanitizers/issues/321.\n  int res = REAL(getgrnam_r)(name, grp, buf, buflen, result);\n  if (!res && result)\n    unpoison_group(ctx, *result);\n  if (result) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, result, sizeof(*result));\n  return res;\n}\nINTERCEPTOR(int, getgrgid_r, u32 gid, __sanitizer_group *grp, char *buf,\n            SIZE_T buflen, __sanitizer_group **result) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, getgrgid_r, gid, grp, buf, buflen, result);\n  // FIXME: under ASan the call below may write to freed memory and corrupt\n  // its metadata. 
See\n  // https://github.com/google/sanitizers/issues/321.\n  int res = REAL(getgrgid_r)(gid, grp, buf, buflen, result);\n  if (!res && result)\n    unpoison_group(ctx, *result);\n  if (result) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, result, sizeof(*result));\n  return res;\n}\n#define INIT_GETPWNAM_R_AND_FRIENDS      \\\n  COMMON_INTERCEPT_FUNCTION(getpwnam_r); \\\n  COMMON_INTERCEPT_FUNCTION(getpwuid_r); \\\n  COMMON_INTERCEPT_FUNCTION(getgrnam_r); \\\n  COMMON_INTERCEPT_FUNCTION(getgrgid_r);\n#else\n#define INIT_GETPWNAM_R_AND_FRIENDS\n#endif\n\n#if SANITIZER_INTERCEPT_GETPWENT\nINTERCEPTOR(__sanitizer_passwd *, getpwent, int dummy) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, getpwent, dummy);\n  __sanitizer_passwd *res = REAL(getpwent)(dummy);\n  unpoison_passwd(ctx, res);\n  return res;\n}\nINTERCEPTOR(__sanitizer_group *, getgrent, int dummy) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, getgrent, dummy);\n  __sanitizer_group *res = REAL(getgrent)(dummy);\n  unpoison_group(ctx, res);\n  return res;\n}\n#define INIT_GETPWENT                  \\\n  COMMON_INTERCEPT_FUNCTION(getpwent); \\\n  COMMON_INTERCEPT_FUNCTION(getgrent);\n#else\n#define INIT_GETPWENT\n#endif\n\n#if SANITIZER_INTERCEPT_FGETPWENT\nINTERCEPTOR(__sanitizer_passwd *, fgetpwent, void *fp) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, fgetpwent, fp);\n  __sanitizer_passwd *res = REAL(fgetpwent)(fp);\n  unpoison_passwd(ctx, res);\n  return res;\n}\nINTERCEPTOR(__sanitizer_group *, fgetgrent, void *fp) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, fgetgrent, fp);\n  __sanitizer_group *res = REAL(fgetgrent)(fp);\n  unpoison_group(ctx, res);\n  return res;\n}\n#define INIT_FGETPWENT                  \\\n  COMMON_INTERCEPT_FUNCTION(fgetpwent); \\\n  COMMON_INTERCEPT_FUNCTION(fgetgrent);\n#else\n#define INIT_FGETPWENT\n#endif\n\n#if SANITIZER_INTERCEPT_GETPWENT_R\nINTERCEPTOR(int, getpwent_r, __sanitizer_passwd *pwbuf, char *buf,\n            SIZE_T buflen, __sanitizer_passwd **pwbufp) {\n  
void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, getpwent_r, pwbuf, buf, buflen, pwbufp);\n  // FIXME: under ASan the call below may write to freed memory and corrupt\n  // its metadata. See\n  // https://github.com/google/sanitizers/issues/321.\n  int res = REAL(getpwent_r)(pwbuf, buf, buflen, pwbufp);\n  if (!res && pwbufp)\n    unpoison_passwd(ctx, *pwbufp);\n  if (pwbufp) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, pwbufp, sizeof(*pwbufp));\n  return res;\n}\nINTERCEPTOR(int, getgrent_r, __sanitizer_group *pwbuf, char *buf, SIZE_T buflen,\n            __sanitizer_group **pwbufp) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, getgrent_r, pwbuf, buf, buflen, pwbufp);\n  // FIXME: under ASan the call below may write to freed memory and corrupt\n  // its metadata. See\n  // https://github.com/google/sanitizers/issues/321.\n  int res = REAL(getgrent_r)(pwbuf, buf, buflen, pwbufp);\n  if (!res && pwbufp)\n    unpoison_group(ctx, *pwbufp);\n  if (pwbufp) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, pwbufp, sizeof(*pwbufp));\n  return res;\n}\n#define INIT_GETPWENT_R                   \\\n  COMMON_INTERCEPT_FUNCTION(getpwent_r);  \\\n  COMMON_INTERCEPT_FUNCTION(getgrent_r);\n#else\n#define INIT_GETPWENT_R\n#endif\n\n#if SANITIZER_INTERCEPT_FGETPWENT_R\nINTERCEPTOR(int, fgetpwent_r, void *fp, __sanitizer_passwd *pwbuf, char *buf,\n            SIZE_T buflen, __sanitizer_passwd **pwbufp) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, fgetpwent_r, fp, pwbuf, buf, buflen, pwbufp);\n  // FIXME: under ASan the call below may write to freed memory and corrupt\n  // its metadata. 
See\n  // https://github.com/google/sanitizers/issues/321.\n  int res = REAL(fgetpwent_r)(fp, pwbuf, buf, buflen, pwbufp);\n  if (!res && pwbufp)\n    unpoison_passwd(ctx, *pwbufp);\n  if (pwbufp) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, pwbufp, sizeof(*pwbufp));\n  return res;\n}\n#define INIT_FGETPWENT_R                  \\\n  COMMON_INTERCEPT_FUNCTION(fgetpwent_r);\n#else\n#define INIT_FGETPWENT_R\n#endif\n\n#if SANITIZER_INTERCEPT_FGETGRENT_R\nINTERCEPTOR(int, fgetgrent_r, void *fp, __sanitizer_group *pwbuf, char *buf,\n            SIZE_T buflen, __sanitizer_group **pwbufp) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, fgetgrent_r, fp, pwbuf, buf, buflen, pwbufp);\n  // FIXME: under ASan the call below may write to freed memory and corrupt\n  // its metadata. See\n  // https://github.com/google/sanitizers/issues/321.\n  int res = REAL(fgetgrent_r)(fp, pwbuf, buf, buflen, pwbufp);\n  if (!res && pwbufp)\n    unpoison_group(ctx, *pwbufp);\n  if (pwbufp) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, pwbufp, sizeof(*pwbufp));\n  return res;\n}\n#define INIT_FGETGRENT_R                  \\\n  COMMON_INTERCEPT_FUNCTION(fgetgrent_r);\n#else\n#define INIT_FGETGRENT_R\n#endif\n\n#if SANITIZER_INTERCEPT_SETPWENT\n// The only thing these interceptors do is disable any nested interceptors.\n// These functions may open nss modules and call uninstrumented functions from\n// them, and we don't want things like strlen() to trigger.\nINTERCEPTOR(void, setpwent, int dummy) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, setpwent, dummy);\n  REAL(setpwent)(dummy);\n}\nINTERCEPTOR(void, endpwent, int dummy) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, endpwent, dummy);\n  REAL(endpwent)(dummy);\n}\nINTERCEPTOR(void, setgrent, int dummy) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, setgrent, dummy);\n  REAL(setgrent)(dummy);\n}\nINTERCEPTOR(void, endgrent, int dummy) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, endgrent, dummy);\n  REAL(endgrent)(dummy);\n}\n#define INIT_SETPWENT  
                \\\n  COMMON_INTERCEPT_FUNCTION(setpwent); \\\n  COMMON_INTERCEPT_FUNCTION(endpwent); \\\n  COMMON_INTERCEPT_FUNCTION(setgrent); \\\n  COMMON_INTERCEPT_FUNCTION(endgrent);\n#else\n#define INIT_SETPWENT\n#endif\n\n#if SANITIZER_INTERCEPT_CLOCK_GETTIME\nINTERCEPTOR(int, clock_getres, u32 clk_id, void *tp) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, clock_getres, clk_id, tp);\n  // FIXME: under ASan the call below may write to freed memory and corrupt\n  // its metadata. See\n  // https://github.com/google/sanitizers/issues/321.\n  int res = REAL(clock_getres)(clk_id, tp);\n  if (!res && tp) {\n    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, tp, struct_timespec_sz);\n  }\n  return res;\n}\nINTERCEPTOR(int, clock_gettime, u32 clk_id, void *tp) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, clock_gettime, clk_id, tp);\n  // FIXME: under ASan the call below may write to freed memory and corrupt\n  // its metadata. See\n  // https://github.com/google/sanitizers/issues/321.\n  int res = REAL(clock_gettime)(clk_id, tp);\n  if (!res) {\n    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, tp, struct_timespec_sz);\n  }\n  return res;\n}\n#if SANITIZER_GLIBC\nnamespace __sanitizer {\nextern \"C\" {\nint real_clock_gettime(u32 clk_id, void *tp) {\n  if (COMMON_INTERCEPTOR_NOTHING_IS_INITIALIZED)\n    return internal_clock_gettime(clk_id, tp);\n  return REAL(clock_gettime)(clk_id, tp);\n}\n}  // extern \"C\"\n}  // namespace __sanitizer\n#endif\nINTERCEPTOR(int, clock_settime, u32 clk_id, const void *tp) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, clock_settime, clk_id, tp);\n  COMMON_INTERCEPTOR_READ_RANGE(ctx, tp, struct_timespec_sz);\n  return REAL(clock_settime)(clk_id, tp);\n}\n#define INIT_CLOCK_GETTIME                  \\\n  COMMON_INTERCEPT_FUNCTION(clock_getres);  \\\n  COMMON_INTERCEPT_FUNCTION(clock_gettime); \\\n  COMMON_INTERCEPT_FUNCTION(clock_settime);\n#else\n#define INIT_CLOCK_GETTIME\n#endif\n\n#if 
SANITIZER_INTERCEPT_CLOCK_GETCPUCLOCKID\nINTERCEPTOR(int, clock_getcpuclockid, pid_t pid,\n            __sanitizer_clockid_t *clockid) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, clock_getcpuclockid, pid, clockid);\n  int res = REAL(clock_getcpuclockid)(pid, clockid);\n  if (!res && clockid) {\n    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, clockid, sizeof *clockid);\n  }\n  return res;\n}\n\nINTERCEPTOR(int, pthread_getcpuclockid, uptr thread,\n            __sanitizer_clockid_t *clockid) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, pthread_getcpuclockid, thread, clockid);\n  int res = REAL(pthread_getcpuclockid)(thread, clockid);\n  if (!res && clockid) {\n    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, clockid, sizeof *clockid);\n  }\n  return res;\n}\n\n#define INIT_CLOCK_GETCPUCLOCKID                   \\\n  COMMON_INTERCEPT_FUNCTION(clock_getcpuclockid);  \\\n  COMMON_INTERCEPT_FUNCTION(pthread_getcpuclockid);\n#else\n#define INIT_CLOCK_GETCPUCLOCKID\n#endif\n\n#if SANITIZER_INTERCEPT_GETITIMER\nINTERCEPTOR(int, getitimer, int which, void *curr_value) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, getitimer, which, curr_value);\n  // FIXME: under ASan the call below may write to freed memory and corrupt\n  // its metadata. 
See\n  // https://github.com/google/sanitizers/issues/321.\n  int res = REAL(getitimer)(which, curr_value);\n  if (!res && curr_value) {\n    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, curr_value, struct_itimerval_sz);\n  }\n  return res;\n}\nINTERCEPTOR(int, setitimer, int which, const void *new_value, void *old_value) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, setitimer, which, new_value, old_value);\n  if (new_value) {\n    // itimerval can contain padding that may be legitimately uninitialized\n    const struct __sanitizer_itimerval *nv =\n        (const struct __sanitizer_itimerval *)new_value;\n    COMMON_INTERCEPTOR_READ_RANGE(ctx, &nv->it_interval.tv_sec,\n                                  sizeof(__sanitizer_time_t));\n    COMMON_INTERCEPTOR_READ_RANGE(ctx, &nv->it_interval.tv_usec,\n                                  sizeof(__sanitizer_suseconds_t));\n    COMMON_INTERCEPTOR_READ_RANGE(ctx, &nv->it_value.tv_sec,\n                                  sizeof(__sanitizer_time_t));\n    COMMON_INTERCEPTOR_READ_RANGE(ctx, &nv->it_value.tv_usec,\n                                  sizeof(__sanitizer_suseconds_t));\n  }\n  // FIXME: under ASan the call below may write to freed memory and corrupt\n  // its metadata. 
See\n  // https://github.com/google/sanitizers/issues/321.\n  int res = REAL(setitimer)(which, new_value, old_value);\n  if (!res && old_value) {\n    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, old_value, struct_itimerval_sz);\n  }\n  return res;\n}\n#define INIT_GETITIMER                  \\\n  COMMON_INTERCEPT_FUNCTION(getitimer); \\\n  COMMON_INTERCEPT_FUNCTION(setitimer);\n#else\n#define INIT_GETITIMER\n#endif\n\n#if SANITIZER_INTERCEPT_GLOB\nstatic void unpoison_glob_t(void *ctx, __sanitizer_glob_t *pglob) {\n  COMMON_INTERCEPTOR_WRITE_RANGE(ctx, pglob, sizeof(*pglob));\n  // +1 for NULL pointer at the end.\n  if (pglob->gl_pathv)\n    COMMON_INTERCEPTOR_WRITE_RANGE(\n        ctx, pglob->gl_pathv, (pglob->gl_pathc + 1) * sizeof(*pglob->gl_pathv));\n  for (SIZE_T i = 0; i < pglob->gl_pathc; ++i) {\n    char *p = pglob->gl_pathv[i];\n    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, p, internal_strlen(p) + 1);\n  }\n}\n\n#if SANITIZER_SOLARIS\nINTERCEPTOR(int, glob, const char *pattern, int flags,\n            int (*errfunc)(const char *epath, int eerrno),\n            __sanitizer_glob_t *pglob) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, glob, pattern, flags, errfunc, pglob);\n  COMMON_INTERCEPTOR_READ_STRING(ctx, pattern, 0);\n  int res = REAL(glob)(pattern, flags, errfunc, pglob);\n  if ((!res || res == glob_nomatch) && pglob) unpoison_glob_t(ctx, pglob);\n  return res;\n}\n#else\nstatic THREADLOCAL __sanitizer_glob_t *pglob_copy;\n\nstatic void wrapped_gl_closedir(void *dir) {\n  COMMON_INTERCEPTOR_UNPOISON_PARAM(1);\n  pglob_copy->gl_closedir(dir);\n}\n\nstatic void *wrapped_gl_readdir(void *dir) {\n  COMMON_INTERCEPTOR_UNPOISON_PARAM(1);\n  return pglob_copy->gl_readdir(dir);\n}\n\nstatic void *wrapped_gl_opendir(const char *s) {\n  COMMON_INTERCEPTOR_UNPOISON_PARAM(1);\n  COMMON_INTERCEPTOR_INITIALIZE_RANGE(s, internal_strlen(s) + 1);\n  return pglob_copy->gl_opendir(s);\n}\n\nstatic int wrapped_gl_lstat(const char *s, void *st) {\n  
COMMON_INTERCEPTOR_UNPOISON_PARAM(2);\n  COMMON_INTERCEPTOR_INITIALIZE_RANGE(s, internal_strlen(s) + 1);\n  return pglob_copy->gl_lstat(s, st);\n}\n\nstatic int wrapped_gl_stat(const char *s, void *st) {\n  COMMON_INTERCEPTOR_UNPOISON_PARAM(2);\n  COMMON_INTERCEPTOR_INITIALIZE_RANGE(s, internal_strlen(s) + 1);\n  return pglob_copy->gl_stat(s, st);\n}\n\nstatic const __sanitizer_glob_t kGlobCopy = {\n      0,                  0,                   0,\n      0,                  wrapped_gl_closedir, wrapped_gl_readdir,\n      wrapped_gl_opendir, wrapped_gl_lstat,    wrapped_gl_stat};\n\nINTERCEPTOR(int, glob, const char *pattern, int flags,\n            int (*errfunc)(const char *epath, int eerrno),\n            __sanitizer_glob_t *pglob) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, glob, pattern, flags, errfunc, pglob);\n  COMMON_INTERCEPTOR_READ_STRING(ctx, pattern, 0);\n  __sanitizer_glob_t glob_copy;\n  internal_memcpy(&glob_copy, &kGlobCopy, sizeof(glob_copy));\n  if (flags & glob_altdirfunc) {\n    Swap(pglob->gl_closedir, glob_copy.gl_closedir);\n    Swap(pglob->gl_readdir, glob_copy.gl_readdir);\n    Swap(pglob->gl_opendir, glob_copy.gl_opendir);\n    Swap(pglob->gl_lstat, glob_copy.gl_lstat);\n    Swap(pglob->gl_stat, glob_copy.gl_stat);\n    pglob_copy = &glob_copy;\n  }\n  int res = REAL(glob)(pattern, flags, errfunc, pglob);\n  if (flags & glob_altdirfunc) {\n    Swap(pglob->gl_closedir, glob_copy.gl_closedir);\n    Swap(pglob->gl_readdir, glob_copy.gl_readdir);\n    Swap(pglob->gl_opendir, glob_copy.gl_opendir);\n    Swap(pglob->gl_lstat, glob_copy.gl_lstat);\n    Swap(pglob->gl_stat, glob_copy.gl_stat);\n  }\n  pglob_copy = 0;\n  if ((!res || res == glob_nomatch) && pglob) unpoison_glob_t(ctx, pglob);\n  return res;\n}\n#endif  // SANITIZER_SOLARIS\n#define INIT_GLOB                  \\\n  COMMON_INTERCEPT_FUNCTION(glob);\n#else  // SANITIZER_INTERCEPT_GLOB\n#define INIT_GLOB\n#endif  // SANITIZER_INTERCEPT_GLOB\n\n#if 
SANITIZER_INTERCEPT_GLOB64\nINTERCEPTOR(int, glob64, const char *pattern, int flags,\n            int (*errfunc)(const char *epath, int eerrno),\n            __sanitizer_glob_t *pglob) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, glob64, pattern, flags, errfunc, pglob);\n  COMMON_INTERCEPTOR_READ_STRING(ctx, pattern, 0);\n  __sanitizer_glob_t glob_copy;\n  internal_memcpy(&glob_copy, &kGlobCopy, sizeof(glob_copy));\n  if (flags & glob_altdirfunc) {\n    Swap(pglob->gl_closedir, glob_copy.gl_closedir);\n    Swap(pglob->gl_readdir, glob_copy.gl_readdir);\n    Swap(pglob->gl_opendir, glob_copy.gl_opendir);\n    Swap(pglob->gl_lstat, glob_copy.gl_lstat);\n    Swap(pglob->gl_stat, glob_copy.gl_stat);\n    pglob_copy = &glob_copy;\n  }\n  int res = REAL(glob64)(pattern, flags, errfunc, pglob);\n  if (flags & glob_altdirfunc) {\n    Swap(pglob->gl_closedir, glob_copy.gl_closedir);\n    Swap(pglob->gl_readdir, glob_copy.gl_readdir);\n    Swap(pglob->gl_opendir, glob_copy.gl_opendir);\n    Swap(pglob->gl_lstat, glob_copy.gl_lstat);\n    Swap(pglob->gl_stat, glob_copy.gl_stat);\n  }\n  pglob_copy = 0;\n  if ((!res || res == glob_nomatch) && pglob) unpoison_glob_t(ctx, pglob);\n  return res;\n}\n#define INIT_GLOB64                \\\n  COMMON_INTERCEPT_FUNCTION(glob64);\n#else  // SANITIZER_INTERCEPT_GLOB64\n#define INIT_GLOB64\n#endif  // SANITIZER_INTERCEPT_GLOB64\n\n#if SANITIZER_INTERCEPT_POSIX_SPAWN\n\ntemplate <class RealSpawnPtr>\nstatic int PosixSpawnImpl(void *ctx, RealSpawnPtr *real_posix_spawn, pid_t *pid,\n                          const char *file_or_path, const void *file_actions,\n                          const void *attrp, char *const argv[],\n                          char *const envp[]) {\n  COMMON_INTERCEPTOR_READ_RANGE(ctx, file_or_path,\n                                internal_strlen(file_or_path) + 1);\n  if (argv) {\n    for (char *const *s = argv; ; ++s) {\n      COMMON_INTERCEPTOR_READ_RANGE(ctx, s, sizeof(*s));\n      if (!*s) break;\n      
COMMON_INTERCEPTOR_READ_RANGE(ctx, *s, internal_strlen(*s) + 1);\n    }\n  }\n  if (envp) {\n    for (char *const *s = envp; ; ++s) {\n      COMMON_INTERCEPTOR_READ_RANGE(ctx, s, sizeof(*s));\n      if (!*s) break;\n      COMMON_INTERCEPTOR_READ_RANGE(ctx, *s, internal_strlen(*s) + 1);\n    }\n  }\n  int res =\n      real_posix_spawn(pid, file_or_path, file_actions, attrp, argv, envp);\n  if (res == 0)\n    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, pid, sizeof(*pid));\n  return res;\n}\nINTERCEPTOR(int, posix_spawn, pid_t *pid, const char *path,\n            const void *file_actions, const void *attrp, char *const argv[],\n            char *const envp[]) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, posix_spawn, pid, path, file_actions, attrp,\n                           argv, envp);\n  return PosixSpawnImpl(ctx, REAL(posix_spawn), pid, path, file_actions, attrp,\n                        argv, envp);\n}\nINTERCEPTOR(int, posix_spawnp, pid_t *pid, const char *file,\n            const void *file_actions, const void *attrp, char *const argv[],\n            char *const envp[]) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, posix_spawnp, pid, file, file_actions, attrp,\n                           argv, envp);\n  return PosixSpawnImpl(ctx, REAL(posix_spawnp), pid, file, file_actions, attrp,\n                        argv, envp);\n}\n#  define INIT_POSIX_SPAWN                  \\\n    COMMON_INTERCEPT_FUNCTION(posix_spawn); \\\n    COMMON_INTERCEPT_FUNCTION(posix_spawnp);\n#else  // SANITIZER_INTERCEPT_POSIX_SPAWN\n#  define INIT_POSIX_SPAWN\n#endif  // SANITIZER_INTERCEPT_POSIX_SPAWN\n\n#if SANITIZER_INTERCEPT_WAIT\n// According to sys/wait.h, wait(), waitid(), waitpid() may have symbol version\n// suffixes on Darwin. 
See the declaration of INTERCEPTOR_WITH_SUFFIX for\n// details.\nINTERCEPTOR_WITH_SUFFIX(int, wait, int *status) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, wait, status);\n  // FIXME: under ASan the call below may write to freed memory and corrupt\n  // its metadata. See\n  // https://github.com/google/sanitizers/issues/321.\n  int res = REAL(wait)(status);\n  if (res != -1 && status)\n    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, status, sizeof(*status));\n  return res;\n}\n// On FreeBSD id_t is always 64-bit wide.\n#if SANITIZER_FREEBSD && (SANITIZER_WORDSIZE == 32)\nINTERCEPTOR_WITH_SUFFIX(int, waitid, int idtype, long long id, void *infop,\n                        int options) {\n#else\nINTERCEPTOR_WITH_SUFFIX(int, waitid, int idtype, int id, void *infop,\n                        int options) {\n#endif\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, waitid, idtype, id, infop, options);\n  // FIXME: under ASan the call below may write to freed memory and corrupt\n  // its metadata. See\n  // https://github.com/google/sanitizers/issues/321.\n  int res = REAL(waitid)(idtype, id, infop, options);\n  if (res != -1 && infop)\n    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, infop, siginfo_t_sz);\n  return res;\n}\nINTERCEPTOR_WITH_SUFFIX(int, waitpid, int pid, int *status, int options) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, waitpid, pid, status, options);\n  // FIXME: under ASan the call below may write to freed memory and corrupt\n  // its metadata. See\n  // https://github.com/google/sanitizers/issues/321.\n  int res = REAL(waitpid)(pid, status, options);\n  if (res != -1 && status)\n    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, status, sizeof(*status));\n  return res;\n}\nINTERCEPTOR(int, wait3, int *status, int options, void *rusage) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, wait3, status, options, rusage);\n  // FIXME: under ASan the call below may write to freed memory and corrupt\n  // its metadata. 
See\n  // https://github.com/google/sanitizers/issues/321.\n  int res = REAL(wait3)(status, options, rusage);\n  if (res != -1) {\n    if (status) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, status, sizeof(*status));\n    if (rusage) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, rusage, struct_rusage_sz);\n  }\n  return res;\n}\n#if SANITIZER_ANDROID\nINTERCEPTOR(int, __wait4, int pid, int *status, int options, void *rusage) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, __wait4, pid, status, options, rusage);\n  // FIXME: under ASan the call below may write to freed memory and corrupt\n  // its metadata. See\n  // https://github.com/google/sanitizers/issues/321.\n  int res = REAL(__wait4)(pid, status, options, rusage);\n  if (res != -1) {\n    if (status) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, status, sizeof(*status));\n    if (rusage) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, rusage, struct_rusage_sz);\n  }\n  return res;\n}\n#define INIT_WAIT4 COMMON_INTERCEPT_FUNCTION(__wait4);\n#else\nINTERCEPTOR(int, wait4, int pid, int *status, int options, void *rusage) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, wait4, pid, status, options, rusage);\n  // FIXME: under ASan the call below may write to freed memory and corrupt\n  // its metadata. 
See\n  // https://github.com/google/sanitizers/issues/321.\n  int res = REAL(wait4)(pid, status, options, rusage);\n  if (res != -1) {\n    if (status) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, status, sizeof(*status));\n    if (rusage) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, rusage, struct_rusage_sz);\n  }\n  return res;\n}\n#define INIT_WAIT4 COMMON_INTERCEPT_FUNCTION(wait4);\n#endif  // SANITIZER_ANDROID\n#define INIT_WAIT                     \\\n  COMMON_INTERCEPT_FUNCTION(wait);    \\\n  COMMON_INTERCEPT_FUNCTION(waitid);  \\\n  COMMON_INTERCEPT_FUNCTION(waitpid); \\\n  COMMON_INTERCEPT_FUNCTION(wait3);\n#else\n#define INIT_WAIT\n#define INIT_WAIT4\n#endif\n\n#if SANITIZER_INTERCEPT_INET\nINTERCEPTOR(char *, inet_ntop, int af, const void *src, char *dst, u32 size) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, inet_ntop, af, src, dst, size);\n  uptr sz = __sanitizer_in_addr_sz(af);\n  if (sz) COMMON_INTERCEPTOR_READ_RANGE(ctx, src, sz);\n  // FIXME: figure out read size based on the address family.\n  // FIXME: under ASan the call below may write to freed memory and corrupt\n  // its metadata. See\n  // https://github.com/google/sanitizers/issues/321.\n  char *res = REAL(inet_ntop)(af, src, dst, size);\n  if (res) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, res, internal_strlen(res) + 1);\n  return res;\n}\nINTERCEPTOR(int, inet_pton, int af, const char *src, void *dst) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, inet_pton, af, src, dst);\n  COMMON_INTERCEPTOR_READ_STRING(ctx, src, 0);\n  // FIXME: figure out read size based on the address family.\n  // FIXME: under ASan the call below may write to freed memory and corrupt\n  // its metadata. 
See\n  // https://github.com/google/sanitizers/issues/321.\n  int res = REAL(inet_pton)(af, src, dst);\n  if (res == 1) {\n    uptr sz = __sanitizer_in_addr_sz(af);\n    if (sz) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, dst, sz);\n  }\n  return res;\n}\n#define INIT_INET                       \\\n  COMMON_INTERCEPT_FUNCTION(inet_ntop); \\\n  COMMON_INTERCEPT_FUNCTION(inet_pton);\n#else\n#define INIT_INET\n#endif\n\n#if SANITIZER_INTERCEPT_INET\nINTERCEPTOR(int, inet_aton, const char *cp, void *dst) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, inet_aton, cp, dst);\n  if (cp) COMMON_INTERCEPTOR_READ_RANGE(ctx, cp, internal_strlen(cp) + 1);\n  // FIXME: under ASan the call below may write to freed memory and corrupt\n  // its metadata. See\n  // https://github.com/google/sanitizers/issues/321.\n  int res = REAL(inet_aton)(cp, dst);\n  if (res != 0) {\n    uptr sz = __sanitizer_in_addr_sz(af_inet);\n    if (sz) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, dst, sz);\n  }\n  return res;\n}\n#define INIT_INET_ATON COMMON_INTERCEPT_FUNCTION(inet_aton);\n#else\n#define INIT_INET_ATON\n#endif\n\n#if SANITIZER_INTERCEPT_PTHREAD_GETSCHEDPARAM\nINTERCEPTOR(int, pthread_getschedparam, uptr thread, int *policy, int *param) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, pthread_getschedparam, thread, policy, param);\n  // FIXME: under ASan the call below may write to freed memory and corrupt\n  // its metadata. 
See\n  // https://github.com/google/sanitizers/issues/321.\n  int res = REAL(pthread_getschedparam)(thread, policy, param);\n  if (res == 0) {\n    if (policy) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, policy, sizeof(*policy));\n    if (param) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, param, sizeof(*param));\n  }\n  return res;\n}\n#define INIT_PTHREAD_GETSCHEDPARAM \\\n  COMMON_INTERCEPT_FUNCTION(pthread_getschedparam);\n#else\n#define INIT_PTHREAD_GETSCHEDPARAM\n#endif\n\n#if SANITIZER_INTERCEPT_GETADDRINFO\nINTERCEPTOR(int, getaddrinfo, char *node, char *service,\n            struct __sanitizer_addrinfo *hints,\n            struct __sanitizer_addrinfo **out) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, getaddrinfo, node, service, hints, out);\n  if (node) COMMON_INTERCEPTOR_READ_RANGE(ctx, node, internal_strlen(node) + 1);\n  if (service)\n    COMMON_INTERCEPTOR_READ_RANGE(ctx, service, internal_strlen(service) + 1);\n  if (hints)\n    COMMON_INTERCEPTOR_READ_RANGE(ctx, hints, sizeof(__sanitizer_addrinfo));\n  // FIXME: under ASan the call below may write to freed memory and corrupt\n  // its metadata. 
See\n  // https://github.com/google/sanitizers/issues/321.\n  int res = REAL(getaddrinfo)(node, service, hints, out);\n  if (res == 0 && out) {\n    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, out, sizeof(*out));\n    struct __sanitizer_addrinfo *p = *out;\n    while (p) {\n      COMMON_INTERCEPTOR_WRITE_RANGE(ctx, p, sizeof(*p));\n      if (p->ai_addr)\n        COMMON_INTERCEPTOR_WRITE_RANGE(ctx, p->ai_addr, p->ai_addrlen);\n      if (p->ai_canonname)\n        COMMON_INTERCEPTOR_WRITE_RANGE(ctx, p->ai_canonname,\n                                       internal_strlen(p->ai_canonname) + 1);\n      p = p->ai_next;\n    }\n  }\n  return res;\n}\n#define INIT_GETADDRINFO COMMON_INTERCEPT_FUNCTION(getaddrinfo);\n#else\n#define INIT_GETADDRINFO\n#endif\n\n#if SANITIZER_INTERCEPT_GETNAMEINFO\nINTERCEPTOR(int, getnameinfo, void *sockaddr, unsigned salen, char *host,\n            unsigned hostlen, char *serv, unsigned servlen, int flags) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, getnameinfo, sockaddr, salen, host, hostlen,\n                           serv, servlen, flags);\n  // FIXME: consider adding READ_RANGE(sockaddr, salen)\n  // There is padding in in_addr that may make this too noisy\n  // FIXME: under ASan the call below may write to freed memory and corrupt\n  // its metadata. 
See\n  // https://github.com/google/sanitizers/issues/321.\n  int res =\n      REAL(getnameinfo)(sockaddr, salen, host, hostlen, serv, servlen, flags);\n  if (res == 0) {\n    if (host && hostlen)\n      COMMON_INTERCEPTOR_WRITE_RANGE(ctx, host, internal_strlen(host) + 1);\n    if (serv && servlen)\n      COMMON_INTERCEPTOR_WRITE_RANGE(ctx, serv, internal_strlen(serv) + 1);\n  }\n  return res;\n}\n#define INIT_GETNAMEINFO COMMON_INTERCEPT_FUNCTION(getnameinfo);\n#else\n#define INIT_GETNAMEINFO\n#endif\n\n#if SANITIZER_INTERCEPT_GETSOCKNAME\nINTERCEPTOR(int, getsockname, int sock_fd, void *addr, unsigned *addrlen) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, getsockname, sock_fd, addr, addrlen);\n  unsigned addr_sz;\n  if (addrlen) {\n    COMMON_INTERCEPTOR_READ_RANGE(ctx, addrlen, sizeof(*addrlen));\n    addr_sz = *addrlen;\n  }\n  // FIXME: under ASan the call below may write to freed memory and corrupt\n  // its metadata. See\n  // https://github.com/google/sanitizers/issues/321.\n  int res = REAL(getsockname)(sock_fd, addr, addrlen);\n  if (!res && addr && addrlen) {\n    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, addr, Min(addr_sz, *addrlen));\n  }\n  return res;\n}\n#define INIT_GETSOCKNAME COMMON_INTERCEPT_FUNCTION(getsockname);\n#else\n#define INIT_GETSOCKNAME\n#endif\n\n#if SANITIZER_INTERCEPT_GETHOSTBYNAME || SANITIZER_INTERCEPT_GETHOSTBYNAME_R\nstatic void write_hostent(void *ctx, struct __sanitizer_hostent *h) {\n  COMMON_INTERCEPTOR_WRITE_RANGE(ctx, h, sizeof(__sanitizer_hostent));\n  if (h->h_name)\n    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, h->h_name, internal_strlen(h->h_name) + 1);\n  char **p = h->h_aliases;\n  while (*p) {\n    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, *p, internal_strlen(*p) + 1);\n    ++p;\n  }\n  COMMON_INTERCEPTOR_WRITE_RANGE(\n      ctx, h->h_aliases, (p - h->h_aliases + 1) * sizeof(*h->h_aliases));\n  p = h->h_addr_list;\n  while (*p) {\n    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, *p, h->h_length);\n    ++p;\n  }\n  
COMMON_INTERCEPTOR_WRITE_RANGE(\n      ctx, h->h_addr_list, (p - h->h_addr_list + 1) * sizeof(*h->h_addr_list));\n}\n#endif\n\n#if SANITIZER_INTERCEPT_GETHOSTBYNAME\nINTERCEPTOR(struct __sanitizer_hostent *, gethostbyname, char *name) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, gethostbyname, name);\n  struct __sanitizer_hostent *res = REAL(gethostbyname)(name);\n  if (res) write_hostent(ctx, res);\n  return res;\n}\n\nINTERCEPTOR(struct __sanitizer_hostent *, gethostbyaddr, void *addr, int len,\n            int type) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, gethostbyaddr, addr, len, type);\n  COMMON_INTERCEPTOR_READ_RANGE(ctx, addr, len);\n  struct __sanitizer_hostent *res = REAL(gethostbyaddr)(addr, len, type);\n  if (res) write_hostent(ctx, res);\n  return res;\n}\n\nINTERCEPTOR(struct __sanitizer_hostent *, gethostent, int fake) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, gethostent, fake);\n  struct __sanitizer_hostent *res = REAL(gethostent)(fake);\n  if (res) write_hostent(ctx, res);\n  return res;\n}\n#define INIT_GETHOSTBYNAME                  \\\n  COMMON_INTERCEPT_FUNCTION(gethostent);    \\\n  COMMON_INTERCEPT_FUNCTION(gethostbyaddr); \\\n  COMMON_INTERCEPT_FUNCTION(gethostbyname);\n#else\n#define INIT_GETHOSTBYNAME\n#endif  // SANITIZER_INTERCEPT_GETHOSTBYNAME\n\n#if SANITIZER_INTERCEPT_GETHOSTBYNAME2\nINTERCEPTOR(struct __sanitizer_hostent *, gethostbyname2, char *name, int af) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, gethostbyname2, name, af);\n  struct __sanitizer_hostent *res = REAL(gethostbyname2)(name, af);\n  if (res) write_hostent(ctx, res);\n  return res;\n}\n#define INIT_GETHOSTBYNAME2 COMMON_INTERCEPT_FUNCTION(gethostbyname2);\n#else\n#define INIT_GETHOSTBYNAME2\n#endif  // SANITIZER_INTERCEPT_GETHOSTBYNAME2\n\n#if SANITIZER_INTERCEPT_GETHOSTBYNAME_R\nINTERCEPTOR(int, gethostbyname_r, char *name, struct __sanitizer_hostent *ret,\n            char *buf, SIZE_T buflen, __sanitizer_hostent **result,\n            int 
*h_errnop) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, gethostbyname_r, name, ret, buf, buflen, result,\n                           h_errnop);\n  // FIXME: under ASan the call below may write to freed memory and corrupt\n  // its metadata. See\n  // https://github.com/google/sanitizers/issues/321.\n  int res = REAL(gethostbyname_r)(name, ret, buf, buflen, result, h_errnop);\n  if (result) {\n    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, result, sizeof(*result));\n    if (res == 0 && *result) write_hostent(ctx, *result);\n  }\n  if (h_errnop)\n    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, h_errnop, sizeof(*h_errnop));\n  return res;\n}\n#define INIT_GETHOSTBYNAME_R COMMON_INTERCEPT_FUNCTION(gethostbyname_r);\n#else\n#define INIT_GETHOSTBYNAME_R\n#endif\n\n#if SANITIZER_INTERCEPT_GETHOSTENT_R\nINTERCEPTOR(int, gethostent_r, struct __sanitizer_hostent *ret, char *buf,\n            SIZE_T buflen, __sanitizer_hostent **result, int *h_errnop) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, gethostent_r, ret, buf, buflen, result,\n                           h_errnop);\n  // FIXME: under ASan the call below may write to freed memory and corrupt\n  // its metadata. 
See\n  // https://github.com/google/sanitizers/issues/321.\n  int res = REAL(gethostent_r)(ret, buf, buflen, result, h_errnop);\n  if (result) {\n    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, result, sizeof(*result));\n    if (res == 0 && *result) write_hostent(ctx, *result);\n  }\n  if (h_errnop)\n    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, h_errnop, sizeof(*h_errnop));\n  return res;\n}\n#define INIT_GETHOSTENT_R                  \\\n  COMMON_INTERCEPT_FUNCTION(gethostent_r);\n#else\n#define INIT_GETHOSTENT_R\n#endif\n\n#if SANITIZER_INTERCEPT_GETHOSTBYADDR_R\nINTERCEPTOR(int, gethostbyaddr_r, void *addr, int len, int type,\n            struct __sanitizer_hostent *ret, char *buf, SIZE_T buflen,\n            __sanitizer_hostent **result, int *h_errnop) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, gethostbyaddr_r, addr, len, type, ret, buf,\n                           buflen, result, h_errnop);\n  COMMON_INTERCEPTOR_READ_RANGE(ctx, addr, len);\n  // FIXME: under ASan the call below may write to freed memory and corrupt\n  // its metadata. 
See\n  // https://github.com/google/sanitizers/issues/321.\n  int res = REAL(gethostbyaddr_r)(addr, len, type, ret, buf, buflen, result,\n                                  h_errnop);\n  if (result) {\n    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, result, sizeof(*result));\n    if (res == 0 && *result) write_hostent(ctx, *result);\n  }\n  if (h_errnop)\n    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, h_errnop, sizeof(*h_errnop));\n  return res;\n}\n#define INIT_GETHOSTBYADDR_R                  \\\n  COMMON_INTERCEPT_FUNCTION(gethostbyaddr_r);\n#else\n#define INIT_GETHOSTBYADDR_R\n#endif\n\n#if SANITIZER_INTERCEPT_GETHOSTBYNAME2_R\nINTERCEPTOR(int, gethostbyname2_r, char *name, int af,\n            struct __sanitizer_hostent *ret, char *buf, SIZE_T buflen,\n            __sanitizer_hostent **result, int *h_errnop) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, gethostbyname2_r, name, af, ret, buf, buflen,\n                           result, h_errnop);\n  // FIXME: under ASan the call below may write to freed memory and corrupt\n  // its metadata. 
See\n  // https://github.com/google/sanitizers/issues/321.\n  int res =\n      REAL(gethostbyname2_r)(name, af, ret, buf, buflen, result, h_errnop);\n  if (result) {\n    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, result, sizeof(*result));\n    if (res == 0 && *result) write_hostent(ctx, *result);\n  }\n  if (h_errnop)\n    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, h_errnop, sizeof(*h_errnop));\n  return res;\n}\n#define INIT_GETHOSTBYNAME2_R                  \\\n  COMMON_INTERCEPT_FUNCTION(gethostbyname2_r);\n#else\n#define INIT_GETHOSTBYNAME2_R\n#endif\n\n#if SANITIZER_INTERCEPT_GETSOCKOPT\nINTERCEPTOR(int, getsockopt, int sockfd, int level, int optname, void *optval,\n            int *optlen) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, getsockopt, sockfd, level, optname, optval,\n                           optlen);\n  if (optlen) COMMON_INTERCEPTOR_READ_RANGE(ctx, optlen, sizeof(*optlen));\n  // FIXME: under ASan the call below may write to freed memory and corrupt\n  // its metadata. See\n  // https://github.com/google/sanitizers/issues/321.\n  int res = REAL(getsockopt)(sockfd, level, optname, optval, optlen);\n  if (res == 0)\n    if (optval && optlen) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, optval, *optlen);\n  return res;\n}\n#define INIT_GETSOCKOPT COMMON_INTERCEPT_FUNCTION(getsockopt);\n#else\n#define INIT_GETSOCKOPT\n#endif\n\n#if SANITIZER_INTERCEPT_ACCEPT\nINTERCEPTOR(int, accept, int fd, void *addr, unsigned *addrlen) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, accept, fd, addr, addrlen);\n  unsigned addrlen0 = 0;\n  if (addrlen) {\n    COMMON_INTERCEPTOR_READ_RANGE(ctx, addrlen, sizeof(*addrlen));\n    addrlen0 = *addrlen;\n  }\n  int fd2 = REAL(accept)(fd, addr, addrlen);\n  if (fd2 >= 0) {\n    if (fd >= 0) COMMON_INTERCEPTOR_FD_SOCKET_ACCEPT(ctx, fd, fd2);\n    if (addr && addrlen)\n      COMMON_INTERCEPTOR_WRITE_RANGE(ctx, addr, Min(*addrlen, addrlen0));\n  }\n  return fd2;\n}\n#define INIT_ACCEPT COMMON_INTERCEPT_FUNCTION(accept);\n#else\n#define 
INIT_ACCEPT\n#endif\n\n#if SANITIZER_INTERCEPT_ACCEPT4\nINTERCEPTOR(int, accept4, int fd, void *addr, unsigned *addrlen, int f) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, accept4, fd, addr, addrlen, f);\n  unsigned addrlen0 = 0;\n  if (addrlen) {\n    COMMON_INTERCEPTOR_READ_RANGE(ctx, addrlen, sizeof(*addrlen));\n    addrlen0 = *addrlen;\n  }\n  // FIXME: under ASan the call below may write to freed memory and corrupt\n  // its metadata. See\n  // https://github.com/google/sanitizers/issues/321.\n  int fd2 = REAL(accept4)(fd, addr, addrlen, f);\n  if (fd2 >= 0) {\n    if (fd >= 0) COMMON_INTERCEPTOR_FD_SOCKET_ACCEPT(ctx, fd, fd2);\n    if (addr && addrlen)\n      COMMON_INTERCEPTOR_WRITE_RANGE(ctx, addr, Min(*addrlen, addrlen0));\n  }\n  return fd2;\n}\n#define INIT_ACCEPT4 COMMON_INTERCEPT_FUNCTION(accept4);\n#else\n#define INIT_ACCEPT4\n#endif\n\n#if SANITIZER_INTERCEPT_PACCEPT\nINTERCEPTOR(int, paccept, int fd, void *addr, unsigned *addrlen,\n            __sanitizer_sigset_t *set, int f) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, paccept, fd, addr, addrlen, set, f);\n  unsigned addrlen0 = 0;\n  if (addrlen) {\n    COMMON_INTERCEPTOR_READ_RANGE(ctx, addrlen, sizeof(*addrlen));\n    addrlen0 = *addrlen;\n  }\n  if (set) COMMON_INTERCEPTOR_READ_RANGE(ctx, set, sizeof(*set));\n  int fd2 = REAL(paccept)(fd, addr, addrlen, set, f);\n  if (fd2 >= 0) {\n    if (fd >= 0) COMMON_INTERCEPTOR_FD_SOCKET_ACCEPT(ctx, fd, fd2);\n    if (addr && addrlen)\n      COMMON_INTERCEPTOR_WRITE_RANGE(ctx, addr, Min(*addrlen, addrlen0));\n  }\n  return fd2;\n}\n#define INIT_PACCEPT COMMON_INTERCEPT_FUNCTION(paccept);\n#else\n#define INIT_PACCEPT\n#endif\n\n#if SANITIZER_INTERCEPT_MODF\nINTERCEPTOR(double, modf, double x, double *iptr) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, modf, x, iptr);\n  // FIXME: under ASan the call below may write to freed memory and corrupt\n  // its metadata. 
See\n  // https://github.com/google/sanitizers/issues/321.\n  double res = REAL(modf)(x, iptr);\n  if (iptr) {\n    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, iptr, sizeof(*iptr));\n  }\n  return res;\n}\nINTERCEPTOR(float, modff, float x, float *iptr) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, modff, x, iptr);\n  // FIXME: under ASan the call below may write to freed memory and corrupt\n  // its metadata. See\n  // https://github.com/google/sanitizers/issues/321.\n  float res = REAL(modff)(x, iptr);\n  if (iptr) {\n    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, iptr, sizeof(*iptr));\n  }\n  return res;\n}\nINTERCEPTOR(long double, modfl, long double x, long double *iptr) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, modfl, x, iptr);\n  // FIXME: under ASan the call below may write to freed memory and corrupt\n  // its metadata. See\n  // https://github.com/google/sanitizers/issues/321.\n  long double res = REAL(modfl)(x, iptr);\n  if (iptr) {\n    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, iptr, sizeof(*iptr));\n  }\n  return res;\n}\n#define INIT_MODF                   \\\n  COMMON_INTERCEPT_FUNCTION(modf);  \\\n  COMMON_INTERCEPT_FUNCTION(modff); \\\n  COMMON_INTERCEPT_FUNCTION_LDBL(modfl);\n#else\n#define INIT_MODF\n#endif\n\n#if SANITIZER_INTERCEPT_RECVMSG || SANITIZER_INTERCEPT_RECVMMSG\nstatic void write_msghdr(void *ctx, struct __sanitizer_msghdr *msg,\n                         SSIZE_T maxlen) {\n  COMMON_INTERCEPTOR_WRITE_RANGE(ctx, msg, sizeof(*msg));\n  if (msg->msg_name && msg->msg_namelen)\n    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, msg->msg_name, msg->msg_namelen);\n  if (msg->msg_iov && msg->msg_iovlen)\n    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, msg->msg_iov,\n                                   sizeof(*msg->msg_iov) * msg->msg_iovlen);\n  write_iovec(ctx, msg->msg_iov, msg->msg_iovlen, maxlen);\n  if (msg->msg_control && msg->msg_controllen)\n    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, msg->msg_control, msg->msg_controllen);\n}\n#endif\n\n#if 
SANITIZER_INTERCEPT_RECVMSG\nINTERCEPTOR(SSIZE_T, recvmsg, int fd, struct __sanitizer_msghdr *msg,\n            int flags) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, recvmsg, fd, msg, flags);\n  // FIXME: under ASan the call below may write to freed memory and corrupt\n  // its metadata. See\n  // https://github.com/google/sanitizers/issues/321.\n  SSIZE_T res = REAL(recvmsg)(fd, msg, flags);\n  if (res >= 0) {\n    if (fd >= 0) COMMON_INTERCEPTOR_FD_ACQUIRE(ctx, fd);\n    if (msg) {\n      write_msghdr(ctx, msg, res);\n      COMMON_INTERCEPTOR_HANDLE_RECVMSG(ctx, msg);\n    }\n  }\n  return res;\n}\n#define INIT_RECVMSG COMMON_INTERCEPT_FUNCTION(recvmsg);\n#else\n#define INIT_RECVMSG\n#endif\n\n#if SANITIZER_INTERCEPT_RECVMMSG\nINTERCEPTOR(int, recvmmsg, int fd, struct __sanitizer_mmsghdr *msgvec,\n            unsigned int vlen, int flags, void *timeout) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, recvmmsg, fd, msgvec, vlen, flags, timeout);\n  if (timeout) COMMON_INTERCEPTOR_READ_RANGE(ctx, timeout, struct_timespec_sz);\n  int res = REAL(recvmmsg)(fd, msgvec, vlen, flags, timeout);\n  if (res >= 0) {\n    if (fd >= 0) COMMON_INTERCEPTOR_FD_ACQUIRE(ctx, fd);\n    for (int i = 0; i < res; ++i) {\n      COMMON_INTERCEPTOR_WRITE_RANGE(ctx, &msgvec[i].msg_len,\n                                     sizeof(msgvec[i].msg_len));\n      write_msghdr(ctx, &msgvec[i].msg_hdr, msgvec[i].msg_len);\n      COMMON_INTERCEPTOR_HANDLE_RECVMSG(ctx, &msgvec[i].msg_hdr);\n    }\n  }\n  return res;\n}\n#define INIT_RECVMMSG COMMON_INTERCEPT_FUNCTION(recvmmsg);\n#else\n#define INIT_RECVMMSG\n#endif\n\n#if SANITIZER_INTERCEPT_SENDMSG || SANITIZER_INTERCEPT_SENDMMSG\nstatic void read_msghdr_control(void *ctx, void *control, uptr controllen) {\n  const unsigned kCmsgDataOffset =\n      RoundUpTo(sizeof(__sanitizer_cmsghdr), sizeof(uptr));\n\n  char *p = (char *)control;\n  char *const control_end = p + controllen;\n  while (true) {\n    if (p + sizeof(__sanitizer_cmsghdr) > 
control_end) break;\n    __sanitizer_cmsghdr *cmsg = (__sanitizer_cmsghdr *)p;\n    COMMON_INTERCEPTOR_READ_RANGE(ctx, &cmsg->cmsg_len, sizeof(cmsg->cmsg_len));\n\n    if (p + RoundUpTo(cmsg->cmsg_len, sizeof(uptr)) > control_end) break;\n\n    COMMON_INTERCEPTOR_READ_RANGE(ctx, &cmsg->cmsg_level,\n                                  sizeof(cmsg->cmsg_level));\n    COMMON_INTERCEPTOR_READ_RANGE(ctx, &cmsg->cmsg_type,\n                                  sizeof(cmsg->cmsg_type));\n\n    if (cmsg->cmsg_len > kCmsgDataOffset) {\n      char *data = p + kCmsgDataOffset;\n      unsigned data_len = cmsg->cmsg_len - kCmsgDataOffset;\n      if (data_len > 0) COMMON_INTERCEPTOR_READ_RANGE(ctx, data, data_len);\n    }\n\n    p += RoundUpTo(cmsg->cmsg_len, sizeof(uptr));\n  }\n}\n\nstatic void read_msghdr(void *ctx, struct __sanitizer_msghdr *msg,\n                        SSIZE_T maxlen) {\n#define R(f) \\\n  COMMON_INTERCEPTOR_READ_RANGE(ctx, &msg->msg_##f, sizeof(msg->msg_##f))\n  R(name);\n  R(namelen);\n  R(iov);\n  R(iovlen);\n  R(control);\n  R(controllen);\n  R(flags);\n#undef R\n  if (msg->msg_name && msg->msg_namelen)\n    COMMON_INTERCEPTOR_READ_RANGE(ctx, msg->msg_name, msg->msg_namelen);\n  if (msg->msg_iov && msg->msg_iovlen)\n    COMMON_INTERCEPTOR_READ_RANGE(ctx, msg->msg_iov,\n                                  sizeof(*msg->msg_iov) * msg->msg_iovlen);\n  read_iovec(ctx, msg->msg_iov, msg->msg_iovlen, maxlen);\n  if (msg->msg_control && msg->msg_controllen)\n    read_msghdr_control(ctx, msg->msg_control, msg->msg_controllen);\n}\n#endif\n\n#if SANITIZER_INTERCEPT_SENDMSG\nINTERCEPTOR(SSIZE_T, sendmsg, int fd, struct __sanitizer_msghdr *msg,\n            int flags) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, sendmsg, fd, msg, flags);\n  if (fd >= 0) {\n    COMMON_INTERCEPTOR_FD_ACCESS(ctx, fd);\n    COMMON_INTERCEPTOR_FD_RELEASE(ctx, fd);\n  }\n  SSIZE_T res = REAL(sendmsg)(fd, msg, flags);\n  if (common_flags()->intercept_send && res >= 0 && msg)\n    
read_msghdr(ctx, msg, res);\n  return res;\n}\n#define INIT_SENDMSG COMMON_INTERCEPT_FUNCTION(sendmsg);\n#else\n#define INIT_SENDMSG\n#endif\n\n#if SANITIZER_INTERCEPT_SENDMMSG\nINTERCEPTOR(int, sendmmsg, int fd, struct __sanitizer_mmsghdr *msgvec,\n            unsigned vlen, int flags) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, sendmmsg, fd, msgvec, vlen, flags);\n  if (fd >= 0) {\n    COMMON_INTERCEPTOR_FD_ACCESS(ctx, fd);\n    COMMON_INTERCEPTOR_FD_RELEASE(ctx, fd);\n  }\n  int res = REAL(sendmmsg)(fd, msgvec, vlen, flags);\n  if (res >= 0 && msgvec) {\n    for (int i = 0; i < res; ++i) {\n      COMMON_INTERCEPTOR_WRITE_RANGE(ctx, &msgvec[i].msg_len,\n                                     sizeof(msgvec[i].msg_len));\n      if (common_flags()->intercept_send)\n        read_msghdr(ctx, &msgvec[i].msg_hdr, msgvec[i].msg_len);\n    }\n  }\n  return res;\n}\n#define INIT_SENDMMSG COMMON_INTERCEPT_FUNCTION(sendmmsg);\n#else\n#define INIT_SENDMMSG\n#endif\n\n#if SANITIZER_INTERCEPT_SYSMSG\nINTERCEPTOR(int, msgsnd, int msqid, const void *msgp, SIZE_T msgsz,\n            int msgflg) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, msgsnd, msqid, msgp, msgsz, msgflg);\n  if (msgp)\n    COMMON_INTERCEPTOR_READ_RANGE(ctx, msgp, sizeof(long) + msgsz);\n  int res = REAL(msgsnd)(msqid, msgp, msgsz, msgflg);\n  return res;\n}\n\nINTERCEPTOR(SSIZE_T, msgrcv, int msqid, void *msgp, SIZE_T msgsz,\n            long msgtyp, int msgflg) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, msgrcv, msqid, msgp, msgsz, msgtyp, msgflg);\n  SSIZE_T len = REAL(msgrcv)(msqid, msgp, msgsz, msgtyp, msgflg);\n  if (len != -1)\n    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, msgp, sizeof(long) + len);\n  return len;\n}\n\n#define INIT_SYSMSG                  \\\n  COMMON_INTERCEPT_FUNCTION(msgsnd); \\\n  COMMON_INTERCEPT_FUNCTION(msgrcv);\n#else\n#define INIT_SYSMSG\n#endif\n\n#if SANITIZER_INTERCEPT_GETPEERNAME\nINTERCEPTOR(int, getpeername, int sockfd, void *addr, unsigned *addrlen) {\n  void 
*ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, getpeername, sockfd, addr, addrlen);\n  unsigned addr_sz;\n  if (addrlen) {\n    COMMON_INTERCEPTOR_READ_RANGE(ctx, addrlen, sizeof(*addrlen));\n    addr_sz = *addrlen;\n  }\n  // FIXME: under ASan the call below may write to freed memory and corrupt\n  // its metadata. See\n  // https://github.com/google/sanitizers/issues/321.\n  int res = REAL(getpeername)(sockfd, addr, addrlen);\n  if (!res && addr && addrlen) {\n    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, addr, Min(addr_sz, *addrlen));\n  }\n  return res;\n}\n#define INIT_GETPEERNAME COMMON_INTERCEPT_FUNCTION(getpeername);\n#else\n#define INIT_GETPEERNAME\n#endif\n\n#if SANITIZER_INTERCEPT_SYSINFO\nINTERCEPTOR(int, sysinfo, void *info) {\n  void *ctx;\n  // FIXME: under ASan the call below may write to freed memory and corrupt\n  // its metadata. See\n  // https://github.com/google/sanitizers/issues/321.\n  COMMON_INTERCEPTOR_ENTER(ctx, sysinfo, info);\n  int res = REAL(sysinfo)(info);\n  if (!res && info)\n    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, info, struct_sysinfo_sz);\n  return res;\n}\n#define INIT_SYSINFO COMMON_INTERCEPT_FUNCTION(sysinfo);\n#else\n#define INIT_SYSINFO\n#endif\n\n#if SANITIZER_INTERCEPT_READDIR\nINTERCEPTOR(__sanitizer_dirent *, opendir, const char *path) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, opendir, path);\n  COMMON_INTERCEPTOR_READ_RANGE(ctx, path, internal_strlen(path) + 1);\n  __sanitizer_dirent *res = REAL(opendir)(path);\n  if (res)\n    COMMON_INTERCEPTOR_DIR_ACQUIRE(ctx, path);\n  return res;\n}\n\nINTERCEPTOR(__sanitizer_dirent *, readdir, void *dirp) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, readdir, dirp);\n  // FIXME: under ASan the call below may write to freed memory and corrupt\n  // its metadata. 
See\n  // https://github.com/google/sanitizers/issues/321.\n  __sanitizer_dirent *res = REAL(readdir)(dirp);\n  if (res) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, res, res->d_reclen);\n  return res;\n}\n\nINTERCEPTOR(int, readdir_r, void *dirp, __sanitizer_dirent *entry,\n            __sanitizer_dirent **result) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, readdir_r, dirp, entry, result);\n  // FIXME: under ASan the call below may write to freed memory and corrupt\n  // its metadata. See\n  // https://github.com/google/sanitizers/issues/321.\n  int res = REAL(readdir_r)(dirp, entry, result);\n  if (!res) {\n    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, result, sizeof(*result));\n    if (*result)\n      COMMON_INTERCEPTOR_WRITE_RANGE(ctx, *result, (*result)->d_reclen);\n  }\n  return res;\n}\n\n#define INIT_READDIR                  \\\n  COMMON_INTERCEPT_FUNCTION(opendir); \\\n  COMMON_INTERCEPT_FUNCTION(readdir); \\\n  COMMON_INTERCEPT_FUNCTION(readdir_r);\n#else\n#define INIT_READDIR\n#endif\n\n#if SANITIZER_INTERCEPT_READDIR64\nINTERCEPTOR(__sanitizer_dirent64 *, readdir64, void *dirp) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, readdir64, dirp);\n  // FIXME: under ASan the call below may write to freed memory and corrupt\n  // its metadata. See\n  // https://github.com/google/sanitizers/issues/321.\n  __sanitizer_dirent64 *res = REAL(readdir64)(dirp);\n  if (res) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, res, res->d_reclen);\n  return res;\n}\n\nINTERCEPTOR(int, readdir64_r, void *dirp, __sanitizer_dirent64 *entry,\n            __sanitizer_dirent64 **result) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, readdir64_r, dirp, entry, result);\n  // FIXME: under ASan the call below may write to freed memory and corrupt\n  // its metadata. 
See\n  // https://github.com/google/sanitizers/issues/321.\n  int res = REAL(readdir64_r)(dirp, entry, result);\n  if (!res) {\n    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, result, sizeof(*result));\n    if (*result)\n      COMMON_INTERCEPTOR_WRITE_RANGE(ctx, *result, (*result)->d_reclen);\n  }\n  return res;\n}\n#define INIT_READDIR64                  \\\n  COMMON_INTERCEPT_FUNCTION(readdir64); \\\n  COMMON_INTERCEPT_FUNCTION(readdir64_r);\n#else\n#define INIT_READDIR64\n#endif\n\n#if SANITIZER_INTERCEPT_PTRACE\nINTERCEPTOR(uptr, ptrace, int request, int pid, void *addr, void *data) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, ptrace, request, pid, addr, data);\n  __sanitizer_iovec local_iovec;\n\n  if (data) {\n    if (request == ptrace_setregs) {\n      COMMON_INTERCEPTOR_READ_RANGE(ctx, data, struct_user_regs_struct_sz);\n    } else if (request == ptrace_setfpregs) {\n      COMMON_INTERCEPTOR_READ_RANGE(ctx, data, struct_user_fpregs_struct_sz);\n    } else if (request == ptrace_setfpxregs) {\n      COMMON_INTERCEPTOR_READ_RANGE(ctx, data, struct_user_fpxregs_struct_sz);\n    } else if (request == ptrace_setvfpregs) {\n      COMMON_INTERCEPTOR_READ_RANGE(ctx, data, struct_user_vfpregs_struct_sz);\n    } else if (request == ptrace_setsiginfo) {\n      COMMON_INTERCEPTOR_READ_RANGE(ctx, data, siginfo_t_sz);\n\n    // Some kernel might zero the iovec::iov_base in case of invalid\n    // write access.  In this case copy the invalid address for further\n    // inspection.\n    } else if (request == ptrace_setregset || request == ptrace_getregset) {\n      __sanitizer_iovec *iovec = (__sanitizer_iovec*)data;\n      COMMON_INTERCEPTOR_READ_RANGE(ctx, iovec, sizeof(*iovec));\n      local_iovec = *iovec;\n      if (request == ptrace_setregset)\n        COMMON_INTERCEPTOR_READ_RANGE(ctx, iovec->iov_base, iovec->iov_len);\n    }\n  }\n\n  // FIXME: under ASan the call below may write to freed memory and corrupt\n  // its metadata. 
See\n  // https://github.com/google/sanitizers/issues/321.\n  uptr res = REAL(ptrace)(request, pid, addr, data);\n\n  if (!res && data) {\n    // Note that PEEK* requests assign different meaning to the return value.\n    // This function does not handle them (nor does it need to).\n    if (request == ptrace_getregs) {\n      COMMON_INTERCEPTOR_WRITE_RANGE(ctx, data, struct_user_regs_struct_sz);\n    } else if (request == ptrace_getfpregs) {\n      COMMON_INTERCEPTOR_WRITE_RANGE(ctx, data, struct_user_fpregs_struct_sz);\n    } else if (request == ptrace_getfpxregs) {\n      COMMON_INTERCEPTOR_WRITE_RANGE(ctx, data, struct_user_fpxregs_struct_sz);\n    } else if (request == ptrace_getvfpregs) {\n      COMMON_INTERCEPTOR_WRITE_RANGE(ctx, data, struct_user_vfpregs_struct_sz);\n    } else if (request == ptrace_getsiginfo) {\n      COMMON_INTERCEPTOR_WRITE_RANGE(ctx, data, siginfo_t_sz);\n    } else if (request == ptrace_geteventmsg) {\n      COMMON_INTERCEPTOR_WRITE_RANGE(ctx, data, sizeof(unsigned long));\n    } else if (request == ptrace_getregset) {\n      __sanitizer_iovec *iovec = (__sanitizer_iovec*)data;\n      COMMON_INTERCEPTOR_WRITE_RANGE(ctx, iovec, sizeof(*iovec));\n      COMMON_INTERCEPTOR_WRITE_RANGE(ctx, local_iovec.iov_base,\n                                     local_iovec.iov_len);\n    }\n  }\n  return res;\n}\n\n#define INIT_PTRACE COMMON_INTERCEPT_FUNCTION(ptrace);\n#else\n#define INIT_PTRACE\n#endif\n\n#if SANITIZER_INTERCEPT_SETLOCALE\nstatic void unpoison_ctype_arrays(void *ctx) {\n#if SANITIZER_NETBSD\n  // These arrays contain 256 regular elements in unsigned char range + 1 EOF\n  COMMON_INTERCEPTOR_WRITE_RANGE(ctx, _ctype_tab_, 257 * sizeof(short));\n  COMMON_INTERCEPTOR_WRITE_RANGE(ctx, _toupper_tab_, 257 * sizeof(short));\n  COMMON_INTERCEPTOR_WRITE_RANGE(ctx, _tolower_tab_, 257 * sizeof(short));\n#endif\n}\n\nINTERCEPTOR(char *, setlocale, int category, char *locale) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, setlocale, category, 
locale);\n  if (locale)\n    COMMON_INTERCEPTOR_READ_RANGE(ctx, locale, internal_strlen(locale) + 1);\n  char *res = REAL(setlocale)(category, locale);\n  if (res) {\n    COMMON_INTERCEPTOR_INITIALIZE_RANGE(res, internal_strlen(res) + 1);\n    unpoison_ctype_arrays(ctx);\n  }\n  return res;\n}\n\n#define INIT_SETLOCALE COMMON_INTERCEPT_FUNCTION(setlocale);\n#else\n#define INIT_SETLOCALE\n#endif\n\n#if SANITIZER_INTERCEPT_GETCWD\nINTERCEPTOR(char *, getcwd, char *buf, SIZE_T size) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, getcwd, buf, size);\n  // FIXME: under ASan the call below may write to freed memory and corrupt\n  // its metadata. See\n  // https://github.com/google/sanitizers/issues/321.\n  char *res = REAL(getcwd)(buf, size);\n  if (res) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, res, internal_strlen(res) + 1);\n  return res;\n}\n#define INIT_GETCWD COMMON_INTERCEPT_FUNCTION(getcwd);\n#else\n#define INIT_GETCWD\n#endif\n\n#if SANITIZER_INTERCEPT_GET_CURRENT_DIR_NAME\nINTERCEPTOR(char *, get_current_dir_name, int fake) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, get_current_dir_name, fake);\n  // FIXME: under ASan the call below may write to freed memory and corrupt\n  // its metadata. 
See\n  // https://github.com/google/sanitizers/issues/321.\n  char *res = REAL(get_current_dir_name)(fake);\n  if (res) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, res, internal_strlen(res) + 1);\n  return res;\n}\n\n#define INIT_GET_CURRENT_DIR_NAME \\\n  COMMON_INTERCEPT_FUNCTION(get_current_dir_name);\n#else\n#define INIT_GET_CURRENT_DIR_NAME\n#endif\n\nUNUSED static inline void FixRealStrtolEndptr(const char *nptr, char **endptr) {\n  CHECK(endptr);\n  if (nptr == *endptr) {\n    // No digits were found at strtol call, we need to find out the last\n    // symbol accessed by strtoll on our own.\n    // We get this symbol by skipping leading blanks and optional +/- sign.\n    while (IsSpace(*nptr)) nptr++;\n    if (*nptr == '+' || *nptr == '-') nptr++;\n    *endptr = const_cast<char *>(nptr);\n  }\n  CHECK(*endptr >= nptr);\n}\n\nUNUSED static inline void StrtolFixAndCheck(void *ctx, const char *nptr,\n                             char **endptr, char *real_endptr, int base) {\n  if (endptr) {\n    *endptr = real_endptr;\n    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, endptr, sizeof(*endptr));\n  }\n  // If base has unsupported value, strtol can exit with EINVAL\n  // without reading any characters. So do additional checks only\n  // if base is valid.\n  bool is_valid_base = (base == 0) || (2 <= base && base <= 36);\n  if (is_valid_base) {\n    FixRealStrtolEndptr(nptr, &real_endptr);\n  }\n  COMMON_INTERCEPTOR_READ_STRING(ctx, nptr, is_valid_base ?\n                                 (real_endptr - nptr) + 1 : 0);\n}\n\n\n#if SANITIZER_INTERCEPT_STRTOIMAX\nINTERCEPTOR(INTMAX_T, strtoimax, const char *nptr, char **endptr, int base) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, strtoimax, nptr, endptr, base);\n  // FIXME: under ASan the call below may write to freed memory and corrupt\n  // its metadata. 
See\n  // https://github.com/google/sanitizers/issues/321.\n  char *real_endptr;\n  INTMAX_T res = REAL(strtoimax)(nptr, &real_endptr, base);\n  StrtolFixAndCheck(ctx, nptr, endptr, real_endptr, base);\n  return res;\n}\n\nINTERCEPTOR(UINTMAX_T, strtoumax, const char *nptr, char **endptr, int base) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, strtoumax, nptr, endptr, base);\n  // FIXME: under ASan the call below may write to freed memory and corrupt\n  // its metadata. See\n  // https://github.com/google/sanitizers/issues/321.\n  char *real_endptr;\n  UINTMAX_T res = REAL(strtoumax)(nptr, &real_endptr, base);\n  StrtolFixAndCheck(ctx, nptr, endptr, real_endptr, base);\n  return res;\n}\n\n#define INIT_STRTOIMAX                  \\\n  COMMON_INTERCEPT_FUNCTION(strtoimax); \\\n  COMMON_INTERCEPT_FUNCTION(strtoumax);\n#else\n#define INIT_STRTOIMAX\n#endif\n\n#if SANITIZER_INTERCEPT_MBSTOWCS\nINTERCEPTOR(SIZE_T, mbstowcs, wchar_t *dest, const char *src, SIZE_T len) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, mbstowcs, dest, src, len);\n  // FIXME: under ASan the call below may write to freed memory and corrupt\n  // its metadata. See\n  // https://github.com/google/sanitizers/issues/321.\n  SIZE_T res = REAL(mbstowcs)(dest, src, len);\n  if (res != (SIZE_T) - 1 && dest) {\n    SIZE_T write_cnt = res + (res < len);\n    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, dest, write_cnt * sizeof(wchar_t));\n  }\n  return res;\n}\n\nINTERCEPTOR(SIZE_T, mbsrtowcs, wchar_t *dest, const char **src, SIZE_T len,\n            void *ps) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, mbsrtowcs, dest, src, len, ps);\n  if (src) COMMON_INTERCEPTOR_READ_RANGE(ctx, src, sizeof(*src));\n  if (ps) COMMON_INTERCEPTOR_READ_RANGE(ctx, ps, mbstate_t_sz);\n  // FIXME: under ASan the call below may write to freed memory and corrupt\n  // its metadata. 
See\n  // https://github.com/google/sanitizers/issues/321.\n  SIZE_T res = REAL(mbsrtowcs)(dest, src, len, ps);\n  if (res != (SIZE_T)(-1) && dest && src) {\n    // This function, and several others, may or may not write the terminating\n    // \\0 character. They write it iff they clear *src.\n    SIZE_T write_cnt = res + !*src;\n    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, dest, write_cnt * sizeof(wchar_t));\n  }\n  return res;\n}\n\n#define INIT_MBSTOWCS                  \\\n  COMMON_INTERCEPT_FUNCTION(mbstowcs); \\\n  COMMON_INTERCEPT_FUNCTION(mbsrtowcs);\n#else\n#define INIT_MBSTOWCS\n#endif\n\n#if SANITIZER_INTERCEPT_MBSNRTOWCS\nINTERCEPTOR(SIZE_T, mbsnrtowcs, wchar_t *dest, const char **src, SIZE_T nms,\n            SIZE_T len, void *ps) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, mbsnrtowcs, dest, src, nms, len, ps);\n  if (src) {\n    COMMON_INTERCEPTOR_READ_RANGE(ctx, src, sizeof(*src));\n    if (nms) COMMON_INTERCEPTOR_READ_RANGE(ctx, *src, nms);\n  }\n  if (ps) COMMON_INTERCEPTOR_READ_RANGE(ctx, ps, mbstate_t_sz);\n  // FIXME: under ASan the call below may write to freed memory and corrupt\n  // its metadata. See\n  // https://github.com/google/sanitizers/issues/321.\n  SIZE_T res = REAL(mbsnrtowcs)(dest, src, nms, len, ps);\n  if (res != (SIZE_T)(-1) && dest && src) {\n    SIZE_T write_cnt = res + !*src;\n    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, dest, write_cnt * sizeof(wchar_t));\n  }\n  return res;\n}\n\n#define INIT_MBSNRTOWCS COMMON_INTERCEPT_FUNCTION(mbsnrtowcs);\n#else\n#define INIT_MBSNRTOWCS\n#endif\n\n#if SANITIZER_INTERCEPT_WCSTOMBS\nINTERCEPTOR(SIZE_T, wcstombs, char *dest, const wchar_t *src, SIZE_T len) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, wcstombs, dest, src, len);\n  // FIXME: under ASan the call below may write to freed memory and corrupt\n  // its metadata. 
See\n  // https://github.com/google/sanitizers/issues/321.\n  SIZE_T res = REAL(wcstombs)(dest, src, len);\n  if (res != (SIZE_T) - 1 && dest) {\n    SIZE_T write_cnt = res + (res < len);\n    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, dest, write_cnt);\n  }\n  return res;\n}\n\nINTERCEPTOR(SIZE_T, wcsrtombs, char *dest, const wchar_t **src, SIZE_T len,\n            void *ps) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, wcsrtombs, dest, src, len, ps);\n  if (src) COMMON_INTERCEPTOR_READ_RANGE(ctx, src, sizeof(*src));\n  if (ps) COMMON_INTERCEPTOR_READ_RANGE(ctx, ps, mbstate_t_sz);\n  // FIXME: under ASan the call below may write to freed memory and corrupt\n  // its metadata. See\n  // https://github.com/google/sanitizers/issues/321.\n  SIZE_T res = REAL(wcsrtombs)(dest, src, len, ps);\n  if (res != (SIZE_T) - 1 && dest && src) {\n    SIZE_T write_cnt = res + !*src;\n    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, dest, write_cnt);\n  }\n  return res;\n}\n\n#define INIT_WCSTOMBS                  \\\n  COMMON_INTERCEPT_FUNCTION(wcstombs); \\\n  COMMON_INTERCEPT_FUNCTION(wcsrtombs);\n#else\n#define INIT_WCSTOMBS\n#endif\n\n#if SANITIZER_INTERCEPT_WCSNRTOMBS\nINTERCEPTOR(SIZE_T, wcsnrtombs, char *dest, const wchar_t **src, SIZE_T nms,\n            SIZE_T len, void *ps) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, wcsnrtombs, dest, src, nms, len, ps);\n  if (src) {\n    COMMON_INTERCEPTOR_READ_RANGE(ctx, src, sizeof(*src));\n    if (nms) COMMON_INTERCEPTOR_READ_RANGE(ctx, *src, nms);\n  }\n  if (ps) COMMON_INTERCEPTOR_READ_RANGE(ctx, ps, mbstate_t_sz);\n  // FIXME: under ASan the call below may write to freed memory and corrupt\n  // its metadata. 
See\n  // https://github.com/google/sanitizers/issues/321.\n  SIZE_T res = REAL(wcsnrtombs)(dest, src, nms, len, ps);\n  if (res != ((SIZE_T)-1) && dest && src) {\n    SIZE_T write_cnt = res + !*src;\n    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, dest, write_cnt);\n  }\n  return res;\n}\n\n#define INIT_WCSNRTOMBS COMMON_INTERCEPT_FUNCTION(wcsnrtombs);\n#else\n#define INIT_WCSNRTOMBS\n#endif\n\n\n#if SANITIZER_INTERCEPT_WCRTOMB\nINTERCEPTOR(SIZE_T, wcrtomb, char *dest, wchar_t src, void *ps) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, wcrtomb, dest, src, ps);\n  if (ps) COMMON_INTERCEPTOR_READ_RANGE(ctx, ps, mbstate_t_sz);\n\n  if (!dest)\n    return REAL(wcrtomb)(dest, src, ps);\n\n  char local_dest[32];\n  SIZE_T res = REAL(wcrtomb)(local_dest, src, ps);\n  if (res != ((SIZE_T)-1)) {\n    CHECK_LE(res, sizeof(local_dest));\n    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, dest, res);\n    REAL(memcpy)(dest, local_dest, res);\n  }\n  return res;\n}\n\n#define INIT_WCRTOMB COMMON_INTERCEPT_FUNCTION(wcrtomb);\n#else\n#define INIT_WCRTOMB\n#endif\n\n#if SANITIZER_INTERCEPT_WCTOMB\nINTERCEPTOR(int, wctomb, char *dest, wchar_t src) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, wctomb, dest, src);\n  if (!dest)\n    return REAL(wctomb)(dest, src);\n\n  char local_dest[32];\n  int res = REAL(wctomb)(local_dest, src);\n  if (res != -1) {\n    CHECK_LE(res, sizeof(local_dest));\n    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, dest, res);\n    REAL(memcpy)(dest, local_dest, res);\n  }\n  return res;\n}\n\n#define INIT_WCTOMB COMMON_INTERCEPT_FUNCTION(wctomb);\n#else\n#define INIT_WCTOMB\n#endif\n\n#if SANITIZER_INTERCEPT_TCGETATTR\nINTERCEPTOR(int, tcgetattr, int fd, void *termios_p) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, tcgetattr, fd, termios_p);\n  // FIXME: under ASan the call below may write to freed memory and corrupt\n  // its metadata. 
See\n  // https://github.com/google/sanitizers/issues/321.\n  int res = REAL(tcgetattr)(fd, termios_p);\n  if (!res && termios_p)\n    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, termios_p, struct_termios_sz);\n  return res;\n}\n\n#define INIT_TCGETATTR COMMON_INTERCEPT_FUNCTION(tcgetattr);\n#else\n#define INIT_TCGETATTR\n#endif\n\n#if SANITIZER_INTERCEPT_REALPATH\nINTERCEPTOR(char *, realpath, const char *path, char *resolved_path) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, realpath, path, resolved_path);\n  if (path) COMMON_INTERCEPTOR_READ_RANGE(ctx, path, internal_strlen(path) + 1);\n\n  // Workaround a bug in glibc where dlsym(RTLD_NEXT, ...) returns the oldest\n  // version of a versioned symbol. For realpath(), this gives us something\n  // (called __old_realpath) that does not handle NULL in the second argument.\n  // Handle it as part of the interceptor.\n  char *allocated_path = nullptr;\n  if (!resolved_path)\n    allocated_path = resolved_path = (char *)WRAP(malloc)(path_max + 1);\n\n  char *res = REAL(realpath)(path, resolved_path);\n  if (allocated_path && !res)\n    WRAP(free)(allocated_path);\n  if (res) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, res, internal_strlen(res) + 1);\n  return res;\n}\n#  define INIT_REALPATH COMMON_INTERCEPT_FUNCTION(realpath);\n#else\n#define INIT_REALPATH\n#endif\n\n#if SANITIZER_INTERCEPT_CANONICALIZE_FILE_NAME\nINTERCEPTOR(char *, canonicalize_file_name, const char *path) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, canonicalize_file_name, path);\n  if (path) COMMON_INTERCEPTOR_READ_RANGE(ctx, path, internal_strlen(path) + 1);\n  char *res = REAL(canonicalize_file_name)(path);\n  if (res) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, res, internal_strlen(res) + 1);\n  return res;\n}\n#define INIT_CANONICALIZE_FILE_NAME \\\n  COMMON_INTERCEPT_FUNCTION(canonicalize_file_name);\n#else\n#define INIT_CANONICALIZE_FILE_NAME\n#endif\n\n#if SANITIZER_INTERCEPT_CONFSTR\nINTERCEPTOR(SIZE_T, confstr, int name, char *buf, SIZE_T len) {\n  void 
*ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, confstr, name, buf, len);\n  // FIXME: under ASan the call below may write to freed memory and corrupt\n  // its metadata. See\n  // https://github.com/google/sanitizers/issues/321.\n  SIZE_T res = REAL(confstr)(name, buf, len);\n  if (buf && res)\n    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buf, res < len ? res : len);\n  return res;\n}\n#define INIT_CONFSTR COMMON_INTERCEPT_FUNCTION(confstr);\n#else\n#define INIT_CONFSTR\n#endif\n\n#if SANITIZER_INTERCEPT_SCHED_GETAFFINITY\nINTERCEPTOR(int, sched_getaffinity, int pid, SIZE_T cpusetsize, void *mask) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, sched_getaffinity, pid, cpusetsize, mask);\n  // FIXME: under ASan the call below may write to freed memory and corrupt\n  // its metadata. See\n  // https://github.com/google/sanitizers/issues/321.\n  int res = REAL(sched_getaffinity)(pid, cpusetsize, mask);\n  if (mask && !res) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, mask, cpusetsize);\n  return res;\n}\n#define INIT_SCHED_GETAFFINITY COMMON_INTERCEPT_FUNCTION(sched_getaffinity);\n#else\n#define INIT_SCHED_GETAFFINITY\n#endif\n\n#if SANITIZER_INTERCEPT_SCHED_GETPARAM\nINTERCEPTOR(int, sched_getparam, int pid, void *param) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, sched_getparam, pid, param);\n  int res = REAL(sched_getparam)(pid, param);\n  if (!res) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, param, struct_sched_param_sz);\n  return res;\n}\n#define INIT_SCHED_GETPARAM COMMON_INTERCEPT_FUNCTION(sched_getparam);\n#else\n#define INIT_SCHED_GETPARAM\n#endif\n\n#if SANITIZER_INTERCEPT_STRERROR\nINTERCEPTOR(char *, strerror, int errnum) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, strerror, errnum);\n  COMMON_INTERCEPTOR_STRERROR();\n  char *res = REAL(strerror)(errnum);\n  if (res) COMMON_INTERCEPTOR_INITIALIZE_RANGE(res, internal_strlen(res) + 1);\n  return res;\n}\n#define INIT_STRERROR COMMON_INTERCEPT_FUNCTION(strerror);\n#else\n#define INIT_STRERROR\n#endif\n\n#if 
SANITIZER_INTERCEPT_STRERROR_R\n// There are 2 versions of strerror_r:\n//  * POSIX version returns 0 on success, negative error code on failure,\n//    writes message to buf.\n//  * GNU version returns message pointer, which points to either buf or some\n//    static storage.\n#if ((_POSIX_C_SOURCE >= 200112L || _XOPEN_SOURCE >= 600) && !_GNU_SOURCE) || \\\n    SANITIZER_MAC || SANITIZER_ANDROID || SANITIZER_NETBSD ||                 \\\n    SANITIZER_FREEBSD\n// POSIX version. Spec is not clear on whether buf is NULL-terminated.\n// At least on OSX, buf contents are valid even when the call fails.\nINTERCEPTOR(int, strerror_r, int errnum, char *buf, SIZE_T buflen) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, strerror_r, errnum, buf, buflen);\n  // FIXME: under ASan the call below may write to freed memory and corrupt\n  // its metadata. See\n  // https://github.com/google/sanitizers/issues/321.\n  int res = REAL(strerror_r)(errnum, buf, buflen);\n\n  SIZE_T sz = internal_strnlen(buf, buflen);\n  if (sz < buflen) ++sz;\n  COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buf, sz);\n  return res;\n}\n#else\n// GNU version.\nINTERCEPTOR(char *, strerror_r, int errnum, char *buf, SIZE_T buflen) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, strerror_r, errnum, buf, buflen);\n  // FIXME: under ASan the call below may write to freed memory and corrupt\n  // its metadata. 
See\n  // https://github.com/google/sanitizers/issues/321.\n  char *res = REAL(strerror_r)(errnum, buf, buflen);\n  if (res == buf)\n    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, res, internal_strlen(res) + 1);\n  else\n    COMMON_INTERCEPTOR_INITIALIZE_RANGE(res, internal_strlen(res) + 1);\n  return res;\n}\n#endif //(_POSIX_C_SOURCE >= 200112L || _XOPEN_SOURCE >= 600) && !_GNU_SOURCE ||\n       //SANITIZER_MAC\n#define INIT_STRERROR_R COMMON_INTERCEPT_FUNCTION(strerror_r);\n#else\n#define INIT_STRERROR_R\n#endif\n\n#if SANITIZER_INTERCEPT_XPG_STRERROR_R\nINTERCEPTOR(int, __xpg_strerror_r, int errnum, char *buf, SIZE_T buflen) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, __xpg_strerror_r, errnum, buf, buflen);\n  // FIXME: under ASan the call below may write to freed memory and corrupt\n  // its metadata. See\n  // https://github.com/google/sanitizers/issues/321.\n  int res = REAL(__xpg_strerror_r)(errnum, buf, buflen);\n  // This version always returns a null-terminated string.\n  if (buf && buflen)\n    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buf, internal_strlen(buf) + 1);\n  return res;\n}\n#define INIT_XPG_STRERROR_R COMMON_INTERCEPT_FUNCTION(__xpg_strerror_r);\n#else\n#define INIT_XPG_STRERROR_R\n#endif\n\n#if SANITIZER_INTERCEPT_SCANDIR\ntypedef int (*scandir_filter_f)(const struct __sanitizer_dirent *);\ntypedef int (*scandir_compar_f)(const struct __sanitizer_dirent **,\n                                const struct __sanitizer_dirent **);\n\nstatic THREADLOCAL scandir_filter_f scandir_filter;\nstatic THREADLOCAL scandir_compar_f scandir_compar;\n\nstatic int wrapped_scandir_filter(const struct __sanitizer_dirent *dir) {\n  COMMON_INTERCEPTOR_UNPOISON_PARAM(1);\n  COMMON_INTERCEPTOR_INITIALIZE_RANGE(dir, dir->d_reclen);\n  return scandir_filter(dir);\n}\n\nstatic int wrapped_scandir_compar(const struct __sanitizer_dirent **a,\n                                  const struct __sanitizer_dirent **b) {\n  COMMON_INTERCEPTOR_UNPOISON_PARAM(2);\n  
COMMON_INTERCEPTOR_INITIALIZE_RANGE(a, sizeof(*a));\n  COMMON_INTERCEPTOR_INITIALIZE_RANGE(*a, (*a)->d_reclen);\n  COMMON_INTERCEPTOR_INITIALIZE_RANGE(b, sizeof(*b));\n  COMMON_INTERCEPTOR_INITIALIZE_RANGE(*b, (*b)->d_reclen);\n  return scandir_compar(a, b);\n}\n\nINTERCEPTOR(int, scandir, char *dirp, __sanitizer_dirent ***namelist,\n            scandir_filter_f filter, scandir_compar_f compar) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, scandir, dirp, namelist, filter, compar);\n  if (dirp) COMMON_INTERCEPTOR_READ_RANGE(ctx, dirp, internal_strlen(dirp) + 1);\n  scandir_filter = filter;\n  scandir_compar = compar;\n  // FIXME: under ASan the call below may write to freed memory and corrupt\n  // its metadata. See\n  // https://github.com/google/sanitizers/issues/321.\n  int res = REAL(scandir)(dirp, namelist,\n                          filter ? wrapped_scandir_filter : nullptr,\n                          compar ? wrapped_scandir_compar : nullptr);\n  scandir_filter = nullptr;\n  scandir_compar = nullptr;\n  if (namelist && res > 0) {\n    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, namelist, sizeof(*namelist));\n    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, *namelist, sizeof(**namelist) * res);\n    for (int i = 0; i < res; ++i)\n      COMMON_INTERCEPTOR_WRITE_RANGE(ctx, (*namelist)[i],\n                                     (*namelist)[i]->d_reclen);\n  }\n  return res;\n}\n#define INIT_SCANDIR COMMON_INTERCEPT_FUNCTION(scandir);\n#else\n#define INIT_SCANDIR\n#endif\n\n#if SANITIZER_INTERCEPT_SCANDIR64\ntypedef int (*scandir64_filter_f)(const struct __sanitizer_dirent64 *);\ntypedef int (*scandir64_compar_f)(const struct __sanitizer_dirent64 **,\n                                  const struct __sanitizer_dirent64 **);\n\nstatic THREADLOCAL scandir64_filter_f scandir64_filter;\nstatic THREADLOCAL scandir64_compar_f scandir64_compar;\n\nstatic int wrapped_scandir64_filter(const struct __sanitizer_dirent64 *dir) {\n  COMMON_INTERCEPTOR_UNPOISON_PARAM(1);\n  
COMMON_INTERCEPTOR_INITIALIZE_RANGE(dir, dir->d_reclen);\n  return scandir64_filter(dir);\n}\n\nstatic int wrapped_scandir64_compar(const struct __sanitizer_dirent64 **a,\n                                    const struct __sanitizer_dirent64 **b) {\n  COMMON_INTERCEPTOR_UNPOISON_PARAM(2);\n  COMMON_INTERCEPTOR_INITIALIZE_RANGE(a, sizeof(*a));\n  COMMON_INTERCEPTOR_INITIALIZE_RANGE(*a, (*a)->d_reclen);\n  COMMON_INTERCEPTOR_INITIALIZE_RANGE(b, sizeof(*b));\n  COMMON_INTERCEPTOR_INITIALIZE_RANGE(*b, (*b)->d_reclen);\n  return scandir64_compar(a, b);\n}\n\nINTERCEPTOR(int, scandir64, char *dirp, __sanitizer_dirent64 ***namelist,\n            scandir64_filter_f filter, scandir64_compar_f compar) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, scandir64, dirp, namelist, filter, compar);\n  if (dirp) COMMON_INTERCEPTOR_READ_RANGE(ctx, dirp, internal_strlen(dirp) + 1);\n  scandir64_filter = filter;\n  scandir64_compar = compar;\n  // FIXME: under ASan the call below may write to freed memory and corrupt\n  // its metadata. See\n  // https://github.com/google/sanitizers/issues/321.\n  int res =\n      REAL(scandir64)(dirp, namelist,\n                      filter ? wrapped_scandir64_filter : nullptr,\n                      compar ? 
wrapped_scandir64_compar : nullptr);\n  scandir64_filter = nullptr;\n  scandir64_compar = nullptr;\n  if (namelist && res > 0) {\n    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, namelist, sizeof(*namelist));\n    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, *namelist, sizeof(**namelist) * res);\n    for (int i = 0; i < res; ++i)\n      COMMON_INTERCEPTOR_WRITE_RANGE(ctx, (*namelist)[i],\n                                     (*namelist)[i]->d_reclen);\n  }\n  return res;\n}\n#define INIT_SCANDIR64 COMMON_INTERCEPT_FUNCTION(scandir64);\n#else\n#define INIT_SCANDIR64\n#endif\n\n#if SANITIZER_INTERCEPT_GETGROUPS\nINTERCEPTOR(int, getgroups, int size, u32 *lst) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, getgroups, size, lst);\n  // FIXME: under ASan the call below may write to freed memory and corrupt\n  // its metadata. See\n  // https://github.com/google/sanitizers/issues/321.\n  int res = REAL(getgroups)(size, lst);\n  if (res >= 0 && lst && size > 0)\n    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, lst, res * sizeof(*lst));\n  return res;\n}\n#define INIT_GETGROUPS COMMON_INTERCEPT_FUNCTION(getgroups);\n#else\n#define INIT_GETGROUPS\n#endif\n\n#if SANITIZER_INTERCEPT_POLL\nstatic void read_pollfd(void *ctx, __sanitizer_pollfd *fds,\n                        __sanitizer_nfds_t nfds) {\n  for (unsigned i = 0; i < nfds; ++i) {\n    COMMON_INTERCEPTOR_READ_RANGE(ctx, &fds[i].fd, sizeof(fds[i].fd));\n    COMMON_INTERCEPTOR_READ_RANGE(ctx, &fds[i].events, sizeof(fds[i].events));\n  }\n}\n\nstatic void write_pollfd(void *ctx, __sanitizer_pollfd *fds,\n                         __sanitizer_nfds_t nfds) {\n  for (unsigned i = 0; i < nfds; ++i)\n    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, &fds[i].revents,\n                                   sizeof(fds[i].revents));\n}\n\nINTERCEPTOR(int, poll, __sanitizer_pollfd *fds, __sanitizer_nfds_t nfds,\n            int timeout) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, poll, fds, nfds, timeout);\n  if (fds && nfds) read_pollfd(ctx, fds, nfds);\n  int 
res = COMMON_INTERCEPTOR_BLOCK_REAL(poll)(fds, nfds, timeout);\n  if (fds && nfds) write_pollfd(ctx, fds, nfds);\n  return res;\n}\n#define INIT_POLL COMMON_INTERCEPT_FUNCTION(poll);\n#else\n#define INIT_POLL\n#endif\n\n#if SANITIZER_INTERCEPT_PPOLL\nINTERCEPTOR(int, ppoll, __sanitizer_pollfd *fds, __sanitizer_nfds_t nfds,\n            void *timeout_ts, __sanitizer_sigset_t *sigmask) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, ppoll, fds, nfds, timeout_ts, sigmask);\n  if (fds && nfds) read_pollfd(ctx, fds, nfds);\n  if (timeout_ts)\n    COMMON_INTERCEPTOR_READ_RANGE(ctx, timeout_ts, struct_timespec_sz);\n  if (sigmask) COMMON_INTERCEPTOR_READ_RANGE(ctx, sigmask, sizeof(*sigmask));\n  int res =\n      COMMON_INTERCEPTOR_BLOCK_REAL(ppoll)(fds, nfds, timeout_ts, sigmask);\n  if (fds && nfds) write_pollfd(ctx, fds, nfds);\n  return res;\n}\n#define INIT_PPOLL COMMON_INTERCEPT_FUNCTION(ppoll);\n#else\n#define INIT_PPOLL\n#endif\n\n#if SANITIZER_INTERCEPT_WORDEXP\nINTERCEPTOR(int, wordexp, char *s, __sanitizer_wordexp_t *p, int flags) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, wordexp, s, p, flags);\n  if (s) COMMON_INTERCEPTOR_READ_RANGE(ctx, s, internal_strlen(s) + 1);\n  // FIXME: under ASan the call below may write to freed memory and corrupt\n  // its metadata. See\n  // https://github.com/google/sanitizers/issues/321.\n  int res = REAL(wordexp)(s, p, flags);\n  if (!res && p) {\n    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, p, sizeof(*p));\n    uptr we_wordc =\n        ((flags & wordexp_wrde_dooffs) ? 
p->we_offs : 0) + p->we_wordc;\n    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, p->we_wordv,\n                                   sizeof(*p->we_wordv) * (we_wordc + 1));\n    for (uptr i = 0; i < we_wordc; ++i) {\n      char *w = p->we_wordv[i];\n      if (w) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, w, internal_strlen(w) + 1);\n    }\n  }\n  return res;\n}\n#define INIT_WORDEXP COMMON_INTERCEPT_FUNCTION(wordexp);\n#else\n#define INIT_WORDEXP\n#endif\n\n#if SANITIZER_INTERCEPT_SIGWAIT\nINTERCEPTOR(int, sigwait, __sanitizer_sigset_t *set, int *sig) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, sigwait, set, sig);\n  if (set) COMMON_INTERCEPTOR_READ_RANGE(ctx, set, sizeof(*set));\n  // FIXME: under ASan the call below may write to freed memory and corrupt\n  // its metadata. See\n  // https://github.com/google/sanitizers/issues/321.\n  int res = COMMON_INTERCEPTOR_BLOCK_REAL(sigwait)(set, sig);\n  if (!res && sig) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, sig, sizeof(*sig));\n  return res;\n}\n#define INIT_SIGWAIT COMMON_INTERCEPT_FUNCTION(sigwait);\n#else\n#define INIT_SIGWAIT\n#endif\n\n#if SANITIZER_INTERCEPT_SIGWAITINFO\nINTERCEPTOR(int, sigwaitinfo, __sanitizer_sigset_t *set, void *info) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, sigwaitinfo, set, info);\n  if (set) COMMON_INTERCEPTOR_READ_RANGE(ctx, set, sizeof(*set));\n  // FIXME: under ASan the call below may write to freed memory and corrupt\n  // its metadata. 
See\n  // https://github.com/google/sanitizers/issues/321.\n  int res = COMMON_INTERCEPTOR_BLOCK_REAL(sigwaitinfo)(set, info);\n  if (res > 0 && info) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, info, siginfo_t_sz);\n  return res;\n}\n#define INIT_SIGWAITINFO COMMON_INTERCEPT_FUNCTION(sigwaitinfo);\n#else\n#define INIT_SIGWAITINFO\n#endif\n\n#if SANITIZER_INTERCEPT_SIGTIMEDWAIT\nINTERCEPTOR(int, sigtimedwait, __sanitizer_sigset_t *set, void *info,\n            void *timeout) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, sigtimedwait, set, info, timeout);\n  if (timeout) COMMON_INTERCEPTOR_READ_RANGE(ctx, timeout, struct_timespec_sz);\n  if (set) COMMON_INTERCEPTOR_READ_RANGE(ctx, set, sizeof(*set));\n  // FIXME: under ASan the call below may write to freed memory and corrupt\n  // its metadata. See\n  // https://github.com/google/sanitizers/issues/321.\n  int res = COMMON_INTERCEPTOR_BLOCK_REAL(sigtimedwait)(set, info, timeout);\n  if (res > 0 && info) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, info, siginfo_t_sz);\n  return res;\n}\n#define INIT_SIGTIMEDWAIT COMMON_INTERCEPT_FUNCTION(sigtimedwait);\n#else\n#define INIT_SIGTIMEDWAIT\n#endif\n\n#if SANITIZER_INTERCEPT_SIGSETOPS\nINTERCEPTOR(int, sigemptyset, __sanitizer_sigset_t *set) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, sigemptyset, set);\n  // FIXME: under ASan the call below may write to freed memory and corrupt\n  // its metadata. See\n  // https://github.com/google/sanitizers/issues/321.\n  int res = REAL(sigemptyset)(set);\n  if (!res && set) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, set, sizeof(*set));\n  return res;\n}\n\nINTERCEPTOR(int, sigfillset, __sanitizer_sigset_t *set) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, sigfillset, set);\n  // FIXME: under ASan the call below may write to freed memory and corrupt\n  // its metadata. 
See\n  // https://github.com/google/sanitizers/issues/321.\n  int res = REAL(sigfillset)(set);\n  if (!res && set) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, set, sizeof(*set));\n  return res;\n}\n#define INIT_SIGSETOPS                    \\\n  COMMON_INTERCEPT_FUNCTION(sigemptyset); \\\n  COMMON_INTERCEPT_FUNCTION(sigfillset);\n#else\n#define INIT_SIGSETOPS\n#endif\n\n#if SANITIZER_INTERCEPT_SIGSET_LOGICOPS\nINTERCEPTOR(int, sigandset, __sanitizer_sigset_t *dst,\n            __sanitizer_sigset_t *src1, __sanitizer_sigset_t *src2) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, sigandset, dst, src1, src2);\n  if (src1)\n    COMMON_INTERCEPTOR_READ_RANGE(ctx, src1, sizeof(*src1));\n  if (src2)\n    COMMON_INTERCEPTOR_READ_RANGE(ctx, src2, sizeof(*src2));\n  int res = REAL(sigandset)(dst, src1, src2);\n  if (!res && dst)\n    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, dst, sizeof(*dst));\n  return res;\n}\n\nINTERCEPTOR(int, sigorset, __sanitizer_sigset_t *dst,\n            __sanitizer_sigset_t *src1, __sanitizer_sigset_t *src2) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, sigorset, dst, src1, src2);\n  if (src1)\n    COMMON_INTERCEPTOR_READ_RANGE(ctx, src1, sizeof(*src1));\n  if (src2)\n    COMMON_INTERCEPTOR_READ_RANGE(ctx, src2, sizeof(*src2));\n  int res = REAL(sigorset)(dst, src1, src2);\n  if (!res && dst)\n    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, dst, sizeof(*dst));\n  return res;\n}\n#define INIT_SIGSET_LOGICOPS                    \\\n  COMMON_INTERCEPT_FUNCTION(sigandset);   \\\n  COMMON_INTERCEPT_FUNCTION(sigorset);\n#else\n#define INIT_SIGSET_LOGICOPS\n#endif\n\n#if SANITIZER_INTERCEPT_SIGPENDING\nINTERCEPTOR(int, sigpending, __sanitizer_sigset_t *set) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, sigpending, set);\n  // FIXME: under ASan the call below may write to freed memory and corrupt\n  // its metadata. 
See\n  // https://github.com/google/sanitizers/issues/321.\n  int res = REAL(sigpending)(set);\n  if (!res && set) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, set, sizeof(*set));\n  return res;\n}\n#define INIT_SIGPENDING COMMON_INTERCEPT_FUNCTION(sigpending);\n#else\n#define INIT_SIGPENDING\n#endif\n\n#if SANITIZER_INTERCEPT_SIGPROCMASK\nINTERCEPTOR(int, sigprocmask, int how, __sanitizer_sigset_t *set,\n            __sanitizer_sigset_t *oldset) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, sigprocmask, how, set, oldset);\n  if (set) COMMON_INTERCEPTOR_READ_RANGE(ctx, set, sizeof(*set));\n  // FIXME: under ASan the call below may write to freed memory and corrupt\n  // its metadata. See\n  // https://github.com/google/sanitizers/issues/321.\n  int res = REAL(sigprocmask)(how, set, oldset);\n  if (!res && oldset)\n    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, oldset, sizeof(*oldset));\n  return res;\n}\n#define INIT_SIGPROCMASK COMMON_INTERCEPT_FUNCTION(sigprocmask);\n#else\n#define INIT_SIGPROCMASK\n#endif\n\n#if SANITIZER_INTERCEPT_PTHREAD_SIGMASK\nINTERCEPTOR(int, pthread_sigmask, int how, __sanitizer_sigset_t *set,\n            __sanitizer_sigset_t *oldset) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, pthread_sigmask, how, set, oldset);\n  if (set) COMMON_INTERCEPTOR_READ_RANGE(ctx, set, sizeof(*set));\n  // FIXME: under ASan the call below may write to freed memory and corrupt\n  // its metadata. 
See\n  // https://github.com/google/sanitizers/issues/321.\n  int res = REAL(pthread_sigmask)(how, set, oldset);\n  if (!res && oldset)\n    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, oldset, sizeof(*oldset));\n  return res;\n}\n#define INIT_PTHREAD_SIGMASK COMMON_INTERCEPT_FUNCTION(pthread_sigmask);\n#else\n#define INIT_PTHREAD_SIGMASK\n#endif\n\n#if SANITIZER_INTERCEPT_BACKTRACE\nINTERCEPTOR(int, backtrace, void **buffer, int size) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, backtrace, buffer, size);\n  // FIXME: under ASan the call below may write to freed memory and corrupt\n  // its metadata. See\n  // https://github.com/google/sanitizers/issues/321.\n  int res = REAL(backtrace)(buffer, size);\n  if (res && buffer)\n    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buffer, res * sizeof(*buffer));\n  return res;\n}\n\nINTERCEPTOR(char **, backtrace_symbols, void **buffer, int size) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, backtrace_symbols, buffer, size);\n  if (buffer && size)\n    COMMON_INTERCEPTOR_READ_RANGE(ctx, buffer, size * sizeof(*buffer));\n  // FIXME: under ASan the call below may write to freed memory and corrupt\n  // its metadata. 
See\n  // https://github.com/google/sanitizers/issues/321.\n  char **res = REAL(backtrace_symbols)(buffer, size);\n  if (res && size) {\n    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, res, size * sizeof(*res));\n    for (int i = 0; i < size; ++i)\n      COMMON_INTERCEPTOR_WRITE_RANGE(ctx, res[i], internal_strlen(res[i]) + 1);\n  }\n  return res;\n}\n#define INIT_BACKTRACE                  \\\n  COMMON_INTERCEPT_FUNCTION(backtrace); \\\n  COMMON_INTERCEPT_FUNCTION(backtrace_symbols);\n#else\n#define INIT_BACKTRACE\n#endif\n\n#if SANITIZER_INTERCEPT__EXIT\nINTERCEPTOR(void, _exit, int status) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, _exit, status);\n  COMMON_INTERCEPTOR_USER_CALLBACK_START();\n  int status1 = COMMON_INTERCEPTOR_ON_EXIT(ctx);\n  COMMON_INTERCEPTOR_USER_CALLBACK_END();\n  if (status == 0) status = status1;\n  REAL(_exit)(status);\n}\n#define INIT__EXIT COMMON_INTERCEPT_FUNCTION(_exit);\n#else\n#define INIT__EXIT\n#endif\n\n#if SANITIZER_INTERCEPT_PTHREAD_MUTEX\nINTERCEPTOR(int, pthread_mutex_lock, void *m) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, pthread_mutex_lock, m);\n  COMMON_INTERCEPTOR_MUTEX_PRE_LOCK(ctx, m);\n  int res = REAL(pthread_mutex_lock)(m);\n  if (res == errno_EOWNERDEAD)\n    COMMON_INTERCEPTOR_MUTEX_REPAIR(ctx, m);\n  if (res == 0 || res == errno_EOWNERDEAD)\n    COMMON_INTERCEPTOR_MUTEX_POST_LOCK(ctx, m);\n  if (res == errno_EINVAL)\n    COMMON_INTERCEPTOR_MUTEX_INVALID(ctx, m);\n  return res;\n}\n\nINTERCEPTOR(int, pthread_mutex_unlock, void *m) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, pthread_mutex_unlock, m);\n  COMMON_INTERCEPTOR_MUTEX_UNLOCK(ctx, m);\n  int res = REAL(pthread_mutex_unlock)(m);\n  if (res == errno_EINVAL)\n    COMMON_INTERCEPTOR_MUTEX_INVALID(ctx, m);\n  return res;\n}\n\n#define INIT_PTHREAD_MUTEX_LOCK COMMON_INTERCEPT_FUNCTION(pthread_mutex_lock)\n#define INIT_PTHREAD_MUTEX_UNLOCK \\\n  COMMON_INTERCEPT_FUNCTION(pthread_mutex_unlock)\n#else\n#define INIT_PTHREAD_MUTEX_LOCK\n#define 
INIT_PTHREAD_MUTEX_UNLOCK\n#endif\n\n#if SANITIZER_INTERCEPT___PTHREAD_MUTEX\nINTERCEPTOR(int, __pthread_mutex_lock, void *m) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, __pthread_mutex_lock, m);\n  COMMON_INTERCEPTOR_MUTEX_PRE_LOCK(ctx, m);\n  int res = REAL(__pthread_mutex_lock)(m);\n  if (res == errno_EOWNERDEAD)\n    COMMON_INTERCEPTOR_MUTEX_REPAIR(ctx, m);\n  if (res == 0 || res == errno_EOWNERDEAD)\n    COMMON_INTERCEPTOR_MUTEX_POST_LOCK(ctx, m);\n  if (res == errno_EINVAL)\n    COMMON_INTERCEPTOR_MUTEX_INVALID(ctx, m);\n  return res;\n}\n\nINTERCEPTOR(int, __pthread_mutex_unlock, void *m) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, __pthread_mutex_unlock, m);\n  COMMON_INTERCEPTOR_MUTEX_UNLOCK(ctx, m);\n  int res = REAL(__pthread_mutex_unlock)(m);\n  if (res == errno_EINVAL)\n    COMMON_INTERCEPTOR_MUTEX_INVALID(ctx, m);\n  return res;\n}\n\n#define INIT___PTHREAD_MUTEX_LOCK \\\n  COMMON_INTERCEPT_FUNCTION(__pthread_mutex_lock)\n#define INIT___PTHREAD_MUTEX_UNLOCK \\\n  COMMON_INTERCEPT_FUNCTION(__pthread_mutex_unlock)\n#else\n#define INIT___PTHREAD_MUTEX_LOCK\n#define INIT___PTHREAD_MUTEX_UNLOCK\n#endif\n\n#if SANITIZER_INTERCEPT___LIBC_MUTEX\nINTERCEPTOR(int, __libc_mutex_lock, void *m)\nALIAS(WRAPPER_NAME(pthread_mutex_lock));\n\nINTERCEPTOR(int, __libc_mutex_unlock, void *m)\nALIAS(WRAPPER_NAME(pthread_mutex_unlock));\n\nINTERCEPTOR(int, __libc_thr_setcancelstate, int state, int *oldstate)\nALIAS(WRAPPER_NAME(pthread_setcancelstate));\n\n#define INIT___LIBC_MUTEX_LOCK COMMON_INTERCEPT_FUNCTION(__libc_mutex_lock)\n#define INIT___LIBC_MUTEX_UNLOCK COMMON_INTERCEPT_FUNCTION(__libc_mutex_unlock)\n#define INIT___LIBC_THR_SETCANCELSTATE \\\n  COMMON_INTERCEPT_FUNCTION(__libc_thr_setcancelstate)\n#else\n#define INIT___LIBC_MUTEX_LOCK\n#define INIT___LIBC_MUTEX_UNLOCK\n#define INIT___LIBC_THR_SETCANCELSTATE\n#endif\n\n#if SANITIZER_INTERCEPT_GETMNTENT || SANITIZER_INTERCEPT_GETMNTENT_R\nstatic void write_mntent(void *ctx, __sanitizer_mntent *mnt) {\n  
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, mnt, sizeof(*mnt));\n  if (mnt->mnt_fsname)\n    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, mnt->mnt_fsname,\n                                   internal_strlen(mnt->mnt_fsname) + 1);\n  if (mnt->mnt_dir)\n    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, mnt->mnt_dir,\n                                   internal_strlen(mnt->mnt_dir) + 1);\n  if (mnt->mnt_type)\n    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, mnt->mnt_type,\n                                   internal_strlen(mnt->mnt_type) + 1);\n  if (mnt->mnt_opts)\n    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, mnt->mnt_opts,\n                                   internal_strlen(mnt->mnt_opts) + 1);\n}\n#endif\n\n#if SANITIZER_INTERCEPT_GETMNTENT\nINTERCEPTOR(__sanitizer_mntent *, getmntent, void *fp) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, getmntent, fp);\n  __sanitizer_mntent *res = REAL(getmntent)(fp);\n  if (res) write_mntent(ctx, res);\n  return res;\n}\n#define INIT_GETMNTENT COMMON_INTERCEPT_FUNCTION(getmntent);\n#else\n#define INIT_GETMNTENT\n#endif\n\n#if SANITIZER_INTERCEPT_GETMNTENT_R\nINTERCEPTOR(__sanitizer_mntent *, getmntent_r, void *fp,\n            __sanitizer_mntent *mntbuf, char *buf, int buflen) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, getmntent_r, fp, mntbuf, buf, buflen);\n  __sanitizer_mntent *res = REAL(getmntent_r)(fp, mntbuf, buf, buflen);\n  if (res) write_mntent(ctx, res);\n  return res;\n}\n#define INIT_GETMNTENT_R COMMON_INTERCEPT_FUNCTION(getmntent_r);\n#else\n#define INIT_GETMNTENT_R\n#endif\n\n#if SANITIZER_INTERCEPT_STATFS\nINTERCEPTOR(int, statfs, char *path, void *buf) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, statfs, path, buf);\n  if (path) COMMON_INTERCEPTOR_READ_RANGE(ctx, path, internal_strlen(path) + 1);\n  // FIXME: under ASan the call below may write to freed memory and corrupt\n  // its metadata. 
See\n  // https://github.com/google/sanitizers/issues/321.\n  int res = REAL(statfs)(path, buf);\n  if (!res) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buf, struct_statfs_sz);\n  return res;\n}\nINTERCEPTOR(int, fstatfs, int fd, void *buf) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, fstatfs, fd, buf);\n  // FIXME: under ASan the call below may write to freed memory and corrupt\n  // its metadata. See\n  // https://github.com/google/sanitizers/issues/321.\n  int res = REAL(fstatfs)(fd, buf);\n  if (!res) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buf, struct_statfs_sz);\n  return res;\n}\n#define INIT_STATFS                  \\\n  COMMON_INTERCEPT_FUNCTION(statfs); \\\n  COMMON_INTERCEPT_FUNCTION(fstatfs);\n#else\n#define INIT_STATFS\n#endif\n\n#if SANITIZER_INTERCEPT_STATFS64\nINTERCEPTOR(int, statfs64, char *path, void *buf) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, statfs64, path, buf);\n  if (path) COMMON_INTERCEPTOR_READ_RANGE(ctx, path, internal_strlen(path) + 1);\n  // FIXME: under ASan the call below may write to freed memory and corrupt\n  // its metadata. See\n  // https://github.com/google/sanitizers/issues/321.\n  int res = REAL(statfs64)(path, buf);\n  if (!res) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buf, struct_statfs64_sz);\n  return res;\n}\nINTERCEPTOR(int, fstatfs64, int fd, void *buf) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, fstatfs64, fd, buf);\n  // FIXME: under ASan the call below may write to freed memory and corrupt\n  // its metadata. 
See\n  // https://github.com/google/sanitizers/issues/321.\n  int res = REAL(fstatfs64)(fd, buf);\n  if (!res) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buf, struct_statfs64_sz);\n  return res;\n}\n#define INIT_STATFS64                  \\\n  COMMON_INTERCEPT_FUNCTION(statfs64); \\\n  COMMON_INTERCEPT_FUNCTION(fstatfs64);\n#else\n#define INIT_STATFS64\n#endif\n\n#if SANITIZER_INTERCEPT_STATVFS\nINTERCEPTOR(int, statvfs, char *path, void *buf) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, statvfs, path, buf);\n  if (path) COMMON_INTERCEPTOR_READ_RANGE(ctx, path, internal_strlen(path) + 1);\n  // FIXME: under ASan the call below may write to freed memory and corrupt\n  // its metadata. See\n  // https://github.com/google/sanitizers/issues/321.\n  int res = REAL(statvfs)(path, buf);\n  if (!res) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buf, struct_statvfs_sz);\n  return res;\n}\nINTERCEPTOR(int, fstatvfs, int fd, void *buf) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, fstatvfs, fd, buf);\n  COMMON_INTERCEPTOR_FD_ACCESS(ctx, fd);\n  // FIXME: under ASan the call below may write to freed memory and corrupt\n  // its metadata. See\n  // https://github.com/google/sanitizers/issues/321.\n  int res = REAL(fstatvfs)(fd, buf);\n  if (!res) {\n    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buf, struct_statvfs_sz);\n    if (fd >= 0)\n      COMMON_INTERCEPTOR_FD_ACQUIRE(ctx, fd);\n  }\n  return res;\n}\n#define INIT_STATVFS                  \\\n  COMMON_INTERCEPT_FUNCTION(statvfs); \\\n  COMMON_INTERCEPT_FUNCTION(fstatvfs);\n#else\n#define INIT_STATVFS\n#endif\n\n#if SANITIZER_INTERCEPT_STATVFS64\nINTERCEPTOR(int, statvfs64, char *path, void *buf) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, statvfs64, path, buf);\n  if (path) COMMON_INTERCEPTOR_READ_RANGE(ctx, path, internal_strlen(path) + 1);\n  // FIXME: under ASan the call below may write to freed memory and corrupt\n  // its metadata. 
See\n  // https://github.com/google/sanitizers/issues/321.\n  int res = REAL(statvfs64)(path, buf);\n  if (!res) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buf, struct_statvfs64_sz);\n  return res;\n}\nINTERCEPTOR(int, fstatvfs64, int fd, void *buf) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, fstatvfs64, fd, buf);\n  // FIXME: under ASan the call below may write to freed memory and corrupt\n  // its metadata. See\n  // https://github.com/google/sanitizers/issues/321.\n  int res = REAL(fstatvfs64)(fd, buf);\n  if (!res) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buf, struct_statvfs64_sz);\n  return res;\n}\n#define INIT_STATVFS64                  \\\n  COMMON_INTERCEPT_FUNCTION(statvfs64); \\\n  COMMON_INTERCEPT_FUNCTION(fstatvfs64);\n#else\n#define INIT_STATVFS64\n#endif\n\n#if SANITIZER_INTERCEPT_INITGROUPS\nINTERCEPTOR(int, initgroups, char *user, u32 group) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, initgroups, user, group);\n  if (user) COMMON_INTERCEPTOR_READ_RANGE(ctx, user, internal_strlen(user) + 1);\n  int res = REAL(initgroups)(user, group);\n  return res;\n}\n#define INIT_INITGROUPS COMMON_INTERCEPT_FUNCTION(initgroups);\n#else\n#define INIT_INITGROUPS\n#endif\n\n#if SANITIZER_INTERCEPT_ETHER_NTOA_ATON\nINTERCEPTOR(char *, ether_ntoa, __sanitizer_ether_addr *addr) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, ether_ntoa, addr);\n  if (addr) COMMON_INTERCEPTOR_READ_RANGE(ctx, addr, sizeof(*addr));\n  char *res = REAL(ether_ntoa)(addr);\n  if (res) COMMON_INTERCEPTOR_INITIALIZE_RANGE(res, internal_strlen(res) + 1);\n  return res;\n}\nINTERCEPTOR(__sanitizer_ether_addr *, ether_aton, char *buf) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, ether_aton, buf);\n  if (buf) COMMON_INTERCEPTOR_READ_RANGE(ctx, buf, internal_strlen(buf) + 1);\n  __sanitizer_ether_addr *res = REAL(ether_aton)(buf);\n  if (res) COMMON_INTERCEPTOR_INITIALIZE_RANGE(res, sizeof(*res));\n  return res;\n}\n#define INIT_ETHER_NTOA_ATON             \\\n  
COMMON_INTERCEPT_FUNCTION(ether_ntoa); \\\n  COMMON_INTERCEPT_FUNCTION(ether_aton);\n#else\n#define INIT_ETHER_NTOA_ATON\n#endif\n\n#if SANITIZER_INTERCEPT_ETHER_HOST\nINTERCEPTOR(int, ether_ntohost, char *hostname, __sanitizer_ether_addr *addr) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, ether_ntohost, hostname, addr);\n  if (addr) COMMON_INTERCEPTOR_READ_RANGE(ctx, addr, sizeof(*addr));\n  // FIXME: under ASan the call below may write to freed memory and corrupt\n  // its metadata. See\n  // https://github.com/google/sanitizers/issues/321.\n  int res = REAL(ether_ntohost)(hostname, addr);\n  if (!res && hostname)\n    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, hostname, internal_strlen(hostname) + 1);\n  return res;\n}\nINTERCEPTOR(int, ether_hostton, char *hostname, __sanitizer_ether_addr *addr) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, ether_hostton, hostname, addr);\n  if (hostname)\n    COMMON_INTERCEPTOR_READ_RANGE(ctx, hostname, internal_strlen(hostname) + 1);\n  // FIXME: under ASan the call below may write to freed memory and corrupt\n  // its metadata. See\n  // https://github.com/google/sanitizers/issues/321.\n  int res = REAL(ether_hostton)(hostname, addr);\n  if (!res && addr) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, addr, sizeof(*addr));\n  return res;\n}\nINTERCEPTOR(int, ether_line, char *line, __sanitizer_ether_addr *addr,\n            char *hostname) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, ether_line, line, addr, hostname);\n  if (line) COMMON_INTERCEPTOR_READ_RANGE(ctx, line, internal_strlen(line) + 1);\n  // FIXME: under ASan the call below may write to freed memory and corrupt\n  // its metadata. 
See\n  // https://github.com/google/sanitizers/issues/321.\n  int res = REAL(ether_line)(line, addr, hostname);\n  if (!res) {\n    if (addr) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, addr, sizeof(*addr));\n    if (hostname)\n      COMMON_INTERCEPTOR_WRITE_RANGE(ctx, hostname, internal_strlen(hostname) + 1);\n  }\n  return res;\n}\n#define INIT_ETHER_HOST                     \\\n  COMMON_INTERCEPT_FUNCTION(ether_ntohost); \\\n  COMMON_INTERCEPT_FUNCTION(ether_hostton); \\\n  COMMON_INTERCEPT_FUNCTION(ether_line);\n#else\n#define INIT_ETHER_HOST\n#endif\n\n#if SANITIZER_INTERCEPT_ETHER_R\nINTERCEPTOR(char *, ether_ntoa_r, __sanitizer_ether_addr *addr, char *buf) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, ether_ntoa_r, addr, buf);\n  if (addr) COMMON_INTERCEPTOR_READ_RANGE(ctx, addr, sizeof(*addr));\n  // FIXME: under ASan the call below may write to freed memory and corrupt\n  // its metadata. See\n  // https://github.com/google/sanitizers/issues/321.\n  char *res = REAL(ether_ntoa_r)(addr, buf);\n  if (res) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, res, internal_strlen(res) + 1);\n  return res;\n}\nINTERCEPTOR(__sanitizer_ether_addr *, ether_aton_r, char *buf,\n            __sanitizer_ether_addr *addr) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, ether_aton_r, buf, addr);\n  if (buf) COMMON_INTERCEPTOR_READ_RANGE(ctx, buf, internal_strlen(buf) + 1);\n  // FIXME: under ASan the call below may write to freed memory and corrupt\n  // its metadata. 
See\n  // https://github.com/google/sanitizers/issues/321.\n  __sanitizer_ether_addr *res = REAL(ether_aton_r)(buf, addr);\n  if (res) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, res, sizeof(*res));\n  return res;\n}\n#define INIT_ETHER_R                       \\\n  COMMON_INTERCEPT_FUNCTION(ether_ntoa_r); \\\n  COMMON_INTERCEPT_FUNCTION(ether_aton_r);\n#else\n#define INIT_ETHER_R\n#endif\n\n#if SANITIZER_INTERCEPT_SHMCTL\nINTERCEPTOR(int, shmctl, int shmid, int cmd, void *buf) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, shmctl, shmid, cmd, buf);\n  // FIXME: under ASan the call below may write to freed memory and corrupt\n  // its metadata. See\n  // https://github.com/google/sanitizers/issues/321.\n  int res = REAL(shmctl)(shmid, cmd, buf);\n  if (res >= 0) {\n    unsigned sz = 0;\n    if (cmd == shmctl_ipc_stat || cmd == shmctl_shm_stat)\n      sz = sizeof(__sanitizer_shmid_ds);\n    else if (cmd == shmctl_ipc_info)\n      sz = struct_shminfo_sz;\n    else if (cmd == shmctl_shm_info)\n      sz = struct_shm_info_sz;\n    if (sz) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buf, sz);\n  }\n  return res;\n}\n#define INIT_SHMCTL COMMON_INTERCEPT_FUNCTION(shmctl);\n#else\n#define INIT_SHMCTL\n#endif\n\n#if SANITIZER_INTERCEPT_RANDOM_R\nINTERCEPTOR(int, random_r, void *buf, u32 *result) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, random_r, buf, result);\n  // FIXME: under ASan the call below may write to freed memory and corrupt\n  // its metadata. See\n  // https://github.com/google/sanitizers/issues/321.\n  int res = REAL(random_r)(buf, result);\n  if (!res && result)\n    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, result, sizeof(*result));\n  return res;\n}\n#define INIT_RANDOM_R COMMON_INTERCEPT_FUNCTION(random_r);\n#else\n#define INIT_RANDOM_R\n#endif\n\n// FIXME: under ASan the REAL() call below may write to freed memory and corrupt\n// its metadata. 
See\n// https://github.com/google/sanitizers/issues/321.\n#if SANITIZER_INTERCEPT_PTHREAD_ATTR_GET ||              \\\n    SANITIZER_INTERCEPT_PTHREAD_ATTR_GET_SCHED ||        \\\n    SANITIZER_INTERCEPT_PTHREAD_ATTR_GETINHERITSSCHED || \\\n    SANITIZER_INTERCEPT_PTHREAD_MUTEXATTR_GET ||         \\\n    SANITIZER_INTERCEPT_PTHREAD_RWLOCKATTR_GET ||        \\\n    SANITIZER_INTERCEPT_PTHREAD_CONDATTR_GET ||          \\\n    SANITIZER_INTERCEPT_PTHREAD_BARRIERATTR_GET\n#define INTERCEPTOR_PTHREAD_OBJECT_ATTR_GET(fn, sz)            \\\n  INTERCEPTOR(int, fn, void *attr, void *r) {                  \\\n    void *ctx;                                                 \\\n    COMMON_INTERCEPTOR_ENTER(ctx, fn, attr, r);                \\\n    int res = REAL(fn)(attr, r);                               \\\n    if (!res && r) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, r, sz); \\\n    return res;                                                \\\n  }\n#define INTERCEPTOR_PTHREAD_ATTR_GET(what, sz) \\\n  INTERCEPTOR_PTHREAD_OBJECT_ATTR_GET(pthread_attr_get##what, sz)\n#define INTERCEPTOR_PTHREAD_MUTEXATTR_GET(what, sz) \\\n  INTERCEPTOR_PTHREAD_OBJECT_ATTR_GET(pthread_mutexattr_get##what, sz)\n#define INTERCEPTOR_PTHREAD_RWLOCKATTR_GET(what, sz) \\\n  INTERCEPTOR_PTHREAD_OBJECT_ATTR_GET(pthread_rwlockattr_get##what, sz)\n#define INTERCEPTOR_PTHREAD_CONDATTR_GET(what, sz) \\\n  INTERCEPTOR_PTHREAD_OBJECT_ATTR_GET(pthread_condattr_get##what, sz)\n#define INTERCEPTOR_PTHREAD_BARRIERATTR_GET(what, sz) \\\n  INTERCEPTOR_PTHREAD_OBJECT_ATTR_GET(pthread_barrierattr_get##what, sz)\n#endif\n\n#if SANITIZER_INTERCEPT_PTHREAD_ATTR_GET\nINTERCEPTOR_PTHREAD_ATTR_GET(detachstate, sizeof(int))\nINTERCEPTOR_PTHREAD_ATTR_GET(guardsize, sizeof(SIZE_T))\nINTERCEPTOR_PTHREAD_ATTR_GET(scope, sizeof(int))\nINTERCEPTOR_PTHREAD_ATTR_GET(stacksize, sizeof(SIZE_T))\nINTERCEPTOR(int, pthread_attr_getstack, void *attr, void **addr, SIZE_T *size) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, 
pthread_attr_getstack, attr, addr, size);\n  // FIXME: under ASan the call below may write to freed memory and corrupt\n  // its metadata. See\n  // https://github.com/google/sanitizers/issues/321.\n  int res = REAL(pthread_attr_getstack)(attr, addr, size);\n  if (!res) {\n    if (addr) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, addr, sizeof(*addr));\n    if (size) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, size, sizeof(*size));\n  }\n  return res;\n}\n\n// We may need to call the real pthread_attr_getstack from the run-time\n// in sanitizer_common, but we don't want to include the interception headers\n// there. So, just define this function here.\nnamespace __sanitizer {\nextern \"C\" {\nint real_pthread_attr_getstack(void *attr, void **addr, SIZE_T *size) {\n  return REAL(pthread_attr_getstack)(attr, addr, size);\n}\n}  // extern \"C\"\n}  // namespace __sanitizer\n\n#define INIT_PTHREAD_ATTR_GET                             \\\n  COMMON_INTERCEPT_FUNCTION(pthread_attr_getdetachstate); \\\n  COMMON_INTERCEPT_FUNCTION(pthread_attr_getguardsize);   \\\n  COMMON_INTERCEPT_FUNCTION(pthread_attr_getscope);       \\\n  COMMON_INTERCEPT_FUNCTION(pthread_attr_getstacksize);   \\\n  COMMON_INTERCEPT_FUNCTION(pthread_attr_getstack);\n#else\n#define INIT_PTHREAD_ATTR_GET\n#endif\n\n#if SANITIZER_INTERCEPT_PTHREAD_ATTR_GET_SCHED\nINTERCEPTOR_PTHREAD_ATTR_GET(schedparam, struct_sched_param_sz)\nINTERCEPTOR_PTHREAD_ATTR_GET(schedpolicy, sizeof(int))\n\n#define INIT_PTHREAD_ATTR_GET_SCHED                      \\\n  COMMON_INTERCEPT_FUNCTION(pthread_attr_getschedparam); \\\n  COMMON_INTERCEPT_FUNCTION(pthread_attr_getschedpolicy);\n#else\n#define INIT_PTHREAD_ATTR_GET_SCHED\n#endif\n\n#if SANITIZER_INTERCEPT_PTHREAD_ATTR_GETINHERITSCHED\nINTERCEPTOR_PTHREAD_ATTR_GET(inheritsched, sizeof(int))\n\n#define INIT_PTHREAD_ATTR_GETINHERITSCHED \\\n  COMMON_INTERCEPT_FUNCTION(pthread_attr_getinheritsched);\n#else\n#define INIT_PTHREAD_ATTR_GETINHERITSCHED\n#endif\n\n#if 
SANITIZER_INTERCEPT_PTHREAD_ATTR_GETAFFINITY_NP\nINTERCEPTOR(int, pthread_attr_getaffinity_np, void *attr, SIZE_T cpusetsize,\n            void *cpuset) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, pthread_attr_getaffinity_np, attr, cpusetsize,\n                           cpuset);\n  // FIXME: under ASan the call below may write to freed memory and corrupt\n  // its metadata. See\n  // https://github.com/google/sanitizers/issues/321.\n  int res = REAL(pthread_attr_getaffinity_np)(attr, cpusetsize, cpuset);\n  if (!res && cpusetsize && cpuset)\n    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, cpuset, cpusetsize);\n  return res;\n}\n\n#define INIT_PTHREAD_ATTR_GETAFFINITY_NP \\\n  COMMON_INTERCEPT_FUNCTION(pthread_attr_getaffinity_np);\n#else\n#define INIT_PTHREAD_ATTR_GETAFFINITY_NP\n#endif\n\n#if SANITIZER_INTERCEPT_PTHREAD_MUTEXATTR_GETPSHARED\nINTERCEPTOR_PTHREAD_MUTEXATTR_GET(pshared, sizeof(int))\n#define INIT_PTHREAD_MUTEXATTR_GETPSHARED \\\n  COMMON_INTERCEPT_FUNCTION(pthread_mutexattr_getpshared);\n#else\n#define INIT_PTHREAD_MUTEXATTR_GETPSHARED\n#endif\n\n#if SANITIZER_INTERCEPT_PTHREAD_MUTEXATTR_GETTYPE\nINTERCEPTOR_PTHREAD_MUTEXATTR_GET(type, sizeof(int))\n#define INIT_PTHREAD_MUTEXATTR_GETTYPE \\\n  COMMON_INTERCEPT_FUNCTION(pthread_mutexattr_gettype);\n#else\n#define INIT_PTHREAD_MUTEXATTR_GETTYPE\n#endif\n\n#if SANITIZER_INTERCEPT_PTHREAD_MUTEXATTR_GETPROTOCOL\nINTERCEPTOR_PTHREAD_MUTEXATTR_GET(protocol, sizeof(int))\n#define INIT_PTHREAD_MUTEXATTR_GETPROTOCOL \\\n  COMMON_INTERCEPT_FUNCTION(pthread_mutexattr_getprotocol);\n#else\n#define INIT_PTHREAD_MUTEXATTR_GETPROTOCOL\n#endif\n\n#if SANITIZER_INTERCEPT_PTHREAD_MUTEXATTR_GETPRIOCEILING\nINTERCEPTOR_PTHREAD_MUTEXATTR_GET(prioceiling, sizeof(int))\n#define INIT_PTHREAD_MUTEXATTR_GETPRIOCEILING \\\n  COMMON_INTERCEPT_FUNCTION(pthread_mutexattr_getprioceiling);\n#else\n#define INIT_PTHREAD_MUTEXATTR_GETPRIOCEILING\n#endif\n\n#if 
SANITIZER_INTERCEPT_PTHREAD_MUTEXATTR_GETROBUST\nINTERCEPTOR_PTHREAD_MUTEXATTR_GET(robust, sizeof(int))\n#define INIT_PTHREAD_MUTEXATTR_GETROBUST \\\n  COMMON_INTERCEPT_FUNCTION(pthread_mutexattr_getrobust);\n#else\n#define INIT_PTHREAD_MUTEXATTR_GETROBUST\n#endif\n\n#if SANITIZER_INTERCEPT_PTHREAD_MUTEXATTR_GETROBUST_NP\nINTERCEPTOR_PTHREAD_MUTEXATTR_GET(robust_np, sizeof(int))\n#define INIT_PTHREAD_MUTEXATTR_GETROBUST_NP \\\n  COMMON_INTERCEPT_FUNCTION(pthread_mutexattr_getrobust_np);\n#else\n#define INIT_PTHREAD_MUTEXATTR_GETROBUST_NP\n#endif\n\n#if SANITIZER_INTERCEPT_PTHREAD_RWLOCKATTR_GETPSHARED\nINTERCEPTOR_PTHREAD_RWLOCKATTR_GET(pshared, sizeof(int))\n#define INIT_PTHREAD_RWLOCKATTR_GETPSHARED \\\n  COMMON_INTERCEPT_FUNCTION(pthread_rwlockattr_getpshared);\n#else\n#define INIT_PTHREAD_RWLOCKATTR_GETPSHARED\n#endif\n\n#if SANITIZER_INTERCEPT_PTHREAD_RWLOCKATTR_GETKIND_NP\nINTERCEPTOR_PTHREAD_RWLOCKATTR_GET(kind_np, sizeof(int))\n#define INIT_PTHREAD_RWLOCKATTR_GETKIND_NP \\\n  COMMON_INTERCEPT_FUNCTION(pthread_rwlockattr_getkind_np);\n#else\n#define INIT_PTHREAD_RWLOCKATTR_GETKIND_NP\n#endif\n\n#if SANITIZER_INTERCEPT_PTHREAD_CONDATTR_GETPSHARED\nINTERCEPTOR_PTHREAD_CONDATTR_GET(pshared, sizeof(int))\n#define INIT_PTHREAD_CONDATTR_GETPSHARED \\\n  COMMON_INTERCEPT_FUNCTION(pthread_condattr_getpshared);\n#else\n#define INIT_PTHREAD_CONDATTR_GETPSHARED\n#endif\n\n#if SANITIZER_INTERCEPT_PTHREAD_CONDATTR_GETCLOCK\nINTERCEPTOR_PTHREAD_CONDATTR_GET(clock, sizeof(int))\n#define INIT_PTHREAD_CONDATTR_GETCLOCK \\\n  COMMON_INTERCEPT_FUNCTION(pthread_condattr_getclock);\n#else\n#define INIT_PTHREAD_CONDATTR_GETCLOCK\n#endif\n\n#if SANITIZER_INTERCEPT_PTHREAD_BARRIERATTR_GETPSHARED\nINTERCEPTOR_PTHREAD_BARRIERATTR_GET(pshared, sizeof(int)) // !mac !android\n#define INIT_PTHREAD_BARRIERATTR_GETPSHARED \\\n  COMMON_INTERCEPT_FUNCTION(pthread_barrierattr_getpshared);\n#else\n#define INIT_PTHREAD_BARRIERATTR_GETPSHARED\n#endif\n\n#if 
SANITIZER_INTERCEPT_TMPNAM\nINTERCEPTOR(char *, tmpnam, char *s) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, tmpnam, s);\n  char *res = REAL(tmpnam)(s);\n  if (res) {\n    if (s)\n      // FIXME: under ASan the call below may write to freed memory and corrupt\n      // its metadata. See\n      // https://github.com/google/sanitizers/issues/321.\n      COMMON_INTERCEPTOR_WRITE_RANGE(ctx, s, internal_strlen(s) + 1);\n    else\n      COMMON_INTERCEPTOR_INITIALIZE_RANGE(res, internal_strlen(res) + 1);\n  }\n  return res;\n}\n#define INIT_TMPNAM COMMON_INTERCEPT_FUNCTION(tmpnam);\n#else\n#define INIT_TMPNAM\n#endif\n\n#if SANITIZER_INTERCEPT_TMPNAM_R\nINTERCEPTOR(char *, tmpnam_r, char *s) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, tmpnam_r, s);\n  // FIXME: under ASan the call below may write to freed memory and corrupt\n  // its metadata. See\n  // https://github.com/google/sanitizers/issues/321.\n  char *res = REAL(tmpnam_r)(s);\n  if (res && s) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, s, internal_strlen(s) + 1);\n  return res;\n}\n#define INIT_TMPNAM_R COMMON_INTERCEPT_FUNCTION(tmpnam_r);\n#else\n#define INIT_TMPNAM_R\n#endif\n\n#if SANITIZER_INTERCEPT_PTSNAME\nINTERCEPTOR(char *, ptsname, int fd) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, ptsname, fd);\n  char *res = REAL(ptsname)(fd);\n  if (res != nullptr)\n    COMMON_INTERCEPTOR_INITIALIZE_RANGE(res, internal_strlen(res) + 1);\n  return res;\n}\n#define INIT_PTSNAME COMMON_INTERCEPT_FUNCTION(ptsname);\n#else\n#define INIT_PTSNAME\n#endif\n\n#if SANITIZER_INTERCEPT_PTSNAME_R\nINTERCEPTOR(int, ptsname_r, int fd, char *name, SIZE_T namesize) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, ptsname_r, fd, name, namesize);\n  int res = REAL(ptsname_r)(fd, name, namesize);\n  if (res == 0)\n    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, name, internal_strlen(name) + 1);\n  return res;\n}\n#define INIT_PTSNAME_R COMMON_INTERCEPT_FUNCTION(ptsname_r);\n#else\n#define INIT_PTSNAME_R\n#endif\n\n#if 
SANITIZER_INTERCEPT_TTYNAME\nINTERCEPTOR(char *, ttyname, int fd) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, ttyname, fd);\n  char *res = REAL(ttyname)(fd);\n  if (res != nullptr)\n    COMMON_INTERCEPTOR_INITIALIZE_RANGE(res, internal_strlen(res) + 1);\n  return res;\n}\n#define INIT_TTYNAME COMMON_INTERCEPT_FUNCTION(ttyname);\n#else\n#define INIT_TTYNAME\n#endif\n\n#if SANITIZER_INTERCEPT_TTYNAME_R\nINTERCEPTOR(int, ttyname_r, int fd, char *name, SIZE_T namesize) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, ttyname_r, fd, name, namesize);\n  int res = REAL(ttyname_r)(fd, name, namesize);\n  if (res == 0)\n    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, name, internal_strlen(name) + 1);\n  return res;\n}\n#define INIT_TTYNAME_R COMMON_INTERCEPT_FUNCTION(ttyname_r);\n#else\n#define INIT_TTYNAME_R\n#endif\n\n#if SANITIZER_INTERCEPT_TEMPNAM\nINTERCEPTOR(char *, tempnam, char *dir, char *pfx) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, tempnam, dir, pfx);\n  if (dir) COMMON_INTERCEPTOR_READ_RANGE(ctx, dir, internal_strlen(dir) + 1);\n  if (pfx) COMMON_INTERCEPTOR_READ_RANGE(ctx, pfx, internal_strlen(pfx) + 1);\n  char *res = REAL(tempnam)(dir, pfx);\n  if (res) COMMON_INTERCEPTOR_INITIALIZE_RANGE(res, internal_strlen(res) + 1);\n  return res;\n}\n#define INIT_TEMPNAM COMMON_INTERCEPT_FUNCTION(tempnam);\n#else\n#define INIT_TEMPNAM\n#endif\n\n#if SANITIZER_INTERCEPT_PTHREAD_SETNAME_NP && !SANITIZER_NETBSD\nINTERCEPTOR(int, pthread_setname_np, uptr thread, const char *name) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, pthread_setname_np, thread, name);\n  COMMON_INTERCEPTOR_READ_STRING(ctx, name, 0);\n  COMMON_INTERCEPTOR_SET_PTHREAD_NAME(ctx, thread, name);\n  return REAL(pthread_setname_np)(thread, name);\n}\n#define INIT_PTHREAD_SETNAME_NP COMMON_INTERCEPT_FUNCTION(pthread_setname_np);\n#elif SANITIZER_INTERCEPT_PTHREAD_SETNAME_NP && SANITIZER_NETBSD\nINTERCEPTOR(int, pthread_setname_np, uptr thread, const char *name, void *arg) {\n  void *ctx;\n  char 
newname[32]; // PTHREAD_MAX_NAMELEN_NP=32\n  COMMON_INTERCEPTOR_ENTER(ctx, pthread_setname_np, thread, name, arg);\n  COMMON_INTERCEPTOR_READ_STRING(ctx, name, 0);\n  internal_snprintf(newname, sizeof(newname), name, arg);\n  COMMON_INTERCEPTOR_SET_PTHREAD_NAME(ctx, thread, newname);\n  return REAL(pthread_setname_np)(thread, name, arg);\n}\n#define INIT_PTHREAD_SETNAME_NP COMMON_INTERCEPT_FUNCTION(pthread_setname_np);\n#else\n#define INIT_PTHREAD_SETNAME_NP\n#endif\n\n#if SANITIZER_INTERCEPT_PTHREAD_GETNAME_NP\nINTERCEPTOR(int, pthread_getname_np, uptr thread, char *name, SIZE_T len) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, pthread_getname_np, thread, name, len);\n  int res = REAL(pthread_getname_np)(thread, name, len);\n  if (!res)\n    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, name, internal_strnlen(name, len) + 1);\n  return res;\n}\n#define INIT_PTHREAD_GETNAME_NP COMMON_INTERCEPT_FUNCTION(pthread_getname_np);\n#else\n#define INIT_PTHREAD_GETNAME_NP\n#endif\n\n#if SANITIZER_INTERCEPT_SINCOS\nINTERCEPTOR(void, sincos, double x, double *sin, double *cos) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, sincos, x, sin, cos);\n  // FIXME: under ASan the call below may write to freed memory and corrupt\n  // its metadata. See\n  // https://github.com/google/sanitizers/issues/321.\n  REAL(sincos)(x, sin, cos);\n  if (sin) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, sin, sizeof(*sin));\n  if (cos) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, cos, sizeof(*cos));\n}\nINTERCEPTOR(void, sincosf, float x, float *sin, float *cos) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, sincosf, x, sin, cos);\n  // FIXME: under ASan the call below may write to freed memory and corrupt\n  // its metadata. 
See\n  // https://github.com/google/sanitizers/issues/321.\n  REAL(sincosf)(x, sin, cos);\n  if (sin) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, sin, sizeof(*sin));\n  if (cos) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, cos, sizeof(*cos));\n}\nINTERCEPTOR(void, sincosl, long double x, long double *sin, long double *cos) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, sincosl, x, sin, cos);\n  // FIXME: under ASan the call below may write to freed memory and corrupt\n  // its metadata. See\n  // https://github.com/google/sanitizers/issues/321.\n  REAL(sincosl)(x, sin, cos);\n  if (sin) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, sin, sizeof(*sin));\n  if (cos) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, cos, sizeof(*cos));\n}\n#define INIT_SINCOS                   \\\n  COMMON_INTERCEPT_FUNCTION(sincos);  \\\n  COMMON_INTERCEPT_FUNCTION(sincosf); \\\n  COMMON_INTERCEPT_FUNCTION_LDBL(sincosl);\n#else\n#define INIT_SINCOS\n#endif\n\n#if SANITIZER_INTERCEPT_REMQUO\nINTERCEPTOR(double, remquo, double x, double y, int *quo) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, remquo, x, y, quo);\n  // FIXME: under ASan the call below may write to freed memory and corrupt\n  // its metadata. See\n  // https://github.com/google/sanitizers/issues/321.\n  double res = REAL(remquo)(x, y, quo);\n  if (quo) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, quo, sizeof(*quo));\n  return res;\n}\nINTERCEPTOR(float, remquof, float x, float y, int *quo) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, remquof, x, y, quo);\n  // FIXME: under ASan the call below may write to freed memory and corrupt\n  // its metadata. 
See\n  // https://github.com/google/sanitizers/issues/321.\n  float res = REAL(remquof)(x, y, quo);\n  if (quo) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, quo, sizeof(*quo));\n  return res;\n}\n#define INIT_REMQUO                   \\\n  COMMON_INTERCEPT_FUNCTION(remquo);  \\\n  COMMON_INTERCEPT_FUNCTION(remquof);\n#else\n#define INIT_REMQUO\n#endif\n\n#if SANITIZER_INTERCEPT_REMQUOL\nINTERCEPTOR(long double, remquol, long double x, long double y, int *quo) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, remquol, x, y, quo);\n  // FIXME: under ASan the call below may write to freed memory and corrupt\n  // its metadata. See\n  // https://github.com/google/sanitizers/issues/321.\n  long double res = REAL(remquol)(x, y, quo);\n  if (quo) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, quo, sizeof(*quo));\n  return res;\n}\n#define INIT_REMQUOL                  \\\n  COMMON_INTERCEPT_FUNCTION_LDBL(remquol);\n#else\n#define INIT_REMQUOL\n#endif\n\n#if SANITIZER_INTERCEPT_LGAMMA\nextern int signgam;\nINTERCEPTOR(double, lgamma, double x) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, lgamma, x);\n  double res = REAL(lgamma)(x);\n  COMMON_INTERCEPTOR_WRITE_RANGE(ctx, &signgam, sizeof(signgam));\n  return res;\n}\nINTERCEPTOR(float, lgammaf, float x) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, lgammaf, x);\n  float res = REAL(lgammaf)(x);\n  COMMON_INTERCEPTOR_WRITE_RANGE(ctx, &signgam, sizeof(signgam));\n  return res;\n}\n#define INIT_LGAMMA                   \\\n  COMMON_INTERCEPT_FUNCTION(lgamma);  \\\n  COMMON_INTERCEPT_FUNCTION(lgammaf);\n#else\n#define INIT_LGAMMA\n#endif\n\n#if SANITIZER_INTERCEPT_LGAMMAL\nINTERCEPTOR(long double, lgammal, long double x) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, lgammal, x);\n  long double res = REAL(lgammal)(x);\n  COMMON_INTERCEPTOR_WRITE_RANGE(ctx, &signgam, sizeof(signgam));\n  return res;\n}\n#define INIT_LGAMMAL                  \\\n  COMMON_INTERCEPT_FUNCTION_LDBL(lgammal);\n#else\n#define INIT_LGAMMAL\n#endif\n\n#if 
SANITIZER_INTERCEPT_LGAMMA_R\nINTERCEPTOR(double, lgamma_r, double x, int *signp) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, lgamma_r, x, signp);\n  // FIXME: under ASan the call below may write to freed memory and corrupt\n  // its metadata. See\n  // https://github.com/google/sanitizers/issues/321.\n  double res = REAL(lgamma_r)(x, signp);\n  if (signp) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, signp, sizeof(*signp));\n  return res;\n}\nINTERCEPTOR(float, lgammaf_r, float x, int *signp) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, lgammaf_r, x, signp);\n  // FIXME: under ASan the call below may write to freed memory and corrupt\n  // its metadata. See\n  // https://github.com/google/sanitizers/issues/321.\n  float res = REAL(lgammaf_r)(x, signp);\n  if (signp) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, signp, sizeof(*signp));\n  return res;\n}\n#define INIT_LGAMMA_R                   \\\n  COMMON_INTERCEPT_FUNCTION(lgamma_r);  \\\n  COMMON_INTERCEPT_FUNCTION(lgammaf_r);\n#else\n#define INIT_LGAMMA_R\n#endif\n\n#if SANITIZER_INTERCEPT_LGAMMAL_R\nINTERCEPTOR(long double, lgammal_r, long double x, int *signp) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, lgammal_r, x, signp);\n  // FIXME: under ASan the call below may write to freed memory and corrupt\n  // its metadata. See\n  // https://github.com/google/sanitizers/issues/321.\n  long double res = REAL(lgammal_r)(x, signp);\n  if (signp) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, signp, sizeof(*signp));\n  return res;\n}\n#define INIT_LGAMMAL_R COMMON_INTERCEPT_FUNCTION_LDBL(lgammal_r);\n#else\n#define INIT_LGAMMAL_R\n#endif\n\n#if SANITIZER_INTERCEPT_DRAND48_R\nINTERCEPTOR(int, drand48_r, void *buffer, double *result) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, drand48_r, buffer, result);\n  // FIXME: under ASan the call below may write to freed memory and corrupt\n  // its metadata. 
See\n  // https://github.com/google/sanitizers/issues/321.\n  int res = REAL(drand48_r)(buffer, result);\n  if (result) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, result, sizeof(*result));\n  return res;\n}\nINTERCEPTOR(int, lrand48_r, void *buffer, long *result) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, lrand48_r, buffer, result);\n  // FIXME: under ASan the call below may write to freed memory and corrupt\n  // its metadata. See\n  // https://github.com/google/sanitizers/issues/321.\n  int res = REAL(lrand48_r)(buffer, result);\n  if (result) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, result, sizeof(*result));\n  return res;\n}\n#define INIT_DRAND48_R                  \\\n  COMMON_INTERCEPT_FUNCTION(drand48_r); \\\n  COMMON_INTERCEPT_FUNCTION(lrand48_r);\n#else\n#define INIT_DRAND48_R\n#endif\n\n#if SANITIZER_INTERCEPT_RAND_R\nINTERCEPTOR(int, rand_r, unsigned *seedp) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, rand_r, seedp);\n  COMMON_INTERCEPTOR_READ_RANGE(ctx, seedp, sizeof(*seedp));\n  return REAL(rand_r)(seedp);\n}\n#define INIT_RAND_R COMMON_INTERCEPT_FUNCTION(rand_r);\n#else\n#define INIT_RAND_R\n#endif\n\n#if SANITIZER_INTERCEPT_GETLINE\nINTERCEPTOR(SSIZE_T, getline, char **lineptr, SIZE_T *n, void *stream) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, getline, lineptr, n, stream);\n  // FIXME: under ASan the call below may write to freed memory and corrupt\n  // its metadata. See\n  // https://github.com/google/sanitizers/issues/321.\n  SSIZE_T res = REAL(getline)(lineptr, n, stream);\n  if (res > 0) {\n    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, lineptr, sizeof(*lineptr));\n    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, n, sizeof(*n));\n    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, *lineptr, res + 1);\n  }\n  return res;\n}\n\n// FIXME: under ASan the call below may write to freed memory and corrupt its\n// metadata. 
See\n// https://github.com/google/sanitizers/issues/321.\n#define GETDELIM_INTERCEPTOR_IMPL(vname)                                       \\\n  {                                                                            \\\n    void *ctx;                                                                 \\\n    COMMON_INTERCEPTOR_ENTER(ctx, vname, lineptr, n, delim, stream);           \\\n    SSIZE_T res = REAL(vname)(lineptr, n, delim, stream);                      \\\n    if (res > 0) {                                                             \\\n      COMMON_INTERCEPTOR_WRITE_RANGE(ctx, lineptr, sizeof(*lineptr));          \\\n      COMMON_INTERCEPTOR_WRITE_RANGE(ctx, n, sizeof(*n));                      \\\n      COMMON_INTERCEPTOR_WRITE_RANGE(ctx, *lineptr, res + 1);                  \\\n    }                                                                          \\\n    return res;                                                                \\\n  }\n\nINTERCEPTOR(SSIZE_T, __getdelim, char **lineptr, SIZE_T *n, int delim,\n            void *stream)\nGETDELIM_INTERCEPTOR_IMPL(__getdelim)\n\n// There's no __getdelim() on FreeBSD so we supply the getdelim() interceptor\n// with its own body.\nINTERCEPTOR(SSIZE_T, getdelim, char **lineptr, SIZE_T *n, int delim,\n            void *stream)\nGETDELIM_INTERCEPTOR_IMPL(getdelim)\n\n#define INIT_GETLINE                     \\\n  COMMON_INTERCEPT_FUNCTION(getline);    \\\n  COMMON_INTERCEPT_FUNCTION(__getdelim); \\\n  COMMON_INTERCEPT_FUNCTION(getdelim);\n#else\n#define INIT_GETLINE\n#endif\n\n#if SANITIZER_INTERCEPT_ICONV\nINTERCEPTOR(SIZE_T, iconv, void *cd, char **inbuf, SIZE_T *inbytesleft,\n            char **outbuf, SIZE_T *outbytesleft) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, iconv, cd, inbuf, inbytesleft, outbuf,\n                           outbytesleft);\n  if (inbytesleft)\n    COMMON_INTERCEPTOR_READ_RANGE(ctx, inbytesleft, sizeof(*inbytesleft));\n  if (inbuf && inbytesleft)\n    
COMMON_INTERCEPTOR_READ_RANGE(ctx, *inbuf, *inbytesleft);\n  if (outbytesleft)\n    COMMON_INTERCEPTOR_READ_RANGE(ctx, outbytesleft, sizeof(*outbytesleft));\n  void *outbuf_orig = outbuf ? *outbuf : nullptr;\n  // FIXME: under ASan the call below may write to freed memory and corrupt\n  // its metadata. See\n  // https://github.com/google/sanitizers/issues/321.\n  SIZE_T res = REAL(iconv)(cd, inbuf, inbytesleft, outbuf, outbytesleft);\n  if (outbuf && *outbuf > outbuf_orig) {\n    SIZE_T sz = (char *)*outbuf - (char *)outbuf_orig;\n    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, outbuf_orig, sz);\n  }\n  return res;\n}\n#define INIT_ICONV COMMON_INTERCEPT_FUNCTION(iconv);\n#else\n#define INIT_ICONV\n#endif\n\n#if SANITIZER_INTERCEPT_TIMES\nINTERCEPTOR(__sanitizer_clock_t, times, void *tms) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, times, tms);\n  // FIXME: under ASan the call below may write to freed memory and corrupt\n  // its metadata. See\n  // https://github.com/google/sanitizers/issues/321.\n  __sanitizer_clock_t res = REAL(times)(tms);\n  if (res != (__sanitizer_clock_t)-1 && tms)\n    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, tms, struct_tms_sz);\n  return res;\n}\n#define INIT_TIMES COMMON_INTERCEPT_FUNCTION(times);\n#else\n#define INIT_TIMES\n#endif\n\n#if SANITIZER_S390 && \\\n    (SANITIZER_INTERCEPT_TLS_GET_ADDR || SANITIZER_INTERCEPT_TLS_GET_OFFSET)\nextern \"C\" uptr __tls_get_offset_wrapper(void *arg, uptr (*fn)(void *arg));\nDEFINE_REAL(uptr, __tls_get_offset, void *arg)\n#endif\n\n#if SANITIZER_INTERCEPT_TLS_GET_ADDR\n#if !SANITIZER_S390\n#define INIT_TLS_GET_ADDR COMMON_INTERCEPT_FUNCTION(__tls_get_addr)\n// If you see any crashes around this functions, there are 2 known issues with\n// it: 1. __tls_get_addr can be called with mis-aligned stack due to:\n// https://gcc.gnu.org/bugzilla/show_bug.cgi?id=58066\n// 2. 
It can be called recursively if sanitizer code uses __tls_get_addr\n// to access thread local variables (it should not happen normally,\n// because sanitizers use initial-exec tls model).\nINTERCEPTOR(void *, __tls_get_addr, void *arg) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, __tls_get_addr, arg);\n  void *res = REAL(__tls_get_addr)(arg);\n  uptr tls_begin, tls_end;\n  COMMON_INTERCEPTOR_GET_TLS_RANGE(&tls_begin, &tls_end);\n  DTLS::DTV *dtv = DTLS_on_tls_get_addr(arg, res, tls_begin, tls_end);\n  if (dtv) {\n    // New DTLS block has been allocated.\n    COMMON_INTERCEPTOR_INITIALIZE_RANGE((void *)dtv->beg, dtv->size);\n  }\n  return res;\n}\n#if SANITIZER_PPC\n// On PowerPC, we also need to intercept __tls_get_addr_opt, which has\n// mostly the same semantics as __tls_get_addr, but its presence enables\n// some optimizations in linker (which are safe to ignore here).\nextern \"C\" __attribute__((alias(\"__interceptor___tls_get_addr\"),\n                          visibility(\"default\")))\nvoid *__tls_get_addr_opt(void *arg);\n#endif\n#else // SANITIZER_S390\n// On s390, we have to intercept two functions here:\n// - __tls_get_addr_internal, which is a glibc-internal function that is like\n//   the usual __tls_get_addr, but returns a TP-relative offset instead of\n//   a proper pointer.  It is used by dlsym for TLS symbols.\n// - __tls_get_offset, which is like the above, but also takes a GOT-relative\n//   descriptor offset as an argument instead of a pointer.  GOT address\n//   is passed in r12, so it's necessary to write it in assembly.  
This is\n//   the function used by the compiler.\n#define INIT_TLS_GET_ADDR COMMON_INTERCEPT_FUNCTION(__tls_get_offset)\nINTERCEPTOR(uptr, __tls_get_addr_internal, void *arg) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, __tls_get_addr_internal, arg);\n  uptr res = __tls_get_offset_wrapper(arg, REAL(__tls_get_offset));\n  uptr tp = reinterpret_cast<uptr>(__builtin_thread_pointer());\n  void *ptr = reinterpret_cast<void *>(res + tp);\n  uptr tls_begin, tls_end;\n  COMMON_INTERCEPTOR_GET_TLS_RANGE(&tls_begin, &tls_end);\n  DTLS::DTV *dtv = DTLS_on_tls_get_addr(arg, ptr, tls_begin, tls_end);\n  if (dtv) {\n    // New DTLS block has been allocated.\n    COMMON_INTERCEPTOR_INITIALIZE_RANGE((void *)dtv->beg, dtv->size);\n  }\n  return res;\n}\n#endif // SANITIZER_S390\n#else\n#define INIT_TLS_GET_ADDR\n#endif\n\n#if SANITIZER_S390 && \\\n    (SANITIZER_INTERCEPT_TLS_GET_ADDR || SANITIZER_INTERCEPT_TLS_GET_OFFSET)\nextern \"C\" uptr __tls_get_offset(void *arg);\nextern \"C\" uptr __interceptor___tls_get_offset(void *arg);\n// We need a hidden symbol aliasing the above, so that we can jump\n// directly to it from the assembly below.\nextern \"C\" __attribute__((alias(\"__interceptor___tls_get_addr_internal\"),\n                          visibility(\"hidden\")))\nuptr __tls_get_addr_hidden(void *arg);\n// Now carefully intercept __tls_get_offset.\nasm(\n  \".text\\n\"\n// The __intercept_ version has to exist, so that gen_dynamic_list.py\n// exports our symbol.\n  \".weak __tls_get_offset\\n\"\n  \".type __tls_get_offset, @function\\n\"\n  \"__tls_get_offset:\\n\"\n  \".global __interceptor___tls_get_offset\\n\"\n  \".type __interceptor___tls_get_offset, @function\\n\"\n  \"__interceptor___tls_get_offset:\\n\"\n#ifdef __s390x__\n  \"la %r2, 0(%r2,%r12)\\n\"\n  \"jg __tls_get_addr_hidden\\n\"\n#else\n  \"basr %r3,0\\n\"\n  \"0: la %r2,0(%r2,%r12)\\n\"\n  \"l %r4,1f-0b(%r3)\\n\"\n  \"b 0(%r4,%r3)\\n\"\n  \"1: .long __tls_get_addr_hidden - 0b\\n\"\n#endif\n  \".size 
__interceptor___tls_get_offset, .-__interceptor___tls_get_offset\\n\"\n// Assembly wrapper to call REAL(__tls_get_offset)(arg)\n  \".type __tls_get_offset_wrapper, @function\\n\"\n  \"__tls_get_offset_wrapper:\\n\"\n#ifdef __s390x__\n  \"sgr %r2,%r12\\n\"\n#else\n  \"sr %r2,%r12\\n\"\n#endif\n  \"br %r3\\n\"\n  \".size __tls_get_offset_wrapper, .-__tls_get_offset_wrapper\\n\"\n);\n#endif\n\n#if SANITIZER_INTERCEPT_LISTXATTR\nINTERCEPTOR(SSIZE_T, listxattr, const char *path, char *list, SIZE_T size) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, listxattr, path, list, size);\n  if (path) COMMON_INTERCEPTOR_READ_RANGE(ctx, path, internal_strlen(path) + 1);\n  // FIXME: under ASan the call below may write to freed memory and corrupt\n  // its metadata. See\n  // https://github.com/google/sanitizers/issues/321.\n  SSIZE_T res = REAL(listxattr)(path, list, size);\n  // Here and below, size == 0 is a special case where nothing is written to the\n  // buffer, and res contains the desired buffer size.\n  if (size && res > 0 && list) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, list, res);\n  return res;\n}\nINTERCEPTOR(SSIZE_T, llistxattr, const char *path, char *list, SIZE_T size) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, llistxattr, path, list, size);\n  if (path) COMMON_INTERCEPTOR_READ_RANGE(ctx, path, internal_strlen(path) + 1);\n  // FIXME: under ASan the call below may write to freed memory and corrupt\n  // its metadata. See\n  // https://github.com/google/sanitizers/issues/321.\n  SSIZE_T res = REAL(llistxattr)(path, list, size);\n  if (size && res > 0 && list) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, list, res);\n  return res;\n}\nINTERCEPTOR(SSIZE_T, flistxattr, int fd, char *list, SIZE_T size) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, flistxattr, fd, list, size);\n  // FIXME: under ASan the call below may write to freed memory and corrupt\n  // its metadata. 
See\n  // https://github.com/google/sanitizers/issues/321.\n  SSIZE_T res = REAL(flistxattr)(fd, list, size);\n  if (size && res > 0 && list) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, list, res);\n  return res;\n}\n#define INIT_LISTXATTR                   \\\n  COMMON_INTERCEPT_FUNCTION(listxattr);  \\\n  COMMON_INTERCEPT_FUNCTION(llistxattr); \\\n  COMMON_INTERCEPT_FUNCTION(flistxattr);\n#else\n#define INIT_LISTXATTR\n#endif\n\n#if SANITIZER_INTERCEPT_GETXATTR\nINTERCEPTOR(SSIZE_T, getxattr, const char *path, const char *name, char *value,\n            SIZE_T size) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, getxattr, path, name, value, size);\n  if (path) COMMON_INTERCEPTOR_READ_RANGE(ctx, path, internal_strlen(path) + 1);\n  if (name) COMMON_INTERCEPTOR_READ_RANGE(ctx, name, internal_strlen(name) + 1);\n  // FIXME: under ASan the call below may write to freed memory and corrupt\n  // its metadata. See\n  // https://github.com/google/sanitizers/issues/321.\n  SSIZE_T res = REAL(getxattr)(path, name, value, size);\n  if (size && res > 0 && value) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, value, res);\n  return res;\n}\nINTERCEPTOR(SSIZE_T, lgetxattr, const char *path, const char *name, char *value,\n            SIZE_T size) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, lgetxattr, path, name, value, size);\n  if (path) COMMON_INTERCEPTOR_READ_RANGE(ctx, path, internal_strlen(path) + 1);\n  if (name) COMMON_INTERCEPTOR_READ_RANGE(ctx, name, internal_strlen(name) + 1);\n  // FIXME: under ASan the call below may write to freed memory and corrupt\n  // its metadata. 
See\n  // https://github.com/google/sanitizers/issues/321.\n  SSIZE_T res = REAL(lgetxattr)(path, name, value, size);\n  if (size && res > 0 && value) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, value, res);\n  return res;\n}\nINTERCEPTOR(SSIZE_T, fgetxattr, int fd, const char *name, char *value,\n            SIZE_T size) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, fgetxattr, fd, name, value, size);\n  if (name) COMMON_INTERCEPTOR_READ_RANGE(ctx, name, internal_strlen(name) + 1);\n  // FIXME: under ASan the call below may write to freed memory and corrupt\n  // its metadata. See\n  // https://github.com/google/sanitizers/issues/321.\n  SSIZE_T res = REAL(fgetxattr)(fd, name, value, size);\n  if (size && res > 0 && value) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, value, res);\n  return res;\n}\n#define INIT_GETXATTR                   \\\n  COMMON_INTERCEPT_FUNCTION(getxattr);  \\\n  COMMON_INTERCEPT_FUNCTION(lgetxattr); \\\n  COMMON_INTERCEPT_FUNCTION(fgetxattr);\n#else\n#define INIT_GETXATTR\n#endif\n\n#if SANITIZER_INTERCEPT_GETRESID\nINTERCEPTOR(int, getresuid, void *ruid, void *euid, void *suid) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, getresuid, ruid, euid, suid);\n  // FIXME: under ASan the call below may write to freed memory and corrupt\n  // its metadata. See\n  // https://github.com/google/sanitizers/issues/321.\n  int res = REAL(getresuid)(ruid, euid, suid);\n  if (res >= 0) {\n    if (ruid) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ruid, uid_t_sz);\n    if (euid) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, euid, uid_t_sz);\n    if (suid) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, suid, uid_t_sz);\n  }\n  return res;\n}\nINTERCEPTOR(int, getresgid, void *rgid, void *egid, void *sgid) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, getresgid, rgid, egid, sgid);\n  // FIXME: under ASan the call below may write to freed memory and corrupt\n  // its metadata. 
See\n  // https://github.com/google/sanitizers/issues/321.\n  int res = REAL(getresgid)(rgid, egid, sgid);\n  if (res >= 0) {\n    if (rgid) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, rgid, gid_t_sz);\n    if (egid) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, egid, gid_t_sz);\n    if (sgid) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, sgid, gid_t_sz);\n  }\n  return res;\n}\n#define INIT_GETRESID                   \\\n  COMMON_INTERCEPT_FUNCTION(getresuid); \\\n  COMMON_INTERCEPT_FUNCTION(getresgid);\n#else\n#define INIT_GETRESID\n#endif\n\n#if SANITIZER_INTERCEPT_GETIFADDRS\n// As long as getifaddrs()/freeifaddrs() use calloc()/free(), we don't need to\n// intercept freeifaddrs(). If that ceases to be the case, we might need to\n// intercept it to poison the memory again.\nINTERCEPTOR(int, getifaddrs, __sanitizer_ifaddrs **ifap) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, getifaddrs, ifap);\n  // FIXME: under ASan the call below may write to freed memory and corrupt\n  // its metadata. See\n  // https://github.com/google/sanitizers/issues/321.\n  int res = REAL(getifaddrs)(ifap);\n  if (res == 0 && ifap) {\n    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ifap, sizeof(void *));\n    __sanitizer_ifaddrs *p = *ifap;\n    while (p) {\n      COMMON_INTERCEPTOR_WRITE_RANGE(ctx, p, sizeof(__sanitizer_ifaddrs));\n      if (p->ifa_name)\n        COMMON_INTERCEPTOR_WRITE_RANGE(ctx, p->ifa_name,\n                                       internal_strlen(p->ifa_name) + 1);\n      if (p->ifa_addr)\n        COMMON_INTERCEPTOR_WRITE_RANGE(ctx, p->ifa_addr, struct_sockaddr_sz);\n      if (p->ifa_netmask)\n        COMMON_INTERCEPTOR_WRITE_RANGE(ctx, p->ifa_netmask, struct_sockaddr_sz);\n      // On Linux this is a union, but the other member also points to a\n      // struct sockaddr, so the following is sufficient.\n      if (p->ifa_dstaddr)\n        COMMON_INTERCEPTOR_WRITE_RANGE(ctx, p->ifa_dstaddr, struct_sockaddr_sz);\n      // FIXME(smatveev): Unpoison p->ifa_data as well.\n      p = p->ifa_next;\n    
}\n  }\n  return res;\n}\n#define INIT_GETIFADDRS                  \\\n  COMMON_INTERCEPT_FUNCTION(getifaddrs);\n#else\n#define INIT_GETIFADDRS\n#endif\n\n#if SANITIZER_INTERCEPT_IF_INDEXTONAME\nINTERCEPTOR(char *, if_indextoname, unsigned int ifindex, char* ifname) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, if_indextoname, ifindex, ifname);\n  // FIXME: under ASan the call below may write to freed memory and corrupt\n  // its metadata. See\n  // https://github.com/google/sanitizers/issues/321.\n  char *res = REAL(if_indextoname)(ifindex, ifname);\n  if (res && ifname)\n    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ifname, internal_strlen(ifname) + 1);\n  return res;\n}\nINTERCEPTOR(unsigned int, if_nametoindex, const char* ifname) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, if_nametoindex, ifname);\n  if (ifname)\n    COMMON_INTERCEPTOR_READ_RANGE(ctx, ifname, internal_strlen(ifname) + 1);\n  return REAL(if_nametoindex)(ifname);\n}\n#define INIT_IF_INDEXTONAME                  \\\n  COMMON_INTERCEPT_FUNCTION(if_indextoname); \\\n  COMMON_INTERCEPT_FUNCTION(if_nametoindex);\n#else\n#define INIT_IF_INDEXTONAME\n#endif\n\n#if SANITIZER_INTERCEPT_CAPGET\nINTERCEPTOR(int, capget, void *hdrp, void *datap) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, capget, hdrp, datap);\n  if (hdrp)\n    COMMON_INTERCEPTOR_READ_RANGE(ctx, hdrp, __user_cap_header_struct_sz);\n  // FIXME: under ASan the call below may write to freed memory and corrupt\n  // its metadata. See\n  // https://github.com/google/sanitizers/issues/321.\n  int res = REAL(capget)(hdrp, datap);\n  if (res == 0 && datap)\n    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, datap, __user_cap_data_struct_sz);\n  // We can also return -1 and write to hdrp->version if the version passed in\n  // hdrp->version is unsupported. 
But that's not a trivial condition to check,\n  // and anyway COMMON_INTERCEPTOR_READ_RANGE protects us to some extent.\n  return res;\n}\nINTERCEPTOR(int, capset, void *hdrp, const void *datap) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, capset, hdrp, datap);\n  if (hdrp)\n    COMMON_INTERCEPTOR_READ_RANGE(ctx, hdrp, __user_cap_header_struct_sz);\n  if (datap)\n    COMMON_INTERCEPTOR_READ_RANGE(ctx, datap, __user_cap_data_struct_sz);\n  return REAL(capset)(hdrp, datap);\n}\n#define INIT_CAPGET                  \\\n  COMMON_INTERCEPT_FUNCTION(capget); \\\n  COMMON_INTERCEPT_FUNCTION(capset);\n#else\n#define INIT_CAPGET\n#endif\n\n#if SANITIZER_INTERCEPT_AEABI_MEM\nINTERCEPTOR(void *, __aeabi_memmove, void *to, const void *from, uptr size) {\n  void *ctx;\n  COMMON_INTERCEPTOR_MEMMOVE_IMPL(ctx, to, from, size);\n}\n\nINTERCEPTOR(void *, __aeabi_memmove4, void *to, const void *from, uptr size) {\n  void *ctx;\n  COMMON_INTERCEPTOR_MEMMOVE_IMPL(ctx, to, from, size);\n}\n\nINTERCEPTOR(void *, __aeabi_memmove8, void *to, const void *from, uptr size) {\n  void *ctx;\n  COMMON_INTERCEPTOR_MEMMOVE_IMPL(ctx, to, from, size);\n}\n\nINTERCEPTOR(void *, __aeabi_memcpy, void *to, const void *from, uptr size) {\n  void *ctx;\n  COMMON_INTERCEPTOR_MEMCPY_IMPL(ctx, to, from, size);\n}\n\nINTERCEPTOR(void *, __aeabi_memcpy4, void *to, const void *from, uptr size) {\n  void *ctx;\n  COMMON_INTERCEPTOR_MEMCPY_IMPL(ctx, to, from, size);\n}\n\nINTERCEPTOR(void *, __aeabi_memcpy8, void *to, const void *from, uptr size) {\n  void *ctx;\n  COMMON_INTERCEPTOR_MEMCPY_IMPL(ctx, to, from, size);\n}\n\n// Note the argument order.\nINTERCEPTOR(void *, __aeabi_memset, void *block, uptr size, int c) {\n  void *ctx;\n  COMMON_INTERCEPTOR_MEMSET_IMPL(ctx, block, c, size);\n}\n\nINTERCEPTOR(void *, __aeabi_memset4, void *block, uptr size, int c) {\n  void *ctx;\n  COMMON_INTERCEPTOR_MEMSET_IMPL(ctx, block, c, size);\n}\n\nINTERCEPTOR(void *, __aeabi_memset8, void *block, uptr size, int c) {\n  
void *ctx;\n  COMMON_INTERCEPTOR_MEMSET_IMPL(ctx, block, c, size);\n}\n\nINTERCEPTOR(void *, __aeabi_memclr, void *block, uptr size) {\n  void *ctx;\n  COMMON_INTERCEPTOR_MEMSET_IMPL(ctx, block, 0, size);\n}\n\nINTERCEPTOR(void *, __aeabi_memclr4, void *block, uptr size) {\n  void *ctx;\n  COMMON_INTERCEPTOR_MEMSET_IMPL(ctx, block, 0, size);\n}\n\nINTERCEPTOR(void *, __aeabi_memclr8, void *block, uptr size) {\n  void *ctx;\n  COMMON_INTERCEPTOR_MEMSET_IMPL(ctx, block, 0, size);\n}\n\n#define INIT_AEABI_MEM                         \\\n  COMMON_INTERCEPT_FUNCTION(__aeabi_memmove);  \\\n  COMMON_INTERCEPT_FUNCTION(__aeabi_memmove4); \\\n  COMMON_INTERCEPT_FUNCTION(__aeabi_memmove8); \\\n  COMMON_INTERCEPT_FUNCTION(__aeabi_memcpy);   \\\n  COMMON_INTERCEPT_FUNCTION(__aeabi_memcpy4);  \\\n  COMMON_INTERCEPT_FUNCTION(__aeabi_memcpy8);  \\\n  COMMON_INTERCEPT_FUNCTION(__aeabi_memset);   \\\n  COMMON_INTERCEPT_FUNCTION(__aeabi_memset4);  \\\n  COMMON_INTERCEPT_FUNCTION(__aeabi_memset8);  \\\n  COMMON_INTERCEPT_FUNCTION(__aeabi_memclr);   \\\n  COMMON_INTERCEPT_FUNCTION(__aeabi_memclr4);  \\\n  COMMON_INTERCEPT_FUNCTION(__aeabi_memclr8);\n#else\n#define INIT_AEABI_MEM\n#endif  // SANITIZER_INTERCEPT_AEABI_MEM\n\n#if SANITIZER_INTERCEPT___BZERO\nINTERCEPTOR(void *, __bzero, void *block, uptr size) {\n  void *ctx;\n  COMMON_INTERCEPTOR_MEMSET_IMPL(ctx, block, 0, size);\n}\n#define INIT___BZERO COMMON_INTERCEPT_FUNCTION(__bzero);\n#else\n#define INIT___BZERO\n#endif  // SANITIZER_INTERCEPT___BZERO\n\n#if SANITIZER_INTERCEPT_BZERO\nINTERCEPTOR(void *, bzero, void *block, uptr size) {\n  void *ctx;\n  COMMON_INTERCEPTOR_MEMSET_IMPL(ctx, block, 0, size);\n}\n#define INIT_BZERO COMMON_INTERCEPT_FUNCTION(bzero);\n#else\n#define INIT_BZERO\n#endif  // SANITIZER_INTERCEPT_BZERO\n\n#if SANITIZER_INTERCEPT_FTIME\nINTERCEPTOR(int, ftime, __sanitizer_timeb *tp) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, ftime, tp);\n  // FIXME: under ASan the call below may write to freed memory 
and corrupt\n  // its metadata. See\n  // https://github.com/google/sanitizers/issues/321.\n  int res = REAL(ftime)(tp);\n  if (tp)\n    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, tp, sizeof(*tp));\n  return res;\n}\n#define INIT_FTIME COMMON_INTERCEPT_FUNCTION(ftime);\n#else\n#define INIT_FTIME\n#endif  // SANITIZER_INTERCEPT_FTIME\n\n#if SANITIZER_INTERCEPT_XDR\nINTERCEPTOR(void, xdrmem_create, __sanitizer_XDR *xdrs, uptr addr,\n            unsigned size, int op) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, xdrmem_create, xdrs, addr, size, op);\n  // FIXME: under ASan the call below may write to freed memory and corrupt\n  // its metadata. See\n  // https://github.com/google/sanitizers/issues/321.\n  REAL(xdrmem_create)(xdrs, addr, size, op);\n  COMMON_INTERCEPTOR_WRITE_RANGE(ctx, xdrs, sizeof(*xdrs));\n  if (op == __sanitizer_XDR_ENCODE) {\n    // It's not obvious how much data individual xdr_ routines write.\n    // Simply unpoison the entire target buffer in advance.\n    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, (void *)addr, size);\n  }\n}\n\nINTERCEPTOR(void, xdrstdio_create, __sanitizer_XDR *xdrs, void *file, int op) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, xdrstdio_create, xdrs, file, op);\n  // FIXME: under ASan the call below may write to freed memory and corrupt\n  // its metadata. See\n  // https://github.com/google/sanitizers/issues/321.\n  REAL(xdrstdio_create)(xdrs, file, op);\n  COMMON_INTERCEPTOR_WRITE_RANGE(ctx, xdrs, sizeof(*xdrs));\n}\n\n// FIXME: under ASan the call below may write to freed memory and corrupt\n// its metadata. 
See\n// https://github.com/google/sanitizers/issues/321.\n#define XDR_INTERCEPTOR(F, T)                             \\\n  INTERCEPTOR(int, F, __sanitizer_XDR *xdrs, T *p) {      \\\n    void *ctx;                                            \\\n    COMMON_INTERCEPTOR_ENTER(ctx, F, xdrs, p);            \\\n    if (p && xdrs->x_op == __sanitizer_XDR_ENCODE)        \\\n      COMMON_INTERCEPTOR_READ_RANGE(ctx, p, sizeof(*p));  \\\n    int res = REAL(F)(xdrs, p);                           \\\n    if (res && p && xdrs->x_op == __sanitizer_XDR_DECODE) \\\n      COMMON_INTERCEPTOR_WRITE_RANGE(ctx, p, sizeof(*p)); \\\n    return res;                                           \\\n  }\n\nXDR_INTERCEPTOR(xdr_short, short)\nXDR_INTERCEPTOR(xdr_u_short, unsigned short)\nXDR_INTERCEPTOR(xdr_int, int)\nXDR_INTERCEPTOR(xdr_u_int, unsigned)\nXDR_INTERCEPTOR(xdr_long, long)\nXDR_INTERCEPTOR(xdr_u_long, unsigned long)\nXDR_INTERCEPTOR(xdr_hyper, long long)\nXDR_INTERCEPTOR(xdr_u_hyper, unsigned long long)\nXDR_INTERCEPTOR(xdr_longlong_t, long long)\nXDR_INTERCEPTOR(xdr_u_longlong_t, unsigned long long)\nXDR_INTERCEPTOR(xdr_int8_t, u8)\nXDR_INTERCEPTOR(xdr_uint8_t, u8)\nXDR_INTERCEPTOR(xdr_int16_t, u16)\nXDR_INTERCEPTOR(xdr_uint16_t, u16)\nXDR_INTERCEPTOR(xdr_int32_t, u32)\nXDR_INTERCEPTOR(xdr_uint32_t, u32)\nXDR_INTERCEPTOR(xdr_int64_t, u64)\nXDR_INTERCEPTOR(xdr_uint64_t, u64)\nXDR_INTERCEPTOR(xdr_quad_t, long long)\nXDR_INTERCEPTOR(xdr_u_quad_t, unsigned long long)\nXDR_INTERCEPTOR(xdr_bool, bool)\nXDR_INTERCEPTOR(xdr_enum, int)\nXDR_INTERCEPTOR(xdr_char, char)\nXDR_INTERCEPTOR(xdr_u_char, unsigned char)\nXDR_INTERCEPTOR(xdr_float, float)\nXDR_INTERCEPTOR(xdr_double, double)\n\n// FIXME: intercept xdr_array, opaque, union, vector, reference, pointer,\n// wrapstring, sizeof\n\nINTERCEPTOR(int, xdr_bytes, __sanitizer_XDR *xdrs, char **p, unsigned *sizep,\n            unsigned maxsize) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, xdr_bytes, xdrs, p, sizep, maxsize);\n  if (p && sizep 
&& xdrs->x_op == __sanitizer_XDR_ENCODE) {\n    COMMON_INTERCEPTOR_READ_RANGE(ctx, p, sizeof(*p));\n    COMMON_INTERCEPTOR_READ_RANGE(ctx, sizep, sizeof(*sizep));\n    COMMON_INTERCEPTOR_READ_RANGE(ctx, *p, *sizep);\n  }\n  // FIXME: under ASan the call below may write to freed memory and corrupt\n  // its metadata. See\n  // https://github.com/google/sanitizers/issues/321.\n  int res = REAL(xdr_bytes)(xdrs, p, sizep, maxsize);\n  if (p && sizep && xdrs->x_op == __sanitizer_XDR_DECODE) {\n    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, p, sizeof(*p));\n    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, sizep, sizeof(*sizep));\n    if (res && *p && *sizep) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, *p, *sizep);\n  }\n  return res;\n}\n\nINTERCEPTOR(int, xdr_string, __sanitizer_XDR *xdrs, char **p,\n            unsigned maxsize) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, xdr_string, xdrs, p, maxsize);\n  if (p && xdrs->x_op == __sanitizer_XDR_ENCODE) {\n    COMMON_INTERCEPTOR_READ_RANGE(ctx, p, sizeof(*p));\n    COMMON_INTERCEPTOR_READ_RANGE(ctx, *p, internal_strlen(*p) + 1);\n  }\n  // FIXME: under ASan the call below may write to freed memory and corrupt\n  // its metadata. 
See\n  // https://github.com/google/sanitizers/issues/321.\n  int res = REAL(xdr_string)(xdrs, p, maxsize);\n  if (p && xdrs->x_op == __sanitizer_XDR_DECODE) {\n    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, p, sizeof(*p));\n    if (res && *p)\n      COMMON_INTERCEPTOR_WRITE_RANGE(ctx, *p, internal_strlen(*p) + 1);\n  }\n  return res;\n}\n\n#define INIT_XDR                               \\\n  COMMON_INTERCEPT_FUNCTION(xdrmem_create);    \\\n  COMMON_INTERCEPT_FUNCTION(xdrstdio_create);  \\\n  COMMON_INTERCEPT_FUNCTION(xdr_short);        \\\n  COMMON_INTERCEPT_FUNCTION(xdr_u_short);      \\\n  COMMON_INTERCEPT_FUNCTION(xdr_int);          \\\n  COMMON_INTERCEPT_FUNCTION(xdr_u_int);        \\\n  COMMON_INTERCEPT_FUNCTION(xdr_long);         \\\n  COMMON_INTERCEPT_FUNCTION(xdr_u_long);       \\\n  COMMON_INTERCEPT_FUNCTION(xdr_hyper);        \\\n  COMMON_INTERCEPT_FUNCTION(xdr_u_hyper);      \\\n  COMMON_INTERCEPT_FUNCTION(xdr_longlong_t);   \\\n  COMMON_INTERCEPT_FUNCTION(xdr_u_longlong_t); \\\n  COMMON_INTERCEPT_FUNCTION(xdr_int8_t);       \\\n  COMMON_INTERCEPT_FUNCTION(xdr_uint8_t);      \\\n  COMMON_INTERCEPT_FUNCTION(xdr_int16_t);      \\\n  COMMON_INTERCEPT_FUNCTION(xdr_uint16_t);     \\\n  COMMON_INTERCEPT_FUNCTION(xdr_int32_t);      \\\n  COMMON_INTERCEPT_FUNCTION(xdr_uint32_t);     \\\n  COMMON_INTERCEPT_FUNCTION(xdr_int64_t);      \\\n  COMMON_INTERCEPT_FUNCTION(xdr_uint64_t);     \\\n  COMMON_INTERCEPT_FUNCTION(xdr_quad_t);       \\\n  COMMON_INTERCEPT_FUNCTION(xdr_u_quad_t);     \\\n  COMMON_INTERCEPT_FUNCTION(xdr_bool);         \\\n  COMMON_INTERCEPT_FUNCTION(xdr_enum);         \\\n  COMMON_INTERCEPT_FUNCTION(xdr_char);         \\\n  COMMON_INTERCEPT_FUNCTION(xdr_u_char);       \\\n  COMMON_INTERCEPT_FUNCTION(xdr_float);        \\\n  COMMON_INTERCEPT_FUNCTION(xdr_double);       \\\n  COMMON_INTERCEPT_FUNCTION(xdr_bytes);        \\\n  COMMON_INTERCEPT_FUNCTION(xdr_string);\n#else\n#define INIT_XDR\n#endif  // SANITIZER_INTERCEPT_XDR\n\n#if 
SANITIZER_INTERCEPT_XDRREC\ntypedef int (*xdrrec_cb)(char*, char*, int);\nstruct XdrRecWrapper {\n  char *handle;\n  xdrrec_cb rd, wr;\n};\ntypedef AddrHashMap<XdrRecWrapper *, 11> XdrRecWrapMap;\nstatic XdrRecWrapMap *xdrrec_wrap_map;\n\nstatic int xdrrec_wr_wrap(char *handle, char *buf, int count) {\n  COMMON_INTERCEPTOR_UNPOISON_PARAM(3);\n  COMMON_INTERCEPTOR_INITIALIZE_RANGE(buf, count);\n  XdrRecWrapper *wrap = (XdrRecWrapper *)handle;\n  return wrap->wr(wrap->handle, buf, count);\n}\n\nstatic int xdrrec_rd_wrap(char *handle, char *buf, int count) {\n  COMMON_INTERCEPTOR_UNPOISON_PARAM(3);\n  XdrRecWrapper *wrap = (XdrRecWrapper *)handle;\n  return wrap->rd(wrap->handle, buf, count);\n}\n\n// This doesn't apply to the solaris version as it has a different function\n// signature.\nINTERCEPTOR(void, xdrrec_create, __sanitizer_XDR *xdr, unsigned sndsize,\n            unsigned rcvsize, char *handle, int (*rd)(char*, char*, int),\n            int (*wr)(char*, char*, int)) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, xdrrec_create, xdr, sndsize, rcvsize,\n                           handle, rd, wr);\n  COMMON_INTERCEPTOR_READ_RANGE(ctx, &xdr->x_op, sizeof xdr->x_op);\n\n  // We can't allocate a wrapper on the stack, as the handle is used outside\n  // this stack frame. So we put it on the heap, and keep track of it with\n  // the HashMap (keyed by x_private). 
When we later need to xdr_destroy,\n  // we can index the map, free the wrapper, and then clean the map entry.\n  XdrRecWrapper *wrap_data =\n      (XdrRecWrapper *)InternalAlloc(sizeof(XdrRecWrapper));\n  wrap_data->handle = handle;\n  wrap_data->rd = rd;\n  wrap_data->wr = wr;\n  if (wr)\n    wr = xdrrec_wr_wrap;\n  if (rd)\n    rd = xdrrec_rd_wrap;\n  handle = (char *)wrap_data;\n\n  REAL(xdrrec_create)(xdr, sndsize, rcvsize, handle, rd, wr);\n  COMMON_INTERCEPTOR_WRITE_RANGE(ctx, xdr, sizeof *xdr);\n\n  XdrRecWrapMap::Handle wrap(xdrrec_wrap_map, xdr->x_private, false, true);\n  *wrap = wrap_data;\n}\n\n// We have to intercept this to be able to free wrapper memory;\n// otherwise it's not necessary.\nINTERCEPTOR(void, xdr_destroy, __sanitizer_XDR *xdr) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, xdr_destroy, xdr);\n\n  XdrRecWrapMap::Handle wrap(xdrrec_wrap_map, xdr->x_private, true);\n  InternalFree(*wrap);\n  REAL(xdr_destroy)(xdr);\n}\n#define INIT_XDRREC_LINUX \\\n  static u64 xdrrec_wrap_mem[sizeof(XdrRecWrapMap) / sizeof(u64) + 1]; \\\n  xdrrec_wrap_map = new ((void *)&xdrrec_wrap_mem) XdrRecWrapMap(); \\\n  COMMON_INTERCEPT_FUNCTION(xdrrec_create); \\\n  COMMON_INTERCEPT_FUNCTION(xdr_destroy);\n#else\n#define INIT_XDRREC_LINUX\n#endif\n\n#if SANITIZER_INTERCEPT_TSEARCH\nINTERCEPTOR(void *, tsearch, void *key, void **rootp,\n            int (*compar)(const void *, const void *)) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, tsearch, key, rootp, compar);\n  // FIXME: under ASan the call below may write to freed memory and corrupt\n  // its metadata. 
See\n  // https://github.com/google/sanitizers/issues/321.\n  void *res = REAL(tsearch)(key, rootp, compar);\n  if (res && *(void **)res == key)\n    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, res, sizeof(void *));\n  return res;\n}\n#define INIT_TSEARCH COMMON_INTERCEPT_FUNCTION(tsearch);\n#else\n#define INIT_TSEARCH\n#endif\n\n#if SANITIZER_INTERCEPT_LIBIO_INTERNALS || SANITIZER_INTERCEPT_FOPEN || \\\n    SANITIZER_INTERCEPT_OPEN_MEMSTREAM\nvoid unpoison_file(__sanitizer_FILE *fp) {\n#if SANITIZER_HAS_STRUCT_FILE\n  COMMON_INTERCEPTOR_INITIALIZE_RANGE(fp, sizeof(*fp));\n#if SANITIZER_NETBSD\n  if (fp->_bf._base && fp->_bf._size > 0)\n    COMMON_INTERCEPTOR_INITIALIZE_RANGE(fp->_bf._base,\n                                        fp->_bf._size);\n#else\n  if (fp->_IO_read_base && fp->_IO_read_base < fp->_IO_read_end)\n    COMMON_INTERCEPTOR_INITIALIZE_RANGE(fp->_IO_read_base,\n                                        fp->_IO_read_end - fp->_IO_read_base);\n  if (fp->_IO_write_base && fp->_IO_write_base < fp->_IO_write_end)\n    COMMON_INTERCEPTOR_INITIALIZE_RANGE(fp->_IO_write_base,\n                                        fp->_IO_write_end - fp->_IO_write_base);\n#endif\n#endif  // SANITIZER_HAS_STRUCT_FILE\n}\n#endif\n\n#if SANITIZER_INTERCEPT_LIBIO_INTERNALS\n// These guys are called when a .c source is built with -O2.\nINTERCEPTOR(int, __uflow, __sanitizer_FILE *fp) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, __uflow, fp);\n  int res = REAL(__uflow)(fp);\n  unpoison_file(fp);\n  return res;\n}\nINTERCEPTOR(int, __underflow, __sanitizer_FILE *fp) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, __underflow, fp);\n  int res = REAL(__underflow)(fp);\n  unpoison_file(fp);\n  return res;\n}\nINTERCEPTOR(int, __overflow, __sanitizer_FILE *fp, int ch) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, __overflow, fp, ch);\n  int res = REAL(__overflow)(fp, ch);\n  unpoison_file(fp);\n  return res;\n}\nINTERCEPTOR(int, __wuflow, __sanitizer_FILE *fp) {\n  void *ctx;\n  
COMMON_INTERCEPTOR_ENTER(ctx, __wuflow, fp);\n  int res = REAL(__wuflow)(fp);\n  unpoison_file(fp);\n  return res;\n}\nINTERCEPTOR(int, __wunderflow, __sanitizer_FILE *fp) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, __wunderflow, fp);\n  int res = REAL(__wunderflow)(fp);\n  unpoison_file(fp);\n  return res;\n}\nINTERCEPTOR(int, __woverflow, __sanitizer_FILE *fp, int ch) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, __woverflow, fp, ch);\n  int res = REAL(__woverflow)(fp, ch);\n  unpoison_file(fp);\n  return res;\n}\n#define INIT_LIBIO_INTERNALS               \\\n  COMMON_INTERCEPT_FUNCTION(__uflow);      \\\n  COMMON_INTERCEPT_FUNCTION(__underflow);  \\\n  COMMON_INTERCEPT_FUNCTION(__overflow);   \\\n  COMMON_INTERCEPT_FUNCTION(__wuflow);     \\\n  COMMON_INTERCEPT_FUNCTION(__wunderflow); \\\n  COMMON_INTERCEPT_FUNCTION(__woverflow);\n#else\n#define INIT_LIBIO_INTERNALS\n#endif\n\n#if SANITIZER_INTERCEPT_FOPEN\nINTERCEPTOR(__sanitizer_FILE *, fopen, const char *path, const char *mode) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, fopen, path, mode);\n  if (path) COMMON_INTERCEPTOR_READ_RANGE(ctx, path, internal_strlen(path) + 1);\n  COMMON_INTERCEPTOR_READ_RANGE(ctx, mode, internal_strlen(mode) + 1);\n  __sanitizer_FILE *res = REAL(fopen)(path, mode);\n  COMMON_INTERCEPTOR_FILE_OPEN(ctx, res, path);\n  if (res) unpoison_file(res);\n  return res;\n}\nINTERCEPTOR(__sanitizer_FILE *, fdopen, int fd, const char *mode) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, fdopen, fd, mode);\n  COMMON_INTERCEPTOR_READ_RANGE(ctx, mode, internal_strlen(mode) + 1);\n  __sanitizer_FILE *res = REAL(fdopen)(fd, mode);\n  if (res) unpoison_file(res);\n  return res;\n}\nINTERCEPTOR(__sanitizer_FILE *, freopen, const char *path, const char *mode,\n            __sanitizer_FILE *fp) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, freopen, path, mode, fp);\n  if (path) COMMON_INTERCEPTOR_READ_RANGE(ctx, path, internal_strlen(path) + 1);\n  COMMON_INTERCEPTOR_READ_RANGE(ctx, 
mode, internal_strlen(mode) + 1);\n  COMMON_INTERCEPTOR_FILE_CLOSE(ctx, fp);\n  __sanitizer_FILE *res = REAL(freopen)(path, mode, fp);\n  COMMON_INTERCEPTOR_FILE_OPEN(ctx, res, path);\n  if (res) unpoison_file(res);\n  return res;\n}\n#define INIT_FOPEN                   \\\n  COMMON_INTERCEPT_FUNCTION(fopen);  \\\n  COMMON_INTERCEPT_FUNCTION(fdopen); \\\n  COMMON_INTERCEPT_FUNCTION(freopen);\n#else\n#define INIT_FOPEN\n#endif\n\n#if SANITIZER_INTERCEPT_FLOPEN\nINTERCEPTOR(int, flopen, const char *path, int flags, ...) {\n  void *ctx;\n  va_list ap;\n  va_start(ap, flags);\n  u16 mode = static_cast<u16>(va_arg(ap, u32));\n  va_end(ap);\n  COMMON_INTERCEPTOR_ENTER(ctx, flopen, path, flags, mode);\n  if (path) {\n    COMMON_INTERCEPTOR_READ_RANGE(ctx, path, internal_strlen(path) + 1);\n  }\n  return REAL(flopen)(path, flags, mode);\n}\n\nINTERCEPTOR(int, flopenat, int dirfd, const char *path, int flags, ...) {\n  void *ctx;\n  va_list ap;\n  va_start(ap, flags);\n  u16 mode = static_cast<u16>(va_arg(ap, u32));\n  va_end(ap);\n  COMMON_INTERCEPTOR_ENTER(ctx, flopen, path, flags, mode);\n  if (path) {\n    COMMON_INTERCEPTOR_READ_RANGE(ctx, path, internal_strlen(path) + 1);\n  }\n  return REAL(flopenat)(dirfd, path, flags, mode);\n}\n\n#define INIT_FLOPEN    \\\n  COMMON_INTERCEPT_FUNCTION(flopen); \\\n  COMMON_INTERCEPT_FUNCTION(flopenat);\n#else\n#define INIT_FLOPEN\n#endif\n\n#if SANITIZER_INTERCEPT_FOPEN64\nINTERCEPTOR(__sanitizer_FILE *, fopen64, const char *path, const char *mode) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, fopen64, path, mode);\n  COMMON_INTERCEPTOR_READ_RANGE(ctx, path, internal_strlen(path) + 1);\n  COMMON_INTERCEPTOR_READ_RANGE(ctx, mode, internal_strlen(mode) + 1);\n  __sanitizer_FILE *res = REAL(fopen64)(path, mode);\n  COMMON_INTERCEPTOR_FILE_OPEN(ctx, res, path);\n  if (res) unpoison_file(res);\n  return res;\n}\nINTERCEPTOR(__sanitizer_FILE *, freopen64, const char *path, const char *mode,\n            __sanitizer_FILE *fp) {\n  
void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, freopen64, path, mode, fp);\n  if (path) COMMON_INTERCEPTOR_READ_RANGE(ctx, path, internal_strlen(path) + 1);\n  COMMON_INTERCEPTOR_READ_RANGE(ctx, mode, internal_strlen(mode) + 1);\n  COMMON_INTERCEPTOR_FILE_CLOSE(ctx, fp);\n  __sanitizer_FILE *res = REAL(freopen64)(path, mode, fp);\n  COMMON_INTERCEPTOR_FILE_OPEN(ctx, res, path);\n  if (res) unpoison_file(res);\n  return res;\n}\n#define INIT_FOPEN64                  \\\n  COMMON_INTERCEPT_FUNCTION(fopen64); \\\n  COMMON_INTERCEPT_FUNCTION(freopen64);\n#else\n#define INIT_FOPEN64\n#endif\n\n#if SANITIZER_INTERCEPT_OPEN_MEMSTREAM\nINTERCEPTOR(__sanitizer_FILE *, open_memstream, char **ptr, SIZE_T *sizeloc) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, open_memstream, ptr, sizeloc);\n  // FIXME: under ASan the call below may write to freed memory and corrupt\n  // its metadata. See\n  // https://github.com/google/sanitizers/issues/321.\n  __sanitizer_FILE *res = REAL(open_memstream)(ptr, sizeloc);\n  if (res) {\n    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ptr, sizeof(*ptr));\n    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, sizeloc, sizeof(*sizeloc));\n    unpoison_file(res);\n    FileMetadata file = {ptr, sizeloc};\n    SetInterceptorMetadata(res, file);\n  }\n  return res;\n}\nINTERCEPTOR(__sanitizer_FILE *, open_wmemstream, wchar_t **ptr,\n            SIZE_T *sizeloc) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, open_wmemstream, ptr, sizeloc);\n  __sanitizer_FILE *res = REAL(open_wmemstream)(ptr, sizeloc);\n  if (res) {\n    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ptr, sizeof(*ptr));\n    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, sizeloc, sizeof(*sizeloc));\n    unpoison_file(res);\n    FileMetadata file = {(char **)ptr, sizeloc};\n    SetInterceptorMetadata(res, file);\n  }\n  return res;\n}\nINTERCEPTOR(__sanitizer_FILE *, fmemopen, void *buf, SIZE_T size,\n            const char *mode) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, fmemopen, buf, size, mode);\n  // FIXME: under 
ASan the call below may write to freed memory and corrupt\n  // its metadata. See\n  // https://github.com/google/sanitizers/issues/321.\n  __sanitizer_FILE *res = REAL(fmemopen)(buf, size, mode);\n  if (res) unpoison_file(res);\n  return res;\n}\n#define INIT_OPEN_MEMSTREAM                   \\\n  COMMON_INTERCEPT_FUNCTION(open_memstream);  \\\n  COMMON_INTERCEPT_FUNCTION(open_wmemstream); \\\n  COMMON_INTERCEPT_FUNCTION(fmemopen);\n#else\n#define INIT_OPEN_MEMSTREAM\n#endif\n\n#if SANITIZER_INTERCEPT_OBSTACK\nstatic void initialize_obstack(__sanitizer_obstack *obstack) {\n  COMMON_INTERCEPTOR_INITIALIZE_RANGE(obstack, sizeof(*obstack));\n  if (obstack->chunk)\n    COMMON_INTERCEPTOR_INITIALIZE_RANGE(obstack->chunk,\n                                        sizeof(*obstack->chunk));\n}\n\nINTERCEPTOR(int, _obstack_begin_1, __sanitizer_obstack *obstack, int sz,\n            int align, void *(*alloc_fn)(uptr arg, uptr sz),\n            void (*free_fn)(uptr arg, void *p)) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, _obstack_begin_1, obstack, sz, align, alloc_fn,\n                           free_fn);\n  int res = REAL(_obstack_begin_1)(obstack, sz, align, alloc_fn, free_fn);\n  if (res) initialize_obstack(obstack);\n  return res;\n}\nINTERCEPTOR(int, _obstack_begin, __sanitizer_obstack *obstack, int sz,\n            int align, void *(*alloc_fn)(uptr sz), void (*free_fn)(void *p)) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, _obstack_begin, obstack, sz, align, alloc_fn,\n                           free_fn);\n  int res = REAL(_obstack_begin)(obstack, sz, align, alloc_fn, free_fn);\n  if (res) initialize_obstack(obstack);\n  return res;\n}\nINTERCEPTOR(void, _obstack_newchunk, __sanitizer_obstack *obstack, int length) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, _obstack_newchunk, obstack, length);\n  REAL(_obstack_newchunk)(obstack, length);\n  if (obstack->chunk)\n    COMMON_INTERCEPTOR_INITIALIZE_RANGE(\n        obstack->chunk, obstack->next_free - (char 
*)obstack->chunk);\n}\n#define INIT_OBSTACK                           \\\n  COMMON_INTERCEPT_FUNCTION(_obstack_begin_1); \\\n  COMMON_INTERCEPT_FUNCTION(_obstack_begin);   \\\n  COMMON_INTERCEPT_FUNCTION(_obstack_newchunk);\n#else\n#define INIT_OBSTACK\n#endif\n\n#if SANITIZER_INTERCEPT_FFLUSH\nINTERCEPTOR(int, fflush, __sanitizer_FILE *fp) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, fflush, fp);\n  if (fp)\n    unpoison_file(fp);\n  int res = REAL(fflush)(fp);\n  // FIXME: handle fp == NULL\n  if (fp) {\n    const FileMetadata *m = GetInterceptorMetadata(fp);\n    if (m) COMMON_INTERCEPTOR_INITIALIZE_RANGE(*m->addr, *m->size);\n  }\n  return res;\n}\n#define INIT_FFLUSH COMMON_INTERCEPT_FUNCTION(fflush);\n#else\n#define INIT_FFLUSH\n#endif\n\n#if SANITIZER_INTERCEPT_FCLOSE\nINTERCEPTOR(int, fclose, __sanitizer_FILE *fp) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, fclose, fp);\n  COMMON_INTERCEPTOR_FILE_CLOSE(ctx, fp);\n  const FileMetadata *m = GetInterceptorMetadata(fp);\n  if (fp)\n    unpoison_file(fp);\n  int res = REAL(fclose)(fp);\n  if (m) {\n    COMMON_INTERCEPTOR_INITIALIZE_RANGE(*m->addr, *m->size);\n    DeleteInterceptorMetadata(fp);\n  }\n  return res;\n}\n#define INIT_FCLOSE COMMON_INTERCEPT_FUNCTION(fclose);\n#else\n#define INIT_FCLOSE\n#endif\n\n#if SANITIZER_INTERCEPT_DLOPEN_DLCLOSE\nINTERCEPTOR(void*, dlopen, const char *filename, int flag) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER_NOIGNORE(ctx, dlopen, filename, flag);\n  if (filename) COMMON_INTERCEPTOR_READ_STRING(ctx, filename, 0);\n  void *res = COMMON_INTERCEPTOR_DLOPEN(filename, flag);\n  Symbolizer::GetOrInit()->InvalidateModuleList();\n  COMMON_INTERCEPTOR_LIBRARY_LOADED(filename, res);\n  return res;\n}\n\nINTERCEPTOR(int, dlclose, void *handle) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER_NOIGNORE(ctx, dlclose, handle);\n  int res = REAL(dlclose)(handle);\n  Symbolizer::GetOrInit()->InvalidateModuleList();\n  COMMON_INTERCEPTOR_LIBRARY_UNLOADED();\n  return res;\n}\n#define 
INIT_DLOPEN_DLCLOSE          \\\n  COMMON_INTERCEPT_FUNCTION(dlopen); \\\n  COMMON_INTERCEPT_FUNCTION(dlclose);\n#else\n#define INIT_DLOPEN_DLCLOSE\n#endif\n\n#if SANITIZER_INTERCEPT_GETPASS\nINTERCEPTOR(char *, getpass, const char *prompt) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, getpass, prompt);\n  if (prompt)\n    COMMON_INTERCEPTOR_READ_RANGE(ctx, prompt, internal_strlen(prompt)+1);\n  char *res = REAL(getpass)(prompt);\n  if (res) COMMON_INTERCEPTOR_INITIALIZE_RANGE(res, internal_strlen(res)+1);\n  return res;\n}\n\n#define INIT_GETPASS COMMON_INTERCEPT_FUNCTION(getpass);\n#else\n#define INIT_GETPASS\n#endif\n\n#if SANITIZER_INTERCEPT_TIMERFD\nINTERCEPTOR(int, timerfd_settime, int fd, int flags, void *new_value,\n            void *old_value) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, timerfd_settime, fd, flags, new_value,\n                           old_value);\n  COMMON_INTERCEPTOR_READ_RANGE(ctx, new_value, struct_itimerspec_sz);\n  int res = REAL(timerfd_settime)(fd, flags, new_value, old_value);\n  if (res != -1 && old_value)\n    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, old_value, struct_itimerspec_sz);\n  return res;\n}\n\nINTERCEPTOR(int, timerfd_gettime, int fd, void *curr_value) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, timerfd_gettime, fd, curr_value);\n  int res = REAL(timerfd_gettime)(fd, curr_value);\n  if (res != -1 && curr_value)\n    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, curr_value, struct_itimerspec_sz);\n  return res;\n}\n#define INIT_TIMERFD                          \\\n  COMMON_INTERCEPT_FUNCTION(timerfd_settime); \\\n  COMMON_INTERCEPT_FUNCTION(timerfd_gettime);\n#else\n#define INIT_TIMERFD\n#endif\n\n#if SANITIZER_INTERCEPT_MLOCKX\n// Linux kernel has a bug that leads to kernel deadlock if a process\n// maps TBs of memory and then calls mlock().\nstatic void MlockIsUnsupported() {\n  static atomic_uint8_t printed;\n  if (atomic_exchange(&printed, 1, memory_order_relaxed))\n    return;\n  VPrintf(1, \"%s ignores 
mlock/mlockall/munlock/munlockall\\n\",\n          SanitizerToolName);\n}\n\nINTERCEPTOR(int, mlock, const void *addr, uptr len) {\n  MlockIsUnsupported();\n  return 0;\n}\n\nINTERCEPTOR(int, munlock, const void *addr, uptr len) {\n  MlockIsUnsupported();\n  return 0;\n}\n\nINTERCEPTOR(int, mlockall, int flags) {\n  MlockIsUnsupported();\n  return 0;\n}\n\nINTERCEPTOR(int, munlockall, void) {\n  MlockIsUnsupported();\n  return 0;\n}\n\n#define INIT_MLOCKX                                                            \\\n  COMMON_INTERCEPT_FUNCTION(mlock);                                            \\\n  COMMON_INTERCEPT_FUNCTION(munlock);                                          \\\n  COMMON_INTERCEPT_FUNCTION(mlockall);                                         \\\n  COMMON_INTERCEPT_FUNCTION(munlockall);\n\n#else\n#define INIT_MLOCKX\n#endif  // SANITIZER_INTERCEPT_MLOCKX\n\n#if SANITIZER_INTERCEPT_FOPENCOOKIE\nstruct WrappedCookie {\n  void *real_cookie;\n  __sanitizer_cookie_io_functions_t real_io_funcs;\n};\n\nstatic uptr wrapped_read(void *cookie, char *buf, uptr size) {\n  COMMON_INTERCEPTOR_UNPOISON_PARAM(3);\n  WrappedCookie *wrapped_cookie = (WrappedCookie *)cookie;\n  __sanitizer_cookie_io_read real_read = wrapped_cookie->real_io_funcs.read;\n  return real_read ? real_read(wrapped_cookie->real_cookie, buf, size) : 0;\n}\n\nstatic uptr wrapped_write(void *cookie, const char *buf, uptr size) {\n  COMMON_INTERCEPTOR_UNPOISON_PARAM(3);\n  WrappedCookie *wrapped_cookie = (WrappedCookie *)cookie;\n  __sanitizer_cookie_io_write real_write = wrapped_cookie->real_io_funcs.write;\n  return real_write ? 
real_write(wrapped_cookie->real_cookie, buf, size) : size;\n}\n\nstatic int wrapped_seek(void *cookie, u64 *offset, int whence) {\n  COMMON_INTERCEPTOR_UNPOISON_PARAM(3);\n  COMMON_INTERCEPTOR_INITIALIZE_RANGE(offset, sizeof(*offset));\n  WrappedCookie *wrapped_cookie = (WrappedCookie *)cookie;\n  __sanitizer_cookie_io_seek real_seek = wrapped_cookie->real_io_funcs.seek;\n  return real_seek ? real_seek(wrapped_cookie->real_cookie, offset, whence)\n                   : -1;\n}\n\nstatic int wrapped_close(void *cookie) {\n  COMMON_INTERCEPTOR_UNPOISON_PARAM(1);\n  WrappedCookie *wrapped_cookie = (WrappedCookie *)cookie;\n  __sanitizer_cookie_io_close real_close = wrapped_cookie->real_io_funcs.close;\n  int res = real_close ? real_close(wrapped_cookie->real_cookie) : 0;\n  InternalFree(wrapped_cookie);\n  return res;\n}\n\nINTERCEPTOR(__sanitizer_FILE *, fopencookie, void *cookie, const char *mode,\n            __sanitizer_cookie_io_functions_t io_funcs) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, fopencookie, cookie, mode, io_funcs);\n  WrappedCookie *wrapped_cookie =\n      (WrappedCookie *)InternalAlloc(sizeof(WrappedCookie));\n  wrapped_cookie->real_cookie = cookie;\n  wrapped_cookie->real_io_funcs = io_funcs;\n  __sanitizer_FILE *res =\n      REAL(fopencookie)(wrapped_cookie, mode, {wrapped_read, wrapped_write,\n                                               wrapped_seek, wrapped_close});\n  return res;\n}\n\n#define INIT_FOPENCOOKIE COMMON_INTERCEPT_FUNCTION(fopencookie);\n#else\n#define INIT_FOPENCOOKIE\n#endif  // SANITIZER_INTERCEPT_FOPENCOOKIE\n\n#if SANITIZER_INTERCEPT_SEM\nINTERCEPTOR(int, sem_init, __sanitizer_sem_t *s, int pshared, unsigned value) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, sem_init, s, pshared, value);\n  // Workaround a bug in glibc's \"old\" semaphore implementation by\n  // zero-initializing the sem_t contents. 
This has to be done here because\n  // interceptors bind to the lowest symbols version by default, hitting the\n  // buggy code path while the non-sanitized build of the same code works fine.\n  REAL(memset)(s, 0, sizeof(*s));\n  int res = REAL(sem_init)(s, pshared, value);\n  return res;\n}\n\nINTERCEPTOR(int, sem_destroy, __sanitizer_sem_t *s) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, sem_destroy, s);\n  int res = REAL(sem_destroy)(s);\n  return res;\n}\n\nINTERCEPTOR(int, sem_wait, __sanitizer_sem_t *s) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, sem_wait, s);\n  int res = COMMON_INTERCEPTOR_BLOCK_REAL(sem_wait)(s);\n  if (res == 0) {\n    COMMON_INTERCEPTOR_ACQUIRE(ctx, (uptr)s);\n  }\n  return res;\n}\n\nINTERCEPTOR(int, sem_trywait, __sanitizer_sem_t *s) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, sem_trywait, s);\n  int res = REAL(sem_trywait)(s);\n  if (res == 0) {\n    COMMON_INTERCEPTOR_ACQUIRE(ctx, (uptr)s);\n  }\n  return res;\n}\n\nINTERCEPTOR(int, sem_timedwait, __sanitizer_sem_t *s, void *abstime) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, sem_timedwait, s, abstime);\n  COMMON_INTERCEPTOR_READ_RANGE(ctx, abstime, struct_timespec_sz);\n  int res = COMMON_INTERCEPTOR_BLOCK_REAL(sem_timedwait)(s, abstime);\n  if (res == 0) {\n    COMMON_INTERCEPTOR_ACQUIRE(ctx, (uptr)s);\n  }\n  return res;\n}\n\nINTERCEPTOR(int, sem_post, __sanitizer_sem_t *s) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, sem_post, s);\n  COMMON_INTERCEPTOR_RELEASE(ctx, (uptr)s);\n  int res = REAL(sem_post)(s);\n  return res;\n}\n\nINTERCEPTOR(int, sem_getvalue, __sanitizer_sem_t *s, int *sval) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, sem_getvalue, s, sval);\n  int res = REAL(sem_getvalue)(s, sval);\n  if (res == 0) {\n    COMMON_INTERCEPTOR_ACQUIRE(ctx, (uptr)s);\n    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, sval, sizeof(*sval));\n  }\n  return res;\n}\n\nINTERCEPTOR(__sanitizer_sem_t *, sem_open, const char *name, int oflag, ...) 
{\n  void *ctx;\n  va_list ap;\n  va_start(ap, oflag);\n  u32 mode = va_arg(ap, u32);\n  u32 value = va_arg(ap, u32);\n  COMMON_INTERCEPTOR_ENTER(ctx, sem_open, name, oflag, mode, value);\n  COMMON_INTERCEPTOR_READ_RANGE(ctx, name, internal_strlen(name) + 1);\n  __sanitizer_sem_t *s = REAL(sem_open)(name, oflag, mode, value);\n  if (s)\n    COMMON_INTERCEPTOR_INITIALIZE_RANGE(s, sizeof(*s));\n  va_end(ap);\n  return s;\n}\n\nINTERCEPTOR(int, sem_unlink, const char *name) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, sem_unlink, name);\n  COMMON_INTERCEPTOR_READ_RANGE(ctx, name, internal_strlen(name) + 1);\n  return REAL(sem_unlink)(name);\n}\n\n#  define INIT_SEM                            \\\n    COMMON_INTERCEPT_FUNCTION(sem_init);      \\\n    COMMON_INTERCEPT_FUNCTION(sem_destroy);   \\\n    COMMON_INTERCEPT_FUNCTION(sem_wait);      \\\n    COMMON_INTERCEPT_FUNCTION(sem_trywait);   \\\n    COMMON_INTERCEPT_FUNCTION(sem_timedwait); \\\n    COMMON_INTERCEPT_FUNCTION(sem_post);      \\\n    COMMON_INTERCEPT_FUNCTION(sem_getvalue);  \\\n    COMMON_INTERCEPT_FUNCTION(sem_open);      \\\n    COMMON_INTERCEPT_FUNCTION(sem_unlink);\n#else\n#  define INIT_SEM\n#endif  // SANITIZER_INTERCEPT_SEM\n\n#if SANITIZER_INTERCEPT_PTHREAD_SETCANCEL\nINTERCEPTOR(int, pthread_setcancelstate, int state, int *oldstate) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, pthread_setcancelstate, state, oldstate);\n  int res = REAL(pthread_setcancelstate)(state, oldstate);\n  if (res == 0 && oldstate != nullptr)\n    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, oldstate, sizeof(*oldstate));\n  return res;\n}\n\nINTERCEPTOR(int, pthread_setcanceltype, int type, int *oldtype) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, pthread_setcanceltype, type, oldtype);\n  int res = REAL(pthread_setcanceltype)(type, oldtype);\n  if (res == 0 && oldtype != nullptr)\n    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, oldtype, sizeof(*oldtype));\n  return res;\n}\n#define INIT_PTHREAD_SETCANCEL                          
                       \\\n  COMMON_INTERCEPT_FUNCTION(pthread_setcancelstate);                           \\\n  COMMON_INTERCEPT_FUNCTION(pthread_setcanceltype);\n#else\n#define INIT_PTHREAD_SETCANCEL\n#endif\n\n#if SANITIZER_INTERCEPT_MINCORE\nINTERCEPTOR(int, mincore, void *addr, uptr length, unsigned char *vec) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, mincore, addr, length, vec);\n  int res = REAL(mincore)(addr, length, vec);\n  if (res == 0) {\n    uptr page_size = GetPageSizeCached();\n    uptr vec_size = ((length + page_size - 1) & (~(page_size - 1))) / page_size;\n    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, vec, vec_size);\n  }\n  return res;\n}\n#define INIT_MINCORE COMMON_INTERCEPT_FUNCTION(mincore);\n#else\n#define INIT_MINCORE\n#endif\n\n#if SANITIZER_INTERCEPT_PROCESS_VM_READV\nINTERCEPTOR(SSIZE_T, process_vm_readv, int pid, __sanitizer_iovec *local_iov,\n            uptr liovcnt, __sanitizer_iovec *remote_iov, uptr riovcnt,\n            uptr flags) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, process_vm_readv, pid, local_iov, liovcnt,\n                           remote_iov, riovcnt, flags);\n  SSIZE_T res = REAL(process_vm_readv)(pid, local_iov, liovcnt, remote_iov,\n                                       riovcnt, flags);\n  if (res > 0)\n    write_iovec(ctx, local_iov, liovcnt, res);\n  return res;\n}\n\nINTERCEPTOR(SSIZE_T, process_vm_writev, int pid, __sanitizer_iovec *local_iov,\n            uptr liovcnt, __sanitizer_iovec *remote_iov, uptr riovcnt,\n            uptr flags) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, process_vm_writev, pid, local_iov, liovcnt,\n                           remote_iov, riovcnt, flags);\n  SSIZE_T res = REAL(process_vm_writev)(pid, local_iov, liovcnt, remote_iov,\n                                        riovcnt, flags);\n  if (res > 0)\n    read_iovec(ctx, local_iov, liovcnt, res);\n  return res;\n}\n#define INIT_PROCESS_VM_READV                                                  \\\n  
COMMON_INTERCEPT_FUNCTION(process_vm_readv);                                 \\\n  COMMON_INTERCEPT_FUNCTION(process_vm_writev);\n#else\n#define INIT_PROCESS_VM_READV\n#endif\n\n#if SANITIZER_INTERCEPT_CTERMID\nINTERCEPTOR(char *, ctermid, char *s) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, ctermid, s);\n  char *res = REAL(ctermid)(s);\n  if (res) {\n    COMMON_INTERCEPTOR_INITIALIZE_RANGE(res, internal_strlen(res) + 1);\n  }\n  return res;\n}\n#define INIT_CTERMID COMMON_INTERCEPT_FUNCTION(ctermid);\n#else\n#define INIT_CTERMID\n#endif\n\n#if SANITIZER_INTERCEPT_CTERMID_R\nINTERCEPTOR(char *, ctermid_r, char *s) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, ctermid_r, s);\n  char *res = REAL(ctermid_r)(s);\n  if (res) {\n    COMMON_INTERCEPTOR_INITIALIZE_RANGE(res, internal_strlen(res) + 1);\n  }\n  return res;\n}\n#define INIT_CTERMID_R COMMON_INTERCEPT_FUNCTION(ctermid_r);\n#else\n#define INIT_CTERMID_R\n#endif\n\n#if SANITIZER_INTERCEPT_RECV_RECVFROM\nINTERCEPTOR(SSIZE_T, recv, int fd, void *buf, SIZE_T len, int flags) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, recv, fd, buf, len, flags);\n  COMMON_INTERCEPTOR_FD_ACCESS(ctx, fd);\n  SSIZE_T res = REAL(recv)(fd, buf, len, flags);\n  if (res > 0) {\n    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buf, Min((SIZE_T)res, len));\n  }\n  if (res >= 0 && fd >= 0) COMMON_INTERCEPTOR_FD_ACQUIRE(ctx, fd);\n  return res;\n}\n\nINTERCEPTOR(SSIZE_T, recvfrom, int fd, void *buf, SIZE_T len, int flags,\n            void *srcaddr, int *addrlen) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, recvfrom, fd, buf, len, flags, srcaddr,\n                           addrlen);\n  COMMON_INTERCEPTOR_FD_ACCESS(ctx, fd);\n  SIZE_T srcaddr_sz;\n  if (srcaddr) srcaddr_sz = *addrlen;\n  (void)srcaddr_sz;  // prevent \"set but not used\" warning\n  SSIZE_T res = REAL(recvfrom)(fd, buf, len, flags, srcaddr, addrlen);\n  if (res > 0)\n    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buf, Min((SIZE_T)res, len));\n  if (res >= 0 && srcaddr)\n    
COMMON_INTERCEPTOR_INITIALIZE_RANGE(srcaddr,\n                                        Min((SIZE_T)*addrlen, srcaddr_sz));\n  return res;\n}\n#define INIT_RECV_RECVFROM          \\\n  COMMON_INTERCEPT_FUNCTION(recv);  \\\n  COMMON_INTERCEPT_FUNCTION(recvfrom);\n#else\n#define INIT_RECV_RECVFROM\n#endif\n\n#if SANITIZER_INTERCEPT_SEND_SENDTO\nINTERCEPTOR(SSIZE_T, send, int fd, void *buf, SIZE_T len, int flags) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, send, fd, buf, len, flags);\n  if (fd >= 0) {\n    COMMON_INTERCEPTOR_FD_ACCESS(ctx, fd);\n    COMMON_INTERCEPTOR_FD_RELEASE(ctx, fd);\n  }\n  SSIZE_T res = REAL(send)(fd, buf, len, flags);\n  if (common_flags()->intercept_send && res > 0)\n    COMMON_INTERCEPTOR_READ_RANGE(ctx, buf, Min((SIZE_T)res, len));\n  return res;\n}\n\nINTERCEPTOR(SSIZE_T, sendto, int fd, void *buf, SIZE_T len, int flags,\n            void *dstaddr, int addrlen) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, sendto, fd, buf, len, flags, dstaddr, addrlen);\n  if (fd >= 0) {\n    COMMON_INTERCEPTOR_FD_ACCESS(ctx, fd);\n    COMMON_INTERCEPTOR_FD_RELEASE(ctx, fd);\n  }\n  // Can't check dstaddr as it may have uninitialized padding at the end.\n  SSIZE_T res = REAL(sendto)(fd, buf, len, flags, dstaddr, addrlen);\n  if (common_flags()->intercept_send && res > 0)\n    COMMON_INTERCEPTOR_READ_RANGE(ctx, buf, Min((SIZE_T)res, len));\n  return res;\n}\n#define INIT_SEND_SENDTO           \\\n  COMMON_INTERCEPT_FUNCTION(send); \\\n  COMMON_INTERCEPT_FUNCTION(sendto);\n#else\n#define INIT_SEND_SENDTO\n#endif\n\n#if SANITIZER_INTERCEPT_EVENTFD_READ_WRITE\nINTERCEPTOR(int, eventfd_read, int fd, u64 *value) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, eventfd_read, fd, value);\n  COMMON_INTERCEPTOR_FD_ACCESS(ctx, fd);\n  int res = REAL(eventfd_read)(fd, value);\n  if (res == 0) {\n    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, value, sizeof(*value));\n    if (fd >= 0) COMMON_INTERCEPTOR_FD_ACQUIRE(ctx, fd);\n  }\n  return res;\n}\nINTERCEPTOR(int, 
eventfd_write, int fd, u64 value) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, eventfd_write, fd, value);\n  if (fd >= 0) {\n    COMMON_INTERCEPTOR_FD_ACCESS(ctx, fd);\n    COMMON_INTERCEPTOR_FD_RELEASE(ctx, fd);\n  }\n  int res = REAL(eventfd_write)(fd, value);\n  return res;\n}\n#define INIT_EVENTFD_READ_WRITE            \\\n  COMMON_INTERCEPT_FUNCTION(eventfd_read); \\\n  COMMON_INTERCEPT_FUNCTION(eventfd_write)\n#else\n#define INIT_EVENTFD_READ_WRITE\n#endif\n\n#if SANITIZER_INTERCEPT_STAT\nINTERCEPTOR(int, stat, const char *path, void *buf) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, stat, path, buf);\n  if (common_flags()->intercept_stat)\n    COMMON_INTERCEPTOR_READ_STRING(ctx, path, 0);\n  int res = REAL(stat)(path, buf);\n  if (!res)\n    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buf, __sanitizer::struct_stat_sz);\n  return res;\n}\n#define INIT_STAT COMMON_INTERCEPT_FUNCTION(stat)\n#else\n#define INIT_STAT\n#endif\n\n#if SANITIZER_INTERCEPT_LSTAT\nINTERCEPTOR(int, lstat, const char *path, void *buf) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, lstat, path, buf);\n  if (common_flags()->intercept_stat)\n    COMMON_INTERCEPTOR_READ_STRING(ctx, path, 0);\n  int res = REAL(lstat)(path, buf);\n  if (!res)\n    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buf, __sanitizer::struct_stat_sz);\n  return res;\n}\n#define INIT_LSTAT COMMON_INTERCEPT_FUNCTION(lstat)\n#else\n#define INIT_LSTAT\n#endif\n\n#if SANITIZER_INTERCEPT___XSTAT\nINTERCEPTOR(int, __xstat, int version, const char *path, void *buf) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, __xstat, version, path, buf);\n  if (common_flags()->intercept_stat)\n    COMMON_INTERCEPTOR_READ_STRING(ctx, path, 0);\n  int res = REAL(__xstat)(version, path, buf);\n  if (!res)\n    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buf, __sanitizer::struct_stat_sz);\n  return res;\n}\n#define INIT___XSTAT COMMON_INTERCEPT_FUNCTION(__xstat)\n#else\n#define INIT___XSTAT\n#endif\n\n#if SANITIZER_INTERCEPT___XSTAT64\nINTERCEPTOR(int, 
__xstat64, int version, const char *path, void *buf) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, __xstat64, version, path, buf);\n  if (common_flags()->intercept_stat)\n    COMMON_INTERCEPTOR_READ_STRING(ctx, path, 0);\n  int res = REAL(__xstat64)(version, path, buf);\n  if (!res)\n    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buf, __sanitizer::struct_stat64_sz);\n  return res;\n}\n#define INIT___XSTAT64 COMMON_INTERCEPT_FUNCTION(__xstat64)\n#else\n#define INIT___XSTAT64\n#endif\n\n#if SANITIZER_INTERCEPT___LXSTAT\nINTERCEPTOR(int, __lxstat, int version, const char *path, void *buf) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, __lxstat, version, path, buf);\n  if (common_flags()->intercept_stat)\n    COMMON_INTERCEPTOR_READ_STRING(ctx, path, 0);\n  int res = REAL(__lxstat)(version, path, buf);\n  if (!res)\n    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buf, __sanitizer::struct_stat_sz);\n  return res;\n}\n#define INIT___LXSTAT COMMON_INTERCEPT_FUNCTION(__lxstat)\n#else\n#define INIT___LXSTAT\n#endif\n\n#if SANITIZER_INTERCEPT___LXSTAT64\nINTERCEPTOR(int, __lxstat64, int version, const char *path, void *buf) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, __lxstat64, version, path, buf);\n  if (common_flags()->intercept_stat)\n    COMMON_INTERCEPTOR_READ_STRING(ctx, path, 0);\n  int res = REAL(__lxstat64)(version, path, buf);\n  if (!res)\n    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buf, __sanitizer::struct_stat64_sz);\n  return res;\n}\n#define INIT___LXSTAT64 COMMON_INTERCEPT_FUNCTION(__lxstat64)\n#else\n#define INIT___LXSTAT64\n#endif\n\n// FIXME: add other *stat interceptor\n\n#if SANITIZER_INTERCEPT_UTMP\nINTERCEPTOR(void *, getutent, int dummy) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, getutent, dummy);\n  void *res = REAL(getutent)(dummy);\n  if (res)\n    COMMON_INTERCEPTOR_INITIALIZE_RANGE(res, __sanitizer::struct_utmp_sz);\n  return res;\n}\nINTERCEPTOR(void *, getutid, void *ut) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, getutid, ut);\n  void *res 
= REAL(getutid)(ut);\n  if (res)\n    COMMON_INTERCEPTOR_INITIALIZE_RANGE(res, __sanitizer::struct_utmp_sz);\n  return res;\n}\nINTERCEPTOR(void *, getutline, void *ut) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, getutline, ut);\n  void *res = REAL(getutline)(ut);\n  if (res)\n    COMMON_INTERCEPTOR_INITIALIZE_RANGE(res, __sanitizer::struct_utmp_sz);\n  return res;\n}\n#define INIT_UTMP                      \\\n  COMMON_INTERCEPT_FUNCTION(getutent); \\\n  COMMON_INTERCEPT_FUNCTION(getutid);  \\\n  COMMON_INTERCEPT_FUNCTION(getutline);\n#else\n#define INIT_UTMP\n#endif\n\n#if SANITIZER_INTERCEPT_UTMPX\nINTERCEPTOR(void *, getutxent, int dummy) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, getutxent, dummy);\n  void *res = REAL(getutxent)(dummy);\n  if (res)\n    COMMON_INTERCEPTOR_INITIALIZE_RANGE(res, __sanitizer::struct_utmpx_sz);\n  return res;\n}\nINTERCEPTOR(void *, getutxid, void *ut) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, getutxid, ut);\n  void *res = REAL(getutxid)(ut);\n  if (res)\n    COMMON_INTERCEPTOR_INITIALIZE_RANGE(res, __sanitizer::struct_utmpx_sz);\n  return res;\n}\nINTERCEPTOR(void *, getutxline, void *ut) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, getutxline, ut);\n  void *res = REAL(getutxline)(ut);\n  if (res)\n    COMMON_INTERCEPTOR_INITIALIZE_RANGE(res, __sanitizer::struct_utmpx_sz);\n  return res;\n}\nINTERCEPTOR(void *, pututxline, const void *ut) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, pututxline, ut);\n  if (ut)\n    COMMON_INTERCEPTOR_READ_RANGE(ctx, ut, __sanitizer::struct_utmpx_sz);\n  void *res = REAL(pututxline)(ut);\n  if (res)\n    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, res, __sanitizer::struct_utmpx_sz);\n  return res;\n}\n#define INIT_UTMPX                      \\\n  COMMON_INTERCEPT_FUNCTION(getutxent); \\\n  COMMON_INTERCEPT_FUNCTION(getutxid);  \\\n  COMMON_INTERCEPT_FUNCTION(getutxline); \\\n  COMMON_INTERCEPT_FUNCTION(pututxline);\n#else\n#define INIT_UTMPX\n#endif\n\n#if 
SANITIZER_INTERCEPT_GETLOADAVG\nINTERCEPTOR(int, getloadavg, double *loadavg, int nelem) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, getloadavg, loadavg, nelem);\n  int res = REAL(getloadavg)(loadavg, nelem);\n  if (res > 0)\n    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, loadavg, res * sizeof(*loadavg));\n  return res;\n}\n#define INIT_GETLOADAVG                      \\\n  COMMON_INTERCEPT_FUNCTION(getloadavg);\n#else\n#define INIT_GETLOADAVG\n#endif\n\n#if SANITIZER_INTERCEPT_MCHECK_MPROBE\nINTERCEPTOR(int, mcheck, void (*abortfunc)(int mstatus)) {\n  return 0;\n}\n\nINTERCEPTOR(int, mcheck_pedantic, void (*abortfunc)(int mstatus)) {\n  return 0;\n}\n\nINTERCEPTOR(int, mprobe, void *ptr) {\n  return 0;\n}\n#endif\n\nINTERCEPTOR(SIZE_T, wcslen, const wchar_t *s) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, wcslen, s);\n  SIZE_T res = REAL(wcslen)(s);\n  COMMON_INTERCEPTOR_READ_RANGE(ctx, s, sizeof(wchar_t) * (res + 1));\n  return res;\n}\n\nINTERCEPTOR(SIZE_T, wcsnlen, const wchar_t *s, SIZE_T n) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, wcsnlen, s, n);\n  SIZE_T res = REAL(wcsnlen)(s, n);\n  COMMON_INTERCEPTOR_READ_RANGE(ctx, s, sizeof(wchar_t) * Min(res + 1, n));\n  return res;\n}\n#define INIT_WCSLEN                  \\\n  COMMON_INTERCEPT_FUNCTION(wcslen); \\\n  COMMON_INTERCEPT_FUNCTION(wcsnlen);\n\n#if SANITIZER_INTERCEPT_WCSCAT\nINTERCEPTOR(wchar_t *, wcscat, wchar_t *dst, const wchar_t *src) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, wcscat, dst, src);\n  SIZE_T src_size = internal_wcslen(src);\n  SIZE_T dst_size = internal_wcslen(dst);\n  COMMON_INTERCEPTOR_READ_RANGE(ctx, src, (src_size + 1) * sizeof(wchar_t));\n  COMMON_INTERCEPTOR_READ_RANGE(ctx, dst, (dst_size + 1) * sizeof(wchar_t));\n  COMMON_INTERCEPTOR_WRITE_RANGE(ctx, dst + dst_size,\n                                 (src_size + 1) * sizeof(wchar_t));\n  return REAL(wcscat)(dst, src);\n}\n\nINTERCEPTOR(wchar_t *, wcsncat, wchar_t *dst, const wchar_t *src, SIZE_T n) {\n  void 
*ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, wcsncat, dst, src, n);\n  SIZE_T src_size = internal_wcsnlen(src, n);\n  SIZE_T dst_size = internal_wcslen(dst);\n  COMMON_INTERCEPTOR_READ_RANGE(ctx, src,\n                                Min(src_size + 1, n) * sizeof(wchar_t));\n  COMMON_INTERCEPTOR_READ_RANGE(ctx, dst, (dst_size + 1) * sizeof(wchar_t));\n  COMMON_INTERCEPTOR_WRITE_RANGE(ctx, dst + dst_size,\n                                 (src_size + 1) * sizeof(wchar_t));\n  return REAL(wcsncat)(dst, src, n);\n}\n#define INIT_WCSCAT                  \\\n  COMMON_INTERCEPT_FUNCTION(wcscat); \\\n  COMMON_INTERCEPT_FUNCTION(wcsncat);\n#else\n#define INIT_WCSCAT\n#endif\n\n#if SANITIZER_INTERCEPT_WCSDUP\nINTERCEPTOR(wchar_t *, wcsdup, wchar_t *s) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, wcsdup, s);\n  SIZE_T len = internal_wcslen(s);\n  COMMON_INTERCEPTOR_READ_RANGE(ctx, s, sizeof(wchar_t) * (len + 1));\n  wchar_t *result = REAL(wcsdup)(s);\n  if (result)\n    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, result, sizeof(wchar_t) * (len + 1));\n  return result;\n}\n\n#define INIT_WCSDUP COMMON_INTERCEPT_FUNCTION(wcsdup);\n#else\n#define INIT_WCSDUP\n#endif\n\n#if SANITIZER_INTERCEPT_STRXFRM\nstatic SIZE_T RealStrLen(const char *str) { return internal_strlen(str); }\n\nstatic SIZE_T RealStrLen(const wchar_t *str) { return internal_wcslen(str); }\n\n#define STRXFRM_INTERCEPTOR_IMPL(strxfrm, dest, src, len, ...)             
\\\n  {                                                                        \\\n    void *ctx;                                                             \\\n    COMMON_INTERCEPTOR_ENTER(ctx, strxfrm, dest, src, len, ##__VA_ARGS__); \\\n    COMMON_INTERCEPTOR_READ_RANGE(ctx, src,                                \\\n                                  sizeof(*src) * (RealStrLen(src) + 1));   \\\n    SIZE_T res = REAL(strxfrm)(dest, src, len, ##__VA_ARGS__);             \\\n    if (res < len)                                                         \\\n      COMMON_INTERCEPTOR_WRITE_RANGE(ctx, dest, sizeof(*src) * (res + 1)); \\\n    return res;                                                            \\\n  }\n\nINTERCEPTOR(SIZE_T, strxfrm, char *dest, const char *src, SIZE_T len) {\n  STRXFRM_INTERCEPTOR_IMPL(strxfrm, dest, src, len);\n}\n\nINTERCEPTOR(SIZE_T, strxfrm_l, char *dest, const char *src, SIZE_T len,\n            void *locale) {\n  STRXFRM_INTERCEPTOR_IMPL(strxfrm_l, dest, src, len, locale);\n}\n\n#define INIT_STRXFRM                  \\\n  COMMON_INTERCEPT_FUNCTION(strxfrm); \\\n  COMMON_INTERCEPT_FUNCTION(strxfrm_l);\n#else\n#define INIT_STRXFRM\n#endif\n\n#if SANITIZER_INTERCEPT___STRXFRM_L\nINTERCEPTOR(SIZE_T, __strxfrm_l, char *dest, const char *src, SIZE_T len,\n            void *locale) {\n  STRXFRM_INTERCEPTOR_IMPL(__strxfrm_l, dest, src, len, locale);\n}\n\n#define INIT___STRXFRM_L COMMON_INTERCEPT_FUNCTION(__strxfrm_l);\n#else\n#define INIT___STRXFRM_L\n#endif\n\n#if SANITIZER_INTERCEPT_WCSXFRM\nINTERCEPTOR(SIZE_T, wcsxfrm, wchar_t *dest, const wchar_t *src, SIZE_T len) {\n  STRXFRM_INTERCEPTOR_IMPL(wcsxfrm, dest, src, len);\n}\n\nINTERCEPTOR(SIZE_T, wcsxfrm_l, wchar_t *dest, const wchar_t *src, SIZE_T len,\n            void *locale) {\n  STRXFRM_INTERCEPTOR_IMPL(wcsxfrm_l, dest, src, len, locale);\n}\n\n#define INIT_WCSXFRM                  \\\n  COMMON_INTERCEPT_FUNCTION(wcsxfrm); \\\n  COMMON_INTERCEPT_FUNCTION(wcsxfrm_l);\n#else\n#define 
INIT_WCSXFRM\n#endif\n\n#if SANITIZER_INTERCEPT___WCSXFRM_L\nINTERCEPTOR(SIZE_T, __wcsxfrm_l, wchar_t *dest, const wchar_t *src, SIZE_T len,\n            void *locale) {\n  STRXFRM_INTERCEPTOR_IMPL(__wcsxfrm_l, dest, src, len, locale);\n}\n\n#define INIT___WCSXFRM_L COMMON_INTERCEPT_FUNCTION(__wcsxfrm_l);\n#else\n#define INIT___WCSXFRM_L\n#endif\n\n#if SANITIZER_INTERCEPT_ACCT\nINTERCEPTOR(int, acct, const char *file) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, acct, file);\n  if (file)\n    COMMON_INTERCEPTOR_READ_RANGE(ctx, file, internal_strlen(file) + 1);\n  return REAL(acct)(file);\n}\n#define INIT_ACCT COMMON_INTERCEPT_FUNCTION(acct)\n#else\n#define INIT_ACCT\n#endif\n\n#if SANITIZER_INTERCEPT_USER_FROM_UID\nINTERCEPTOR(const char *, user_from_uid, u32 uid, int nouser) {\n  void *ctx;\n  const char *user;\n  COMMON_INTERCEPTOR_ENTER(ctx, user_from_uid, uid, nouser);\n  user = REAL(user_from_uid)(uid, nouser);\n  if (user)\n    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, user, internal_strlen(user) + 1);\n  return user;\n}\n#define INIT_USER_FROM_UID COMMON_INTERCEPT_FUNCTION(user_from_uid)\n#else\n#define INIT_USER_FROM_UID\n#endif\n\n#if SANITIZER_INTERCEPT_UID_FROM_USER\nINTERCEPTOR(int, uid_from_user, const char *name, u32 *uid) {\n  void *ctx;\n  int res;\n  COMMON_INTERCEPTOR_ENTER(ctx, uid_from_user, name, uid);\n  if (name)\n    COMMON_INTERCEPTOR_READ_RANGE(ctx, name, internal_strlen(name) + 1);\n  res = REAL(uid_from_user)(name, uid);\n  if (uid)\n    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, uid, sizeof(*uid));\n  return res;\n}\n#define INIT_UID_FROM_USER COMMON_INTERCEPT_FUNCTION(uid_from_user)\n#else\n#define INIT_UID_FROM_USER\n#endif\n\n#if SANITIZER_INTERCEPT_GROUP_FROM_GID\nINTERCEPTOR(const char *, group_from_gid, u32 gid, int nogroup) {\n  void *ctx;\n  const char *group;\n  COMMON_INTERCEPTOR_ENTER(ctx, group_from_gid, gid, nogroup);\n  group = REAL(group_from_gid)(gid, nogroup);\n  if (group)\n    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, group, 
internal_strlen(group) + 1);\n  return group;\n}\n#define INIT_GROUP_FROM_GID COMMON_INTERCEPT_FUNCTION(group_from_gid)\n#else\n#define INIT_GROUP_FROM_GID\n#endif\n\n#if SANITIZER_INTERCEPT_GID_FROM_GROUP\nINTERCEPTOR(int, gid_from_group, const char *group, u32 *gid) {\n  void *ctx;\n  int res;\n  COMMON_INTERCEPTOR_ENTER(ctx, gid_from_group, group, gid);\n  if (group)\n    COMMON_INTERCEPTOR_READ_RANGE(ctx, group, internal_strlen(group) + 1);\n  res = REAL(gid_from_group)(group, gid);\n  if (gid)\n    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, gid, sizeof(*gid));\n  return res;\n}\n#define INIT_GID_FROM_GROUP COMMON_INTERCEPT_FUNCTION(gid_from_group)\n#else\n#define INIT_GID_FROM_GROUP\n#endif\n\n#if SANITIZER_INTERCEPT_ACCESS\nINTERCEPTOR(int, access, const char *path, int mode) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, access, path, mode);\n  if (path)\n    COMMON_INTERCEPTOR_READ_RANGE(ctx, path, internal_strlen(path) + 1);\n  return REAL(access)(path, mode);\n}\n#define INIT_ACCESS COMMON_INTERCEPT_FUNCTION(access)\n#else\n#define INIT_ACCESS\n#endif\n\n#if SANITIZER_INTERCEPT_FACCESSAT\nINTERCEPTOR(int, faccessat, int fd, const char *path, int mode, int flags) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, faccessat, fd, path, mode, flags);\n  if (path)\n    COMMON_INTERCEPTOR_READ_RANGE(ctx, path, internal_strlen(path) + 1);\n  return REAL(faccessat)(fd, path, mode, flags);\n}\n#define INIT_FACCESSAT COMMON_INTERCEPT_FUNCTION(faccessat)\n#else\n#define INIT_FACCESSAT\n#endif\n\n#if SANITIZER_INTERCEPT_GETGROUPLIST\nINTERCEPTOR(int, getgrouplist, const char *name, u32 basegid, u32 *groups,\n            int *ngroups) {\n  void *ctx;\n  int res;\n  COMMON_INTERCEPTOR_ENTER(ctx, getgrouplist, name, basegid, groups, ngroups);\n  if (name)\n    COMMON_INTERCEPTOR_READ_RANGE(ctx, name, internal_strlen(name) + 1);\n  if (ngroups)\n    COMMON_INTERCEPTOR_READ_RANGE(ctx, ngroups, sizeof(*ngroups));\n  res = REAL(getgrouplist)(name, basegid, groups, ngroups);\n  if 
(!res && groups && ngroups) {\n    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, groups, sizeof(*groups) * (*ngroups));\n    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ngroups, sizeof(*ngroups));\n  }\n  return res;\n}\n\n#define INIT_GETGROUPLIST COMMON_INTERCEPT_FUNCTION(getgrouplist);\n#else\n#define INIT_GETGROUPLIST\n#endif\n\n#if SANITIZER_INTERCEPT_GETGROUPMEMBERSHIP\nINTERCEPTOR(int, getgroupmembership, const char *name, u32 basegid, u32 *groups,\n            int maxgrp, int *ngroups) {\n  void *ctx;\n  int res;\n  COMMON_INTERCEPTOR_ENTER(ctx, getgroupmembership, name, basegid, groups,\n                           maxgrp, ngroups);\n  if (name)\n    COMMON_INTERCEPTOR_READ_RANGE(ctx, name, internal_strlen(name) + 1);\n  res = REAL(getgroupmembership)(name, basegid, groups, maxgrp, ngroups);\n  if (!res && groups && ngroups) {\n    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, groups, sizeof(*groups) * (*ngroups));\n    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ngroups, sizeof(*ngroups));\n  }\n  return res;\n}\n\n#define INIT_GETGROUPMEMBERSHIP COMMON_INTERCEPT_FUNCTION(getgroupmembership);\n#else\n#define INIT_GETGROUPMEMBERSHIP\n#endif\n\n#if SANITIZER_INTERCEPT_READLINK\nINTERCEPTOR(SSIZE_T, readlink, const char *path, char *buf, SIZE_T bufsiz) {\n  void* ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, readlink, path, buf, bufsiz);\n  COMMON_INTERCEPTOR_READ_RANGE(ctx, path, internal_strlen(path) + 1);\n  SSIZE_T res = REAL(readlink)(path, buf, bufsiz);\n  if (res > 0)\n    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buf, res);\n  return res;\n}\n\n#define INIT_READLINK COMMON_INTERCEPT_FUNCTION(readlink)\n#else\n#define INIT_READLINK\n#endif\n\n#if SANITIZER_INTERCEPT_READLINKAT\nINTERCEPTOR(SSIZE_T, readlinkat, int dirfd, const char *path, char *buf,\n            SIZE_T bufsiz) {\n  void* ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, readlinkat, dirfd, path, buf, bufsiz);\n  COMMON_INTERCEPTOR_READ_RANGE(ctx, path, internal_strlen(path) + 1);\n  SSIZE_T res = REAL(readlinkat)(dirfd, path, buf, bufsiz);\n  if 
(res > 0)\n    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buf, res);\n  return res;\n}\n\n#define INIT_READLINKAT COMMON_INTERCEPT_FUNCTION(readlinkat)\n#else\n#define INIT_READLINKAT\n#endif\n\n#if SANITIZER_INTERCEPT_NAME_TO_HANDLE_AT\nINTERCEPTOR(int, name_to_handle_at, int dirfd, const char *pathname,\n            struct file_handle *handle, int *mount_id, int flags) {\n  void* ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, name_to_handle_at, dirfd, pathname, handle,\n                           mount_id, flags);\n  COMMON_INTERCEPTOR_READ_RANGE(ctx, pathname, internal_strlen(pathname) + 1);\n\n  __sanitizer_file_handle *sanitizer_handle =\n      reinterpret_cast<__sanitizer_file_handle*>(handle);\n  COMMON_INTERCEPTOR_READ_RANGE(\n      ctx, &sanitizer_handle->handle_bytes,\n      sizeof(sanitizer_handle->handle_bytes));\n\n  int res = REAL(name_to_handle_at)(dirfd, pathname, handle, mount_id, flags);\n  if (!res) {\n    COMMON_INTERCEPTOR_WRITE_RANGE(\n        ctx, &sanitizer_handle->handle_bytes,\n        sizeof(sanitizer_handle->handle_bytes));\n    COMMON_INTERCEPTOR_WRITE_RANGE(\n        ctx, &sanitizer_handle->handle_type,\n        sizeof(sanitizer_handle->handle_type));\n    COMMON_INTERCEPTOR_WRITE_RANGE(\n        ctx, &sanitizer_handle->f_handle, sanitizer_handle->handle_bytes);\n    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, mount_id, sizeof(*mount_id));\n  }\n  return res;\n}\n\n#define INIT_NAME_TO_HANDLE_AT COMMON_INTERCEPT_FUNCTION(name_to_handle_at)\n#else\n#define INIT_NAME_TO_HANDLE_AT\n#endif\n\n#if SANITIZER_INTERCEPT_OPEN_BY_HANDLE_AT\nINTERCEPTOR(int, open_by_handle_at, int mount_fd, struct file_handle* handle,\n            int flags) {\n  void* ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, open_by_handle_at, mount_fd, handle, flags);\n\n  __sanitizer_file_handle *sanitizer_handle =\n      reinterpret_cast<__sanitizer_file_handle*>(handle);\n  COMMON_INTERCEPTOR_READ_RANGE(\n      ctx, &sanitizer_handle->handle_bytes,\n      sizeof(sanitizer_handle->handle_bytes));\n  
COMMON_INTERCEPTOR_READ_RANGE(\n      ctx, &sanitizer_handle->handle_type,\n      sizeof(sanitizer_handle->handle_type));\n  COMMON_INTERCEPTOR_READ_RANGE(\n      ctx, &sanitizer_handle->f_handle, sanitizer_handle->handle_bytes);\n\n  return REAL(open_by_handle_at)(mount_fd, handle, flags);\n}\n\n#define INIT_OPEN_BY_HANDLE_AT COMMON_INTERCEPT_FUNCTION(open_by_handle_at)\n#else\n#define INIT_OPEN_BY_HANDLE_AT\n#endif\n\n#if SANITIZER_INTERCEPT_STRLCPY\nINTERCEPTOR(SIZE_T, strlcpy, char *dst, char *src, SIZE_T size) {\n  void *ctx;\n  SIZE_T res;\n  COMMON_INTERCEPTOR_ENTER(ctx, strlcpy, dst, src, size);\n  if (src) {\n    // Keep strnlen as macro argument, as macro may ignore it.\n    COMMON_INTERCEPTOR_READ_STRING(\n        ctx, src, Min(internal_strnlen(src, size), size - 1) + 1);\n  }\n  res = REAL(strlcpy)(dst, src, size);\n  COMMON_INTERCEPTOR_COPY_STRING(ctx, dst, src, internal_strlen(dst) + 1);\n  return res;\n}\n\nINTERCEPTOR(SIZE_T, strlcat, char *dst, char *src, SIZE_T size) {\n  void *ctx;\n  SIZE_T len = 0;\n  COMMON_INTERCEPTOR_ENTER(ctx, strlcat, dst, src, size);\n  // src is checked in the strlcpy() interceptor\n  if (dst) {\n    len = internal_strnlen(dst, size);\n    COMMON_INTERCEPTOR_READ_STRING(ctx, dst, Min(len, size - 1) + 1);\n  }\n  // Reuse the rest of the code in the strlcpy() interceptor\n  return WRAP(strlcpy)(dst + len, src, size - len) + len;\n}\n#define INIT_STRLCPY \\\n  COMMON_INTERCEPT_FUNCTION(strlcpy); \\\n  COMMON_INTERCEPT_FUNCTION(strlcat);\n#else\n#define INIT_STRLCPY\n#endif\n\n#if SANITIZER_INTERCEPT_MMAP\nINTERCEPTOR(void *, mmap, void *addr, SIZE_T sz, int prot, int flags, int fd,\n            OFF_T off) {\n  void *ctx;\n  if (common_flags()->detect_write_exec)\n    ReportMmapWriteExec(prot, flags);\n  if (COMMON_INTERCEPTOR_NOTHING_IS_INITIALIZED)\n    return (void *)internal_mmap(addr, sz, prot, flags, fd, off);\n  COMMON_INTERCEPTOR_ENTER(ctx, mmap, addr, sz, prot, flags, fd, off);\n  COMMON_INTERCEPTOR_MMAP_IMPL(ctx, 
mmap, addr, sz, prot, flags, fd, off);\n}\n\nINTERCEPTOR(int, mprotect, void *addr, SIZE_T sz, int prot) {\n  void *ctx;\n  if (common_flags()->detect_write_exec)\n    ReportMmapWriteExec(prot, 0);\n  if (COMMON_INTERCEPTOR_NOTHING_IS_INITIALIZED)\n    return (int)internal_mprotect(addr, sz, prot);\n  COMMON_INTERCEPTOR_ENTER(ctx, mprotect, addr, sz, prot);\n  MprotectMallocZones(addr, prot);\n  return REAL(mprotect)(addr, sz, prot);\n}\n#define INIT_MMAP                                                              \\\n  COMMON_INTERCEPT_FUNCTION(mmap);                                             \\\n  COMMON_INTERCEPT_FUNCTION(mprotect);\n#else\n#define INIT_MMAP\n#endif\n\n#if SANITIZER_INTERCEPT_MMAP64\nINTERCEPTOR(void *, mmap64, void *addr, SIZE_T sz, int prot, int flags, int fd,\n            OFF64_T off) {\n  void *ctx;\n  if (common_flags()->detect_write_exec)\n    ReportMmapWriteExec(prot, flags);\n  if (COMMON_INTERCEPTOR_NOTHING_IS_INITIALIZED)\n    return (void *)internal_mmap(addr, sz, prot, flags, fd, off);\n  COMMON_INTERCEPTOR_ENTER(ctx, mmap64, addr, sz, prot, flags, fd, off);\n  COMMON_INTERCEPTOR_MMAP_IMPL(ctx, mmap64, addr, sz, prot, flags, fd, off);\n}\n#define INIT_MMAP64 COMMON_INTERCEPT_FUNCTION(mmap64);\n#else\n#define INIT_MMAP64\n#endif\n\n#if SANITIZER_INTERCEPT_DEVNAME\nINTERCEPTOR(char *, devname, u64 dev, u32 type) {\n  void *ctx;\n  char *name;\n  COMMON_INTERCEPTOR_ENTER(ctx, devname, dev, type);\n  name = REAL(devname)(dev, type);\n  if (name)\n    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, name, internal_strlen(name) + 1);\n  return name;\n}\n#define INIT_DEVNAME COMMON_INTERCEPT_FUNCTION(devname);\n#else\n#define INIT_DEVNAME\n#endif\n\n#if SANITIZER_INTERCEPT_DEVNAME_R\n#if SANITIZER_NETBSD\n#define DEVNAME_R_RETTYPE int\n#define DEVNAME_R_SUCCESS(x) (!(x))\n#else\n#define DEVNAME_R_RETTYPE char*\n#define DEVNAME_R_SUCCESS(x) (x)\n#endif\nINTERCEPTOR(DEVNAME_R_RETTYPE, devname_r, u64 dev, u32 type, char *path,\n            uptr len) {\n 
 void *ctx;
  COMMON_INTERCEPTOR_ENTER(ctx, devname_r, dev, type, path, len);
  DEVNAME_R_RETTYPE res = REAL(devname_r)(dev, type, path, len);
  // On success the real function stored a NUL-terminated device name in
  // `path`; mark it (terminator included) as initialized.
  if (DEVNAME_R_SUCCESS(res))
    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, path, internal_strlen(path) + 1);
  return res;
}
#define INIT_DEVNAME_R COMMON_INTERCEPT_FUNCTION(devname_r);
#else
#define INIT_DEVNAME_R
#endif

#if SANITIZER_INTERCEPT_FGETLN
// fgetln: reports the line length through *len and returns a pointer to the
// line; mark both *len and the *len-byte line buffer as written.
INTERCEPTOR(char *, fgetln, __sanitizer_FILE *stream, SIZE_T *len) {
  void *ctx;
  COMMON_INTERCEPTOR_ENTER(ctx, fgetln, stream, len);
  char *str = REAL(fgetln)(stream, len);
  if (str && len) {
    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, len, sizeof(*len));
    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, str, *len);
  }
  return str;
}
#define INIT_FGETLN COMMON_INTERCEPT_FUNCTION(fgetln)
#else
#define INIT_FGETLN
#endif

#if SANITIZER_INTERCEPT_STRMODE
// strmode: the callee fills `bp` with a NUL-terminated mode string.
INTERCEPTOR(void, strmode, u32 mode, char *bp) {
  void *ctx;
  COMMON_INTERCEPTOR_ENTER(ctx, strmode, mode, bp);
  REAL(strmode)(mode, bp);
  if (bp)
    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, bp, internal_strlen(bp) + 1);
}
#define INIT_STRMODE COMMON_INTERCEPT_FUNCTION(strmode)
#else
#define INIT_STRMODE
#endif

#if SANITIZER_INTERCEPT_TTYENT
// getttyent/getttynam: the returned entry is library-owned; mark
// struct_ttyent_sz bytes initialized when non-null.
INTERCEPTOR(struct __sanitizer_ttyent *, getttyent, void) {
  void *ctx;
  COMMON_INTERCEPTOR_ENTER(ctx, getttyent);
  struct __sanitizer_ttyent *ttyent = REAL(getttyent)();
  if (ttyent)
    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ttyent, struct_ttyent_sz);
  return ttyent;
}
INTERCEPTOR(struct __sanitizer_ttyent *, getttynam, char *name) {
  void *ctx;
  COMMON_INTERCEPTOR_ENTER(ctx, getttynam, name);
  if (name)
    COMMON_INTERCEPTOR_READ_RANGE(ctx, name, internal_strlen(name) + 1);
  struct __sanitizer_ttyent *ttyent = REAL(getttynam)(name);
  if (ttyent)
    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ttyent, struct_ttyent_sz);
  return ttyent;
}
#define INIT_TTYENT \
  COMMON_INTERCEPT_FUNCTION(getttyent); \
  COMMON_INTERCEPT_FUNCTION(getttynam);
#else
#define INIT_TTYENT
#endif

#if SANITIZER_INTERCEPT_TTYENTPATH
// setttyentpath: only checks that the input path string is readable.
INTERCEPTOR(int, setttyentpath, char *path) {
  void *ctx;
  COMMON_INTERCEPTOR_ENTER(ctx, setttyentpath, path);
  if (path)
    COMMON_INTERCEPTOR_READ_RANGE(ctx, path, internal_strlen(path) + 1);
  return REAL(setttyentpath)(path);
}
#define INIT_TTYENTPATH COMMON_INTERCEPT_FUNCTION(setttyentpath);
#else
#define INIT_TTYENTPATH
#endif

#if SANITIZER_INTERCEPT_PROTOENT
// Shared unpoisoning for getproto*(): marks the protoent struct, its name,
// each alias string, and the NULL-terminated alias pointer array as written.
static void write_protoent(void *ctx, struct __sanitizer_protoent *p) {
  COMMON_INTERCEPTOR_WRITE_RANGE(ctx, p, sizeof(*p));

  COMMON_INTERCEPTOR_WRITE_RANGE(ctx, p->p_name, internal_strlen(p->p_name) + 1);

  SIZE_T pp_size = 1; // One handles the trailing \0

  for (char **pp = p->p_aliases; *pp; ++pp, ++pp_size)
      COMMON_INTERCEPTOR_WRITE_RANGE(ctx, *pp, internal_strlen(*pp) + 1);

  COMMON_INTERCEPTOR_WRITE_RANGE(ctx, p->p_aliases,
                                  pp_size * sizeof(char **));
}

INTERCEPTOR(struct __sanitizer_protoent *, getprotoent) {
  void *ctx;
  COMMON_INTERCEPTOR_ENTER(ctx, getprotoent);
  struct __sanitizer_protoent *p = REAL(getprotoent)();
  if (p)
    write_protoent(ctx, p);
  return p;
}

INTERCEPTOR(struct __sanitizer_protoent *, getprotobyname, const char *name) {
  void *ctx;
  COMMON_INTERCEPTOR_ENTER(ctx, getprotobyname, name);
  if (name)
    COMMON_INTERCEPTOR_READ_RANGE(ctx, name, internal_strlen(name) + 1);
  struct __sanitizer_protoent *p = REAL(getprotobyname)(name);
  if (p)
    write_protoent(ctx, p);
  return p;
}

INTERCEPTOR(struct __sanitizer_protoent *, getprotobynumber, int proto) {
  void *ctx;
  COMMON_INTERCEPTOR_ENTER(ctx, getprotobynumber, proto);
  struct __sanitizer_protoent *p = REAL(getprotobynumber)(proto);
  if (p)
    write_protoent(ctx, p);
  return p;
}
#define INIT_PROTOENT \
  COMMON_INTERCEPT_FUNCTION(getprotoent); \
  COMMON_INTERCEPT_FUNCTION(getprotobyname); \
  COMMON_INTERCEPT_FUNCTION(getprotobynumber)
#else
#define INIT_PROTOENT
#endif

#if SANITIZER_INTERCEPT_PROTOENT_R
// Reentrant getproto*_r variants: on success (res == 0) *result points at the
// filled-in protoent, which is unpoisoned via write_protoent above.
INTERCEPTOR(int, getprotoent_r, struct __sanitizer_protoent *result_buf,
            char *buf, SIZE_T buflen, struct __sanitizer_protoent **result) {
  void *ctx;
  COMMON_INTERCEPTOR_ENTER(ctx, getprotoent_r, result_buf, buf, buflen,
                           result);
  int res = REAL(getprotoent_r)(result_buf, buf, buflen, result);

  COMMON_INTERCEPTOR_WRITE_RANGE(ctx, result, sizeof *result);
  if (!res && *result)
    write_protoent(ctx, *result);
  return res;
}

INTERCEPTOR(int, getprotobyname_r, const char *name,
            struct __sanitizer_protoent *result_buf, char *buf, SIZE_T buflen,
            struct __sanitizer_protoent **result) {
  void *ctx;
  COMMON_INTERCEPTOR_ENTER(ctx, getprotobyname_r, name, result_buf, buf,
                           buflen, result);
  if (name)
    COMMON_INTERCEPTOR_READ_RANGE(ctx, name, internal_strlen(name) + 1);
  int res = REAL(getprotobyname_r)(name, result_buf, buf, buflen, result);

  COMMON_INTERCEPTOR_WRITE_RANGE(ctx, result, sizeof *result);
  if (!res && *result)
    write_protoent(ctx, *result);
  return res;
}

INTERCEPTOR(int, getprotobynumber_r, int num,
            struct __sanitizer_protoent *result_buf, char *buf,
            SIZE_T buflen, struct __sanitizer_protoent **result) {
  void *ctx;
  COMMON_INTERCEPTOR_ENTER(ctx, getprotobynumber_r, num, result_buf, buf,
                           buflen, result);
  int res = REAL(getprotobynumber_r)(num, result_buf, buf, buflen, result);

  COMMON_INTERCEPTOR_WRITE_RANGE(ctx, result, sizeof *result);
  if (!res && *result)
    write_protoent(ctx, *result);
  return res;
}

#define INIT_PROTOENT_R \
  COMMON_INTERCEPT_FUNCTION(getprotoent_r); \
  COMMON_INTERCEPT_FUNCTION(getprotobyname_r); \
  COMMON_INTERCEPT_FUNCTION(getprotobynumber_r);
COMMON_INTERCEPT_FUNCTION(getprotobynumber_r);
#else
#define INIT_PROTOENT_R
#endif

#if SANITIZER_INTERCEPT_NETENT
// getnet*(): each interceptor unpoisons the netent struct, its name, every
// alias string, and the NULL-terminated alias pointer array.
INTERCEPTOR(struct __sanitizer_netent *, getnetent) {
  void *ctx;
  COMMON_INTERCEPTOR_ENTER(ctx, getnetent);
  struct __sanitizer_netent *n = REAL(getnetent)();
  if (n) {
    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, n, sizeof(*n));

    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, n->n_name, internal_strlen(n->n_name) + 1);

    SIZE_T nn_size = 1; // One handles the trailing \0

    for (char **nn = n->n_aliases; *nn; ++nn, ++nn_size)
      COMMON_INTERCEPTOR_WRITE_RANGE(ctx, *nn, internal_strlen(*nn) + 1);

    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, n->n_aliases,
                                   nn_size * sizeof(char **));
  }
  return n;
}

INTERCEPTOR(struct __sanitizer_netent *, getnetbyname, const char *name) {
  void *ctx;
  COMMON_INTERCEPTOR_ENTER(ctx, getnetbyname, name);
  if (name)
    COMMON_INTERCEPTOR_READ_RANGE(ctx, name, internal_strlen(name) + 1);
  struct __sanitizer_netent *n = REAL(getnetbyname)(name);
  if (n) {
    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, n, sizeof(*n));

    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, n->n_name, internal_strlen(n->n_name) + 1);

    SIZE_T nn_size = 1; // One handles the trailing \0

    for (char **nn = n->n_aliases; *nn; ++nn, ++nn_size)
      COMMON_INTERCEPTOR_WRITE_RANGE(ctx, *nn, internal_strlen(*nn) + 1);

    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, n->n_aliases,
                                   nn_size * sizeof(char **));
  }
  return n;
}

INTERCEPTOR(struct __sanitizer_netent *, getnetbyaddr, u32 net, int type) {
  void *ctx;
  COMMON_INTERCEPTOR_ENTER(ctx, getnetbyaddr, net, type);
  struct __sanitizer_netent *n = REAL(getnetbyaddr)(net, type);
  if (n) {
    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, n, sizeof(*n));

    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, n->n_name, internal_strlen(n->n_name) + 1);

    SIZE_T nn_size = 1; // One handles the trailing \0

    for (char **nn = n->n_aliases; *nn; ++nn, ++nn_size)
      COMMON_INTERCEPTOR_WRITE_RANGE(ctx, *nn, internal_strlen(*nn) + 1);

    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, n->n_aliases,
                                   nn_size * sizeof(char **));
  }
  return n;
}
#define INIT_NETENT \
  COMMON_INTERCEPT_FUNCTION(getnetent); \
  COMMON_INTERCEPT_FUNCTION(getnetbyname); \
  COMMON_INTERCEPT_FUNCTION(getnetbyaddr)
#else
#define INIT_NETENT
#endif

#if SANITIZER_INTERCEPT_GETMNTINFO
// getmntinfo: cnt > 0 means *mntbufp points to cnt statfs/statvfs records
// (NetBSD uses statvfs-sized entries, other platforms statfs-sized).
INTERCEPTOR(int, getmntinfo, void **mntbufp, int flags) {
  void *ctx;
  COMMON_INTERCEPTOR_ENTER(ctx, getmntinfo, mntbufp, flags);
  int cnt = REAL(getmntinfo)(mntbufp, flags);
  if (cnt > 0 && mntbufp) {
    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, mntbufp, sizeof(void *));
    if (*mntbufp)
#if SANITIZER_NETBSD
      COMMON_INTERCEPTOR_WRITE_RANGE(ctx, *mntbufp, cnt * struct_statvfs_sz);
#else
      COMMON_INTERCEPTOR_WRITE_RANGE(ctx, *mntbufp, cnt * struct_statfs_sz);
#endif
  }
  return cnt;
}
#define INIT_GETMNTINFO COMMON_INTERCEPT_FUNCTION(getmntinfo)
#else
#define INIT_GETMNTINFO
#endif

#if SANITIZER_INTERCEPT_MI_VECTOR_HASH
// mi_vector_hash: reads `len` bytes of key, writes three u32 hash values.
INTERCEPTOR(void, mi_vector_hash, const void *key, SIZE_T len, u32 seed,
            u32 hashes[3]) {
  void *ctx;
  COMMON_INTERCEPTOR_ENTER(ctx, mi_vector_hash, key, len, seed, hashes);
  if (key)
    COMMON_INTERCEPTOR_READ_RANGE(ctx, key, len);
  REAL(mi_vector_hash)(key, len, seed, hashes);
  if (hashes)
    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, hashes, sizeof(hashes[0]) * 3);
}
#define INIT_MI_VECTOR_HASH COMMON_INTERCEPT_FUNCTION(mi_vector_hash)
#else
#define INIT_MI_VECTOR_HASH
#endif

#if SANITIZER_INTERCEPT_SETVBUF
// setvbuf family: the stdio buffer handed to the library is treated as
// written, and the FILE object is unpoisoned after the call.
INTERCEPTOR(int, setvbuf, __sanitizer_FILE *stream, char *buf, int mode,
  SIZE_T size) {
  void *ctx;
  COMMON_INTERCEPTOR_ENTER(ctx, setvbuf, stream, buf, mode, size);
  int ret = REAL(setvbuf)(stream, buf, mode, size);
  if (buf)
    COMMON_INTERCEPTOR_WRITE_RANGE(ctx,
buf, size);
  if (stream)
      unpoison_file(stream);
  return ret;
}

INTERCEPTOR(void, setbuf, __sanitizer_FILE *stream, char *buf) {
  void *ctx;
  COMMON_INTERCEPTOR_ENTER(ctx, setbuf, stream, buf);
  REAL(setbuf)(stream, buf);
  if (buf) {
    // setbuf has no size argument: the buffer is the platform BUFSIZ.
    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buf, __sanitizer_bufsiz);
  }
  if (stream)
      unpoison_file(stream);
}

INTERCEPTOR(void, setbuffer, __sanitizer_FILE *stream, char *buf, SIZE_T size) {
  void *ctx;
  COMMON_INTERCEPTOR_ENTER(ctx, setbuffer, stream, buf, size);
  REAL(setbuffer)(stream, buf, size);
  if (buf) {
    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buf, size);
  }
  if (stream)
    unpoison_file(stream);
}

INTERCEPTOR(void, setlinebuf, __sanitizer_FILE *stream) {
  void *ctx;
  COMMON_INTERCEPTOR_ENTER(ctx, setlinebuf, stream);
  REAL(setlinebuf)(stream);
  if (stream)
    unpoison_file(stream);
}
#define INIT_SETVBUF COMMON_INTERCEPT_FUNCTION(setvbuf); \
    COMMON_INTERCEPT_FUNCTION(setbuf); \
    COMMON_INTERCEPT_FUNCTION(setbuffer); \
    COMMON_INTERCEPT_FUNCTION(setlinebuf)
#else
#define INIT_SETVBUF
#endif

#if SANITIZER_INTERCEPT_GETVFSSTAT
// getvfsstat: a positive return is the number of statvfs records written.
INTERCEPTOR(int, getvfsstat, void *buf, SIZE_T bufsize, int flags) {
  void *ctx;
  COMMON_INTERCEPTOR_ENTER(ctx, getvfsstat, buf, bufsize, flags);
  int ret = REAL(getvfsstat)(buf, bufsize, flags);
  if (buf && ret > 0)
    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buf, ret * struct_statvfs_sz);
  return ret;
}
#define INIT_GETVFSSTAT COMMON_INTERCEPT_FUNCTION(getvfsstat)
#else
#define INIT_GETVFSSTAT
#endif

#if SANITIZER_INTERCEPT_REGEX
// regcomp: reads the pattern string; on success (res == 0) the regex_t
// pointed to by preg has been filled in.
INTERCEPTOR(int, regcomp, void *preg, const char *pattern, int cflags) {
  void *ctx;
  COMMON_INTERCEPTOR_ENTER(ctx, regcomp, preg, pattern, cflags);
  if (pattern)
    COMMON_INTERCEPTOR_READ_RANGE(ctx, pattern, internal_strlen(pattern) + 1);
  int res = REAL(regcomp)(preg, pattern, cflags);
  if (!res)
    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, preg, struct_regex_sz);
  return res;
}
// regexec: on a match (res == 0) up to nmatch regmatch entries are written.
INTERCEPTOR(int, regexec, const void *preg, const char *string, SIZE_T nmatch,
            struct __sanitizer_regmatch *pmatch[], int eflags) {
  void *ctx;
  COMMON_INTERCEPTOR_ENTER(ctx, regexec, preg, string, nmatch, pmatch, eflags);
  if (preg)
    COMMON_INTERCEPTOR_READ_RANGE(ctx, preg, struct_regex_sz);
  if (string)
    COMMON_INTERCEPTOR_READ_RANGE(ctx, string, internal_strlen(string) + 1);
  int res = REAL(regexec)(preg, string, nmatch, pmatch, eflags);
  if (!res && pmatch)
    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, pmatch, nmatch * struct_regmatch_sz);
  return res;
}
// regerror: unpoisons whatever NUL-terminated message was placed in errbuf.
INTERCEPTOR(SIZE_T, regerror, int errcode, const void *preg, char *errbuf,
            SIZE_T errbuf_size) {
  void *ctx;
  COMMON_INTERCEPTOR_ENTER(ctx, regerror, errcode, preg, errbuf, errbuf_size);
  if (preg)
    COMMON_INTERCEPTOR_READ_RANGE(ctx, preg, struct_regex_sz);
  SIZE_T res = REAL(regerror)(errcode, preg, errbuf, errbuf_size);
  if (errbuf)
    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, errbuf, internal_strlen(errbuf) + 1);
  return res;
}
INTERCEPTOR(void, regfree, const void *preg) {
  void *ctx;
  COMMON_INTERCEPTOR_ENTER(ctx, regfree, preg);
  if (preg)
    COMMON_INTERCEPTOR_READ_RANGE(ctx, preg, struct_regex_sz);
  REAL(regfree)(preg);
}
#define INIT_REGEX                                                             \
  COMMON_INTERCEPT_FUNCTION(regcomp);                                          \
  COMMON_INTERCEPT_FUNCTION_GLIBC_VER_MIN(regexec, "GLIBC_2.3.4");             \
  COMMON_INTERCEPT_FUNCTION(regerror);                                         \
  COMMON_INTERCEPT_FUNCTION(regfree);
#else
#define INIT_REGEX
#endif

#if SANITIZER_INTERCEPT_REGEXSUB
// regnsub: reads the substitution template, a fixed-size regmatch array, and
// the source string; a positive result means buf holds a NUL-terminated
// substitution result.
INTERCEPTOR(SSIZE_T, regnsub, char *buf, SIZE_T bufsiz, const char *sub,
            const struct __sanitizer_regmatch *rm, const char *str) {
  void *ctx;
  COMMON_INTERCEPTOR_ENTER(ctx, regnsub, buf, bufsiz, sub, rm, str);
  if (sub)
COMMON_INTERCEPTOR_READ_RANGE(ctx, sub, internal_strlen(sub) + 1);
  // The implementation demands and hardcodes 10 elements
  if (rm)
    COMMON_INTERCEPTOR_READ_RANGE(ctx, rm, 10 * struct_regmatch_sz);
  if (str)
    COMMON_INTERCEPTOR_READ_RANGE(ctx, str, internal_strlen(str) + 1);
  SSIZE_T res = REAL(regnsub)(buf, bufsiz, sub, rm, str);
  if (res > 0 && buf)
    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buf, internal_strlen(buf) + 1);
  return res;
}
// regasub: like regnsub, but the result buffer is allocated by the callee
// and returned through *buf.
INTERCEPTOR(SSIZE_T, regasub, char **buf, const char *sub,
            const struct __sanitizer_regmatch *rm, const char *sstr) {
  void *ctx;
  COMMON_INTERCEPTOR_ENTER(ctx, regasub, buf, sub, rm, sstr);
  if (sub)
    COMMON_INTERCEPTOR_READ_RANGE(ctx, sub, internal_strlen(sub) + 1);
  // Hardcode 10 elements as this is hardcoded size
  if (rm)
    COMMON_INTERCEPTOR_READ_RANGE(ctx, rm, 10 * struct_regmatch_sz);
  if (sstr)
    COMMON_INTERCEPTOR_READ_RANGE(ctx, sstr, internal_strlen(sstr) + 1);
  SSIZE_T res = REAL(regasub)(buf, sub, rm, sstr);
  if (res > 0 && buf) {
    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buf, sizeof(char *));
    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, *buf, internal_strlen(*buf) + 1);
  }
  return res;
}

#define INIT_REGEXSUB                                                          \
  COMMON_INTERCEPT_FUNCTION(regnsub);                                          \
  COMMON_INTERCEPT_FUNCTION(regasub);
#else
#define INIT_REGEXSUB
#endif

#if SANITIZER_INTERCEPT_FTS
// fts_open: validates the NULL-terminated path_argv vector (each pointer and
// each path string) before the call; the resulting FTS handle is unpoisoned.
INTERCEPTOR(void *, fts_open, char *const *path_argv, int options,
            int (*compar)(void **, void **)) {
  void *ctx;
  COMMON_INTERCEPTOR_ENTER(ctx, fts_open, path_argv, options, compar);
  if (path_argv) {
    for (char *const *pa = path_argv; ; ++pa) {
      COMMON_INTERCEPTOR_READ_RANGE(ctx, pa, sizeof(char **));
      if (!*pa)
        break;
      COMMON_INTERCEPTOR_READ_RANGE(ctx, *pa, internal_strlen(*pa) + 1);
    }
  }
  // TODO(kamil): handle compar callback
  void *fts = REAL(fts_open)(path_argv, options, compar);
  if (fts)
    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, fts, struct_FTS_sz);
  return fts;
}

INTERCEPTOR(void *, fts_read, void *ftsp) {
  void *ctx;
  COMMON_INTERCEPTOR_ENTER(ctx, fts_read, ftsp);
  if (ftsp)
    COMMON_INTERCEPTOR_READ_RANGE(ctx, ftsp, struct_FTS_sz);
  void *ftsent = REAL(fts_read)(ftsp);
  if (ftsent)
    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ftsent, struct_FTSENT_sz);
  return ftsent;
}

INTERCEPTOR(void *, fts_children, void *ftsp, int options) {
  void *ctx;
  COMMON_INTERCEPTOR_ENTER(ctx, fts_children, ftsp, options);
  if (ftsp)
    COMMON_INTERCEPTOR_READ_RANGE(ctx, ftsp, struct_FTS_sz);
  void *ftsent = REAL(fts_children)(ftsp, options);
  if (ftsent)
    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ftsent, struct_FTSENT_sz);
  return ftsent;
}

INTERCEPTOR(int, fts_set, void *ftsp, void *f, int options) {
  void *ctx;
  COMMON_INTERCEPTOR_ENTER(ctx, fts_set, ftsp, f, options);
  if (ftsp)
    COMMON_INTERCEPTOR_READ_RANGE(ctx, ftsp, struct_FTS_sz);
  if (f)
    COMMON_INTERCEPTOR_READ_RANGE(ctx, f, struct_FTSENT_sz);
  return REAL(fts_set)(ftsp, f, options);
}

INTERCEPTOR(int, fts_close, void *ftsp) {
  void *ctx;
  COMMON_INTERCEPTOR_ENTER(ctx, fts_close, ftsp);
  if (ftsp)
    COMMON_INTERCEPTOR_READ_RANGE(ctx, ftsp, struct_FTS_sz);
  return REAL(fts_close)(ftsp);
}
#define INIT_FTS                                                               \
  COMMON_INTERCEPT_FUNCTION(fts_open);                                         \
  COMMON_INTERCEPT_FUNCTION(fts_read);                                         \
  COMMON_INTERCEPT_FUNCTION(fts_children);                                     \
  COMMON_INTERCEPT_FUNCTION(fts_set);                                         \
  COMMON_INTERCEPT_FUNCTION(fts_close);
#else
#define INIT_FTS
#endif

#if SANITIZER_INTERCEPT_SYSCTL
INTERCEPTOR(int, sysctl, int *name, unsigned int namelen, void *oldp,
       SIZE_T *oldlenp, void *newp, SIZE_T newlen) {
  void *ctx;
  // Early in process startup the interceptor machinery may not be ready;
  // fall back to the internal syscall wrapper in that case.
  if (COMMON_INTERCEPTOR_NOTHING_IS_INITIALIZED)
    return internal_sysctl(name, namelen, oldp, oldlenp, newp, newlen);
  COMMON_INTERCEPTOR_ENTER(ctx, sysctl, name, namelen, oldp, oldlenp, newp,
                           newlen);
  if (name)
    COMMON_INTERCEPTOR_READ_RANGE(ctx, name, namelen * sizeof(*name));
  if (oldlenp)
    COMMON_INTERCEPTOR_READ_RANGE(ctx, oldlenp, sizeof(*oldlenp));
  if (newp && newlen)
    COMMON_INTERCEPTOR_READ_RANGE(ctx, newp, newlen);
  int res = REAL(sysctl)(name, namelen, oldp, oldlenp, newp, newlen);
  // On success *oldlenp holds the number of bytes the kernel wrote into oldp.
  if (!res) {
    if (oldlenp) {
      COMMON_INTERCEPTOR_WRITE_RANGE(ctx, oldlenp, sizeof(*oldlenp));
      if (oldp)
        COMMON_INTERCEPTOR_WRITE_RANGE(ctx, oldp, *oldlenp);
    }
  }
  return res;
}

INTERCEPTOR(int, sysctlbyname, char *sname, void *oldp, SIZE_T *oldlenp,
            void *newp, SIZE_T newlen) {
  void *ctx;
  if (COMMON_INTERCEPTOR_NOTHING_IS_INITIALIZED)
    return internal_sysctlbyname(sname, oldp, oldlenp, newp, newlen);
  COMMON_INTERCEPTOR_ENTER(ctx, sysctlbyname, sname, oldp, oldlenp, newp,
                           newlen);
  if (sname)
    COMMON_INTERCEPTOR_READ_RANGE(ctx, sname, internal_strlen(sname) + 1);
  if (oldlenp)
    COMMON_INTERCEPTOR_READ_RANGE(ctx, oldlenp, sizeof(*oldlenp));
  if (newp && newlen)
    COMMON_INTERCEPTOR_READ_RANGE(ctx, newp, newlen);
  int res = REAL(sysctlbyname)(sname, oldp, oldlenp, newp, newlen);
  if (!res) {
    if (oldlenp) {
      COMMON_INTERCEPTOR_WRITE_RANGE(ctx, oldlenp, sizeof(*oldlenp));
      if (oldp)
        COMMON_INTERCEPTOR_WRITE_RANGE(ctx, oldp, *oldlenp);
    }
  }
  return res;
}

// sysctlnametomib: on success writes *namelenp MIB integers into name.
INTERCEPTOR(int, sysctlnametomib, const char *sname, int *name,
            SIZE_T *namelenp) {
  void *ctx;
  COMMON_INTERCEPTOR_ENTER(ctx, sysctlnametomib, sname, name, namelenp);
  if (sname)
    COMMON_INTERCEPTOR_READ_RANGE(ctx, sname, internal_strlen(sname) + 1);
  if (namelenp)
    COMMON_INTERCEPTOR_READ_RANGE(ctx, namelenp, sizeof(*namelenp));
  int res = REAL(sysctlnametomib)(sname, name, namelenp);
  if (!res) {
    if (namelenp) {
      COMMON_INTERCEPTOR_WRITE_RANGE(ctx, namelenp, sizeof(*namelenp));
      if (name)
        COMMON_INTERCEPTOR_WRITE_RANGE(ctx, name, *namelenp * sizeof(*name));
    }
  }
  return res;
}

#define INIT_SYSCTL                        \
  COMMON_INTERCEPT_FUNCTION(sysctl);       \
  COMMON_INTERCEPT_FUNCTION(sysctlbyname); \
  COMMON_INTERCEPT_FUNCTION(sysctlnametomib);
#else
#define INIT_SYSCTL
#endif

#if SANITIZER_INTERCEPT_ASYSCTL
// asysctl/asysctlbyname: allocate and return the result buffer; *len holds
// its size on success.
INTERCEPTOR(void *, asysctl, const int *name, SIZE_T namelen, SIZE_T *len) {
  void *ctx;
  COMMON_INTERCEPTOR_ENTER(ctx, asysctl, name, namelen, len);
  if (name)
    COMMON_INTERCEPTOR_READ_RANGE(ctx, name, sizeof(*name) * namelen);
  void *res = REAL(asysctl)(name, namelen, len);
  if (res && len) {
    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, len, sizeof(*len));
    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, res, *len);
  }
  return res;
}

INTERCEPTOR(void *, asysctlbyname, const char *sname, SIZE_T *len) {
  void *ctx;
  COMMON_INTERCEPTOR_ENTER(ctx, asysctlbyname, sname, len);
  if (sname)
    COMMON_INTERCEPTOR_READ_RANGE(ctx, sname, internal_strlen(sname) + 1);
  void *res = REAL(asysctlbyname)(sname, len);
  if (res && len) {
    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, len, sizeof(*len));
    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, res, *len);
  }
  return res;
}
#define INIT_ASYSCTL                           \
  COMMON_INTERCEPT_FUNCTION(asysctl);          \
  COMMON_INTERCEPT_FUNCTION(asysctlbyname);
#else
#define INIT_ASYSCTL
#endif

#if SANITIZER_INTERCEPT_SYSCTLGETMIBINFO
INTERCEPTOR(int, sysctlgetmibinfo, char *sname, int *name,
            unsigned int *namelenp, char *cname, SIZE_T *csz, void **rnode,
            int v) {
  void *ctx;
  COMMON_INTERCEPTOR_ENTER(ctx, sysctlgetmibinfo, sname, name, namelenp, cname,
                           csz, rnode, v);
  if (sname)
    COMMON_INTERCEPTOR_READ_RANGE(ctx, sname, internal_strlen(sname) + 1);
  if (namelenp)
    COMMON_INTERCEPTOR_READ_RANGE(ctx, namelenp, sizeof(*namelenp));
  if (csz)
    COMMON_INTERCEPTOR_READ_RANGE(ctx, csz, sizeof(*csz));
  // Skip rnode, it's rarely used and not trivial to sanitize
  // It's also used mostly internally
  int res = REAL(sysctlgetmibinfo)(sname, name, namelenp, cname, csz, rnode, v);
  if (!res) {
    if (namelenp) {
      COMMON_INTERCEPTOR_WRITE_RANGE(ctx, namelenp, sizeof(*namelenp));
      if (name)
        COMMON_INTERCEPTOR_WRITE_RANGE(ctx, name, *namelenp * sizeof(*name));
    }
    if (csz) {
      COMMON_INTERCEPTOR_WRITE_RANGE(ctx, csz, sizeof(*csz));
      if (cname)
        COMMON_INTERCEPTOR_WRITE_RANGE(ctx, cname, *csz);
    }
  }
  return res;
}
#define INIT_SYSCTLGETMIBINFO                  \
  COMMON_INTERCEPT_FUNCTION(sysctlgetmibinfo);
#else
#define INIT_SYSCTLGETMIBINFO
#endif

#if SANITIZER_INTERCEPT_NL_LANGINFO
// nl_langinfo: returns a library-owned NUL-terminated string; unpoison it.
INTERCEPTOR(char *, nl_langinfo, long item) {
  void *ctx;
  COMMON_INTERCEPTOR_ENTER(ctx, nl_langinfo, item);
  char *ret = REAL(nl_langinfo)(item);
  if (ret)
    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ret, internal_strlen(ret) + 1);
  return ret;
}
#define INIT_NL_LANGINFO COMMON_INTERCEPT_FUNCTION(nl_langinfo)
#else
#define INIT_NL_LANGINFO
#endif

#if SANITIZER_INTERCEPT_MODCTL
// modctl: the meaning of argp depends on the operation, so each operation
// gets its own annotation scheme below.
INTERCEPTOR(int, modctl, int operation, void *argp) {
  void *ctx;
  int ret;
  COMMON_INTERCEPTOR_ENTER(ctx, modctl, operation, argp);

  if (operation == modctl_load) {
    if (argp) {
      __sanitizer_modctl_load_t *ml = (__sanitizer_modctl_load_t *)argp;
      COMMON_INTERCEPTOR_READ_RANGE(ctx, ml, sizeof(*ml));
      if (ml->ml_filename)
        COMMON_INTERCEPTOR_READ_RANGE(ctx, ml->ml_filename,
                                      internal_strlen(ml->ml_filename) +
1);
      if (ml->ml_props)
        COMMON_INTERCEPTOR_READ_RANGE(ctx, ml->ml_props, ml->ml_propslen);
    }
    ret = REAL(modctl)(operation, argp);
  } else if (operation == modctl_unload) {
    // argp is the NUL-terminated module name.
    if (argp) {
      const char *name = (const char *)argp;
      COMMON_INTERCEPTOR_READ_RANGE(ctx, name, internal_strlen(name) + 1);
    }
    ret = REAL(modctl)(operation, argp);
  } else if (operation == modctl_stat) {
    // argp is an iovec; the callee may shrink iov_len, so unpoison only the
    // smaller of the requested and resulting lengths.
    uptr iov_len;
    struct __sanitizer_iovec *iov = (struct __sanitizer_iovec *)argp;
    if (iov) {
      COMMON_INTERCEPTOR_READ_RANGE(ctx, iov, sizeof(*iov));
      iov_len = iov->iov_len;
    }
    ret = REAL(modctl)(operation, argp);
    if (iov)
      COMMON_INTERCEPTOR_WRITE_RANGE(
          ctx, iov->iov_base, Min(iov_len,  iov->iov_len));
  } else if (operation == modctl_exists) {
    ret = REAL(modctl)(operation, argp);
  } else {
    ret = REAL(modctl)(operation, argp);
  }

  return ret;
}
#define INIT_MODCTL COMMON_INTERCEPT_FUNCTION(modctl)
#else
#define INIT_MODCTL
#endif

#if SANITIZER_INTERCEPT_STRTONUM
// strtonum: a shadow strtoimax call is made first so that the readable
// extent of nptr is checked the same way the strtol-family interceptors do.
INTERCEPTOR(long long, strtonum, const char *nptr, long long minval,
            long long maxval, const char **errstr) {
  void *ctx;
  COMMON_INTERCEPTOR_ENTER(ctx, strtonum, nptr, minval, maxval, errstr);

  // TODO(kamil): Implement strtoll as a common interceptor
  char *real_endptr;
  long long ret = (long long)REAL(strtoimax)(nptr, &real_endptr, 10);
  StrtolFixAndCheck(ctx, nptr, nullptr, real_endptr, 10);

  ret = REAL(strtonum)(nptr, minval, maxval, errstr);
  if (errstr) {
    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, errstr, sizeof(const char *));
     if (*errstr)
      COMMON_INTERCEPTOR_WRITE_RANGE(ctx, *errstr, internal_strlen(*errstr) + 1);
  }
  return ret;
}
#define INIT_STRTONUM COMMON_INTERCEPT_FUNCTION(strtonum)
#else
#define INIT_STRTONUM
#endif

#if SANITIZER_INTERCEPT_FPARSELN
// fparseln: returns an allocated line; on success also updates *len and
// *lineno, which are unpoisoned alongside the returned string.
INTERCEPTOR(char *, fparseln, __sanitizer_FILE *stream, SIZE_T *len,
            SIZE_T *lineno, const char delim[3], int flags) {
  void *ctx;
  COMMON_INTERCEPTOR_ENTER(ctx, fparseln, stream, len, lineno, delim, flags);
  if (lineno)
    COMMON_INTERCEPTOR_READ_RANGE(ctx, lineno, sizeof(*lineno));
  if (delim)
    COMMON_INTERCEPTOR_READ_RANGE(ctx, delim, sizeof(delim[0]) * 3);
  char *ret = REAL(fparseln)(stream, len, lineno, delim, flags);
  if (ret) {
    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ret, internal_strlen(ret) + 1);
    if (len)
      COMMON_INTERCEPTOR_WRITE_RANGE(ctx, len, sizeof(*len));
    if (lineno)
      COMMON_INTERCEPTOR_WRITE_RANGE(ctx, lineno, sizeof(*lineno));
  }
  return ret;
}
#define INIT_FPARSELN COMMON_INTERCEPT_FUNCTION(fparseln)
#else
#define INIT_FPARSELN
#endif

#if SANITIZER_INTERCEPT_STATVFS1
INTERCEPTOR(int, statvfs1, const char *path, void *buf, int flags) {
  void *ctx;
  COMMON_INTERCEPTOR_ENTER(ctx, statvfs1, path, buf, flags);
  if (path) COMMON_INTERCEPTOR_READ_RANGE(ctx, path, internal_strlen(path) + 1);
  int res = REAL(statvfs1)(path, buf, flags);
  if (!res) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buf, struct_statvfs_sz);
  return res;
}
INTERCEPTOR(int, fstatvfs1, int fd, void *buf, int flags) {
  void *ctx;
  COMMON_INTERCEPTOR_ENTER(ctx, fstatvfs1, fd, buf, flags);
  COMMON_INTERCEPTOR_FD_ACCESS(ctx, fd);
  int res = REAL(fstatvfs1)(fd, buf, flags);
  if (!res) {
    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buf, struct_statvfs_sz);
    if (fd >= 0)
      COMMON_INTERCEPTOR_FD_ACQUIRE(ctx, fd);
  }
  return res;
}
#define INIT_STATVFS1                  \
  COMMON_INTERCEPT_FUNCTION(statvfs1);  \
  COMMON_INTERCEPT_FUNCTION(fstatvfs1);
#else
#define INIT_STATVFS1
#endif

#if SANITIZER_INTERCEPT_STRTOI
// strtoi/strtou: the real call is made with a local endptr so that
// StrtolFixAndCheck can validate nptr and fix up the caller's endptr.
INTERCEPTOR(INTMAX_T, strtoi, const char *nptr, char **endptr, int base,
            INTMAX_T low, INTMAX_T high, int *rstatus) {
  void *ctx;
  COMMON_INTERCEPTOR_ENTER(ctx, strtoi, nptr, endptr, base, low, high, rstatus);
  char *real_endptr;
INTMAX_T ret = REAL(strtoi)(nptr, &real_endptr, base, low, high, rstatus);
  StrtolFixAndCheck(ctx, nptr, endptr, real_endptr, base);
  if (rstatus)
    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, rstatus, sizeof(*rstatus));
  return ret;
}

INTERCEPTOR(UINTMAX_T, strtou, const char *nptr, char **endptr, int base,
            UINTMAX_T low, UINTMAX_T high, int *rstatus) {
  void *ctx;
  COMMON_INTERCEPTOR_ENTER(ctx, strtou, nptr, endptr, base, low, high, rstatus);
  char *real_endptr;
  UINTMAX_T ret = REAL(strtou)(nptr, &real_endptr, base, low, high, rstatus);
  StrtolFixAndCheck(ctx, nptr, endptr, real_endptr, base);
  if (rstatus)
    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, rstatus, sizeof(*rstatus));
  return ret;
}
#define INIT_STRTOI                                                            \
  COMMON_INTERCEPT_FUNCTION(strtoi);                                           \
  COMMON_INTERCEPT_FUNCTION(strtou)
#else
#define INIT_STRTOI
#endif

#if SANITIZER_INTERCEPT_CAPSICUM
// The cap_rights_* entry points are variadic; these helper macros expand to
// a full interceptor body (read *rights before the call, unpoison the
// returned rights object after).  Comments cannot go inside the macro bodies
// because of the backslash continuations.
#define CAP_RIGHTS_INIT_INTERCEPTOR(cap_rights_init, rights, ...)          \
  {                                                                        \
    void *ctx;                                                             \
    COMMON_INTERCEPTOR_ENTER(ctx, cap_rights_init, rights, ##__VA_ARGS__); \
    if (rights)                                                            \
      COMMON_INTERCEPTOR_READ_RANGE(ctx, rights, sizeof(*rights));         \
    __sanitizer_cap_rights_t *ret =                                        \
        REAL(cap_rights_init)(rights, ##__VA_ARGS__);                      \
    if (ret)                                                               \
      COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ret, sizeof(*ret));              \
    return ret;                                                            \
  }

#define CAP_RIGHTS_SET_INTERCEPTOR(cap_rights_set, rights, ...)           \
  {                                                                       \
    void *ctx;                                                            \
    COMMON_INTERCEPTOR_ENTER(ctx, cap_rights_set, rights, ##__VA_ARGS__); \
    if (rights)                                                           \
      COMMON_INTERCEPTOR_READ_RANGE(ctx, rights, sizeof(*rights));        \
    __sanitizer_cap_rights_t *ret =                                       \
        REAL(cap_rights_set)(rights, ##__VA_ARGS__);                      \
    if (ret)                                                              \
      COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ret, sizeof(*ret));             \
    return ret;                                                           \
  }

#define CAP_RIGHTS_CLEAR_INTERCEPTOR(cap_rights_clear, rights, ...)         \
  {                                                                         \
    void *ctx;                                                              \
    COMMON_INTERCEPTOR_ENTER(ctx, cap_rights_clear, rights, ##__VA_ARGS__); \
    if (rights)                                                             \
      COMMON_INTERCEPTOR_READ_RANGE(ctx, rights, sizeof(*rights));          \
    __sanitizer_cap_rights_t *ret =                                         \
        REAL(cap_rights_clear)(rights, ##__VA_ARGS__);                      \
    if (ret)                                                                \
      COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ret, sizeof(*ret));               \
    return ret;                                                             \
  }

#define CAP_RIGHTS_IS_SET_INTERCEPTOR(cap_rights_is_set, rights, ...)        \
  {                                                                          \
    void *ctx;                                                               \
    COMMON_INTERCEPTOR_ENTER(ctx, cap_rights_is_set, rights, ##__VA_ARGS__); \
    if (rights)                                                              \
      COMMON_INTERCEPTOR_READ_RANGE(ctx, rights, sizeof(*rights));           \
    return REAL(cap_rights_is_set)(rights, ##__VA_ARGS__);                   \
  }

INTERCEPTOR(__sanitizer_cap_rights_t *, cap_rights_init,
            __sanitizer_cap_rights_t *rights) {
  CAP_RIGHTS_INIT_INTERCEPTOR(cap_rights_init, rights);
}

INTERCEPTOR(__sanitizer_cap_rights_t *, cap_rights_set,
            __sanitizer_cap_rights_t *rights) {
  CAP_RIGHTS_SET_INTERCEPTOR(cap_rights_set, rights);
}

INTERCEPTOR(__sanitizer_cap_rights_t *, cap_rights_clear,
            __sanitizer_cap_rights_t *rights) {
  CAP_RIGHTS_CLEAR_INTERCEPTOR(cap_rights_clear, rights);
}

INTERCEPTOR(bool, cap_rights_is_set,
            __sanitizer_cap_rights_t *rights) {
  CAP_RIGHTS_IS_SET_INTERCEPTOR(cap_rights_is_set, rights);
}

// cap_rights_limit: only the input rights object is read.
INTERCEPTOR(int, cap_rights_limit, int fd,
            const __sanitizer_cap_rights_t *rights) {
  void *ctx;
  COMMON_INTERCEPTOR_ENTER(ctx, cap_rights_limit, fd, rights);
  if (rights)
    COMMON_INTERCEPTOR_READ_RANGE(ctx, rights, sizeof(*rights));

  return REAL(cap_rights_limit)(fd, rights);
}

// cap_rights_get: on success (ret == 0) the rights object was filled in.
INTERCEPTOR(int, cap_rights_get, int fd, __sanitizer_cap_rights_t *rights) {
  void *ctx;
  COMMON_INTERCEPTOR_ENTER(ctx, cap_rights_get, fd, rights);
  int ret = REAL(cap_rights_get)(fd, rights);
  if (!ret && rights)
    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, rights, sizeof(*rights));

  return ret;
}

INTERCEPTOR(bool, cap_rights_is_valid, const __sanitizer_cap_rights_t *rights) {
  void *ctx;
  COMMON_INTERCEPTOR_ENTER(ctx, cap_rights_is_valid, rights);
  if (rights)
    COMMON_INTERCEPTOR_READ_RANGE(ctx,
rights, sizeof(*rights));\n\n  return REAL(cap_rights_is_valid(rights));\n}\n\nINTERCEPTOR(__sanitizer_cap_rights *, cap_rights_merge,\n  __sanitizer_cap_rights *dst, const __sanitizer_cap_rights *src) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, cap_rights_merge, dst, src);\n  if (src)\n    COMMON_INTERCEPTOR_READ_RANGE(ctx, src, sizeof(*src));\n\n  __sanitizer_cap_rights *ret = REAL(cap_rights_merge)(dst, src);\n  if (dst)\n    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, dst, sizeof(*dst));\n\n  return ret;\n}\n\nINTERCEPTOR(__sanitizer_cap_rights *, cap_rights_remove,\n  __sanitizer_cap_rights *dst, const __sanitizer_cap_rights *src) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, cap_rights_remove, dst, src);\n  if (src)\n    COMMON_INTERCEPTOR_READ_RANGE(ctx, src, sizeof(*src));\n\n  __sanitizer_cap_rights *ret = REAL(cap_rights_remove)(dst, src);\n  if (dst)\n    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, dst, sizeof(*dst));\n\n  return ret;\n}\n\nINTERCEPTOR(bool, cap_rights_contains, const __sanitizer_cap_rights *big,\n  const __sanitizer_cap_rights *little) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, cap_rights_contains, big, little);\n  if (little)\n    COMMON_INTERCEPTOR_READ_RANGE(ctx, little, sizeof(*little));\n  if (big)\n    COMMON_INTERCEPTOR_READ_RANGE(ctx, big, sizeof(*big));\n\n  return REAL(cap_rights_contains)(big, little);\n}\n\nINTERCEPTOR(int, cap_ioctls_limit, int fd, const uptr *cmds, SIZE_T ncmds) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, cap_ioctls_limit, fd, cmds, ncmds);\n  if (cmds)\n    COMMON_INTERCEPTOR_READ_RANGE(ctx, cmds, sizeof(*cmds) * ncmds);\n\n  return REAL(cap_ioctls_limit)(fd, cmds, ncmds);\n}\n\nINTERCEPTOR(int, cap_ioctls_get, int fd, uptr *cmds, SIZE_T maxcmds) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, cap_ioctls_get, fd, cmds, maxcmds);\n  int ret = REAL(cap_ioctls_get)(fd, cmds, maxcmds);\n  if (!ret && cmds)\n    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, cmds, sizeof(*cmds) * maxcmds);\n\n  return 
ret;
}
#define INIT_CAPSICUM                          \
  COMMON_INTERCEPT_FUNCTION(cap_rights_init); \
  COMMON_INTERCEPT_FUNCTION(cap_rights_set); \
  COMMON_INTERCEPT_FUNCTION(cap_rights_clear); \
  COMMON_INTERCEPT_FUNCTION(cap_rights_is_set); \
  COMMON_INTERCEPT_FUNCTION(cap_rights_get);   \
  COMMON_INTERCEPT_FUNCTION(cap_rights_limit); \
  COMMON_INTERCEPT_FUNCTION(cap_rights_contains); \
  COMMON_INTERCEPT_FUNCTION(cap_rights_remove); \
  COMMON_INTERCEPT_FUNCTION(cap_rights_merge); \
  COMMON_INTERCEPT_FUNCTION(cap_rights_is_valid); \
  COMMON_INTERCEPT_FUNCTION(cap_ioctls_get);   \
  COMMON_INTERCEPT_FUNCTION(cap_ioctls_limit)
#else
#define INIT_CAPSICUM
#endif

#if SANITIZER_INTERCEPT_SHA1
// SHA1 family: the opaque context is SHA1_CTX_sz bytes; inputs are checked
// readable before the call and outputs unpoisoned after.
INTERCEPTOR(void, SHA1Init, void *context) {
  void *ctx;
  COMMON_INTERCEPTOR_ENTER(ctx, SHA1Init, context);
  REAL(SHA1Init)(context);
  if (context)
    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, context, SHA1_CTX_sz);
}
INTERCEPTOR(void, SHA1Update, void *context, const u8 *data, unsigned len) {
  void *ctx;
  COMMON_INTERCEPTOR_ENTER(ctx, SHA1Update, context, data, len);
  if (data && len > 0)
    COMMON_INTERCEPTOR_READ_RANGE(ctx, data, len);
  if (context)
    COMMON_INTERCEPTOR_READ_RANGE(ctx, context, SHA1_CTX_sz);
  REAL(SHA1Update)(context, data, len);
  if (context)
    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, context, SHA1_CTX_sz);
}
// SHA1Final: writes the 20-byte raw digest.
INTERCEPTOR(void, SHA1Final, u8 digest[20], void *context) {
  void *ctx;
  COMMON_INTERCEPTOR_ENTER(ctx, SHA1Final, digest, context);
  if (context)
    COMMON_INTERCEPTOR_READ_RANGE(ctx, context, SHA1_CTX_sz);
  REAL(SHA1Final)(digest, context);
  if (digest)
    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, digest, sizeof(u8) * 20);
}
INTERCEPTOR(void, SHA1Transform, u32 state[5], u8 buffer[64]) {
  void *ctx;
  COMMON_INTERCEPTOR_ENTER(ctx, SHA1Transform, state, buffer);
  if (state)
    COMMON_INTERCEPTOR_READ_RANGE(ctx, state, sizeof(u32) * 5);
  if (buffer)
    COMMON_INTERCEPTOR_READ_RANGE(ctx, buffer, sizeof(u8) * 64);
  REAL(SHA1Transform)(state, buffer);
  if (state)
    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, state, sizeof(u32) * 5);
}
// SHA1End/SHA1File/SHA1FileChunk/SHA1Data: each returns a hex digest string
// of SHA1_return_length bytes, which is unpoisoned when non-null.
INTERCEPTOR(char *, SHA1End, void *context, char *buf) {
  void *ctx;
  COMMON_INTERCEPTOR_ENTER(ctx, SHA1End, context, buf);
  if (context)
    COMMON_INTERCEPTOR_READ_RANGE(ctx, context, SHA1_CTX_sz);
  char *ret = REAL(SHA1End)(context, buf);
  if (ret)
    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ret, SHA1_return_length);
  return ret;
}
INTERCEPTOR(char *, SHA1File, char *filename, char *buf) {
  void *ctx;
  COMMON_INTERCEPTOR_ENTER(ctx, SHA1File, filename, buf);
  if (filename)
    COMMON_INTERCEPTOR_READ_RANGE(ctx, filename, internal_strlen(filename) + 1);
  char *ret = REAL(SHA1File)(filename, buf);
  if (ret)
    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ret, SHA1_return_length);
  return ret;
}
INTERCEPTOR(char *, SHA1FileChunk, char *filename, char *buf, OFF_T offset,
  OFF_T length) {
  void *ctx;
  COMMON_INTERCEPTOR_ENTER(ctx, SHA1FileChunk, filename, buf, offset, length);
  if (filename)
    COMMON_INTERCEPTOR_READ_RANGE(ctx, filename, internal_strlen(filename) + 1);
  char *ret = REAL(SHA1FileChunk)(filename, buf, offset, length);
  if (ret)
    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ret, SHA1_return_length);
  return ret;
}
INTERCEPTOR(char *, SHA1Data, u8 *data, SIZE_T len, char *buf) {
  void *ctx;
  COMMON_INTERCEPTOR_ENTER(ctx, SHA1Data, data, len, buf);
  if (data)
    COMMON_INTERCEPTOR_READ_RANGE(ctx, data, len);
  char *ret = REAL(SHA1Data)(data, len, buf);
  if (ret)
    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ret, SHA1_return_length);
  return ret;
}
#define INIT_SHA1                                                              \
  COMMON_INTERCEPT_FUNCTION(SHA1Init);                                         \
  COMMON_INTERCEPT_FUNCTION(SHA1Update);                                       \
  COMMON_INTERCEPT_FUNCTION(SHA1Final);
                                      \\\n  COMMON_INTERCEPT_FUNCTION(SHA1Transform);                                    \\\n  COMMON_INTERCEPT_FUNCTION(SHA1End);                                          \\\n  COMMON_INTERCEPT_FUNCTION(SHA1File);                                         \\\n  COMMON_INTERCEPT_FUNCTION(SHA1FileChunk);                                    \\\n  COMMON_INTERCEPT_FUNCTION(SHA1Data)\n#else\n#define INIT_SHA1\n#endif\n\n#if SANITIZER_INTERCEPT_MD4\nINTERCEPTOR(void, MD4Init, void *context) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, MD4Init, context);\n  REAL(MD4Init)(context);\n  if (context)\n    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, context, MD4_CTX_sz);\n}\n\nINTERCEPTOR(void, MD4Update, void *context, const unsigned char *data,\n            unsigned int len) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, MD4Update, context, data, len);\n  if (data && len > 0)\n    COMMON_INTERCEPTOR_READ_RANGE(ctx, data, len);\n  if (context)\n    COMMON_INTERCEPTOR_READ_RANGE(ctx, context, MD4_CTX_sz);\n  REAL(MD4Update)(context, data, len);\n  if (context)\n    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, context, MD4_CTX_sz);\n}\n\nINTERCEPTOR(void, MD4Final, unsigned char digest[16], void *context) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, MD4Final, digest, context);\n  if (context)\n    COMMON_INTERCEPTOR_READ_RANGE(ctx, context, MD4_CTX_sz);\n  REAL(MD4Final)(digest, context);\n  if (digest)\n    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, digest, sizeof(unsigned char) * 16);\n}\n\nINTERCEPTOR(char *, MD4End, void *context, char *buf) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, MD4End, context, buf);\n  if (context)\n    COMMON_INTERCEPTOR_READ_RANGE(ctx, context, MD4_CTX_sz);\n  char *ret = REAL(MD4End)(context, buf);\n  if (ret)\n    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ret, MD4_return_length);\n  return ret;\n}\n\nINTERCEPTOR(char *, MD4File, const char *filename, char *buf) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, MD4File, filename, 
buf);\n  if (filename)\n    COMMON_INTERCEPTOR_READ_RANGE(ctx, filename, internal_strlen(filename) + 1);\n  char *ret = REAL(MD4File)(filename, buf);\n  if (ret)\n    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ret, MD4_return_length);\n  return ret;\n}\n\nINTERCEPTOR(char *, MD4Data, const unsigned char *data, unsigned int len,\n            char *buf) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, MD4Data, data, len, buf);\n  if (data && len > 0)\n    COMMON_INTERCEPTOR_READ_RANGE(ctx, data, len);\n  char *ret = REAL(MD4Data)(data, len, buf);\n  if (ret)\n    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ret, MD4_return_length);\n  return ret;\n}\n\n#define INIT_MD4                                                               \\\n  COMMON_INTERCEPT_FUNCTION(MD4Init);                                          \\\n  COMMON_INTERCEPT_FUNCTION(MD4Update);                                        \\\n  COMMON_INTERCEPT_FUNCTION(MD4Final);                                         \\\n  COMMON_INTERCEPT_FUNCTION(MD4End);                                           \\\n  COMMON_INTERCEPT_FUNCTION(MD4File);                                          \\\n  COMMON_INTERCEPT_FUNCTION(MD4Data)\n#else\n#define INIT_MD4\n#endif\n\n#if SANITIZER_INTERCEPT_RMD160\nINTERCEPTOR(void, RMD160Init, void *context) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, RMD160Init, context);\n  REAL(RMD160Init)(context);\n  if (context)\n    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, context, RMD160_CTX_sz);\n}\nINTERCEPTOR(void, RMD160Update, void *context, const u8 *data, unsigned len) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, RMD160Update, context, data, len);\n  if (data && len > 0)\n    COMMON_INTERCEPTOR_READ_RANGE(ctx, data, len);\n  if (context)\n    COMMON_INTERCEPTOR_READ_RANGE(ctx, context, RMD160_CTX_sz);\n  REAL(RMD160Update)(context, data, len);\n  if (context)\n    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, context, RMD160_CTX_sz);\n}\nINTERCEPTOR(void, RMD160Final, u8 digest[20], void *context) {\n  void 
*ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, RMD160Final, digest, context);\n  if (context)\n    COMMON_INTERCEPTOR_READ_RANGE(ctx, context, RMD160_CTX_sz);\n  REAL(RMD160Final)(digest, context);\n  if (digest)\n    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, digest, sizeof(u8) * 20);\n}\nINTERCEPTOR(void, RMD160Transform, u32 state[5], u16 buffer[16]) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, RMD160Transform, state, buffer);\n  if (state)\n    COMMON_INTERCEPTOR_READ_RANGE(ctx, state, sizeof(u32) * 5);\n  if (buffer)\n    COMMON_INTERCEPTOR_READ_RANGE(ctx, buffer, sizeof(u32) * 16);\n  REAL(RMD160Transform)(state, buffer);\n  if (state)\n    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, state, sizeof(u32) * 5);\n}\nINTERCEPTOR(char *, RMD160End, void *context, char *buf) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, RMD160End, context, buf);\n  if (context)\n    COMMON_INTERCEPTOR_READ_RANGE(ctx, context, RMD160_CTX_sz);\n  char *ret = REAL(RMD160End)(context, buf);\n  if (ret)\n    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ret, RMD160_return_length);\n  return ret;\n}\nINTERCEPTOR(char *, RMD160File, char *filename, char *buf) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, RMD160File, filename, buf);\n  if (filename)\n    COMMON_INTERCEPTOR_READ_RANGE(ctx, filename, internal_strlen(filename) + 1);\n  char *ret = REAL(RMD160File)(filename, buf);\n  if (ret)\n    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ret, RMD160_return_length);\n  return ret;\n}\nINTERCEPTOR(char *, RMD160FileChunk, char *filename, char *buf, OFF_T offset,\n  OFF_T length) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, RMD160FileChunk, filename, buf, offset, length);\n  if (filename)\n    COMMON_INTERCEPTOR_READ_RANGE(ctx, filename, internal_strlen(filename) + 1);\n  char *ret = REAL(RMD160FileChunk)(filename, buf, offset, length);\n  if (ret)\n    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ret, RMD160_return_length);\n  return ret;\n}\nINTERCEPTOR(char *, RMD160Data, u8 *data, SIZE_T len, char *buf) {\n  void *ctx;\n  
COMMON_INTERCEPTOR_ENTER(ctx, RMD160Data, data, len, buf);\n  if (data && len > 0)\n    COMMON_INTERCEPTOR_READ_RANGE(ctx, data, len);\n  char *ret = REAL(RMD160Data)(data, len, buf);\n  if (ret)\n    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ret, RMD160_return_length);\n  return ret;\n}\n#define INIT_RMD160                                                            \\\n  COMMON_INTERCEPT_FUNCTION(RMD160Init);                                       \\\n  COMMON_INTERCEPT_FUNCTION(RMD160Update);                                     \\\n  COMMON_INTERCEPT_FUNCTION(RMD160Final);                                      \\\n  COMMON_INTERCEPT_FUNCTION(RMD160Transform);                                  \\\n  COMMON_INTERCEPT_FUNCTION(RMD160End);                                        \\\n  COMMON_INTERCEPT_FUNCTION(RMD160File);                                       \\\n  COMMON_INTERCEPT_FUNCTION(RMD160FileChunk);                                  \\\n  COMMON_INTERCEPT_FUNCTION(RMD160Data)\n#else\n#define INIT_RMD160\n#endif\n\n#if SANITIZER_INTERCEPT_MD5\nINTERCEPTOR(void, MD5Init, void *context) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, MD5Init, context);\n  REAL(MD5Init)(context);\n  if (context)\n    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, context, MD5_CTX_sz);\n}\n\nINTERCEPTOR(void, MD5Update, void *context, const unsigned char *data,\n            unsigned int len) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, MD5Update, context, data, len);\n  if (data && len > 0)\n    COMMON_INTERCEPTOR_READ_RANGE(ctx, data, len);\n  if (context)\n    COMMON_INTERCEPTOR_READ_RANGE(ctx, context, MD5_CTX_sz);\n  REAL(MD5Update)(context, data, len);\n  if (context)\n    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, context, MD5_CTX_sz);\n}\n\nINTERCEPTOR(void, MD5Final, unsigned char digest[16], void *context) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, MD5Final, digest, context);\n  if (context)\n    COMMON_INTERCEPTOR_READ_RANGE(ctx, context, MD5_CTX_sz);\n  REAL(MD5Final)(digest, 
context);\n  if (digest)\n    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, digest, sizeof(unsigned char) * 16);\n}\n\nINTERCEPTOR(char *, MD5End, void *context, char *buf) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, MD5End, context, buf);\n  if (context)\n    COMMON_INTERCEPTOR_READ_RANGE(ctx, context, MD5_CTX_sz);\n  char *ret = REAL(MD5End)(context, buf);\n  if (ret)\n    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ret, MD5_return_length);\n  return ret;\n}\n\nINTERCEPTOR(char *, MD5File, const char *filename, char *buf) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, MD5File, filename, buf);\n  if (filename)\n    COMMON_INTERCEPTOR_READ_RANGE(ctx, filename, internal_strlen(filename) + 1);\n  char *ret = REAL(MD5File)(filename, buf);\n  if (ret)\n    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ret, MD5_return_length);\n  return ret;\n}\n\nINTERCEPTOR(char *, MD5Data, const unsigned char *data, unsigned int len,\n            char *buf) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, MD5Data, data, len, buf);\n  if (data && len > 0)\n    COMMON_INTERCEPTOR_READ_RANGE(ctx, data, len);\n  char *ret = REAL(MD5Data)(data, len, buf);\n  if (ret)\n    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ret, MD5_return_length);\n  return ret;\n}\n\n#define INIT_MD5                                                               \\\n  COMMON_INTERCEPT_FUNCTION(MD5Init);                                          \\\n  COMMON_INTERCEPT_FUNCTION(MD5Update);                                        \\\n  COMMON_INTERCEPT_FUNCTION(MD5Final);                                         \\\n  COMMON_INTERCEPT_FUNCTION(MD5End);                                           \\\n  COMMON_INTERCEPT_FUNCTION(MD5File);                                          \\\n  COMMON_INTERCEPT_FUNCTION(MD5Data)\n#else\n#define INIT_MD5\n#endif\n\n#if SANITIZER_INTERCEPT_FSEEK\nINTERCEPTOR(int, fseek, __sanitizer_FILE *stream, long int offset, int whence) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, fseek, stream, offset, whence);\n  return 
REAL(fseek)(stream, offset, whence);\n}\nINTERCEPTOR(int, fseeko, __sanitizer_FILE *stream, OFF_T offset, int whence) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, fseeko, stream, offset, whence);\n  return REAL(fseeko)(stream, offset, whence);\n}\nINTERCEPTOR(long int, ftell, __sanitizer_FILE *stream) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, ftell, stream);\n  return REAL(ftell)(stream);\n}\nINTERCEPTOR(OFF_T, ftello, __sanitizer_FILE *stream) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, ftello, stream);\n  return REAL(ftello)(stream);\n}\nINTERCEPTOR(void, rewind, __sanitizer_FILE *stream) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, rewind, stream);\n  return REAL(rewind)(stream);\n}\nINTERCEPTOR(int, fgetpos, __sanitizer_FILE *stream, void *pos) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, fgetpos, stream, pos);\n  int ret = REAL(fgetpos)(stream, pos);\n  if (pos && !ret)\n    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, pos, fpos_t_sz);\n  return ret;\n}\nINTERCEPTOR(int, fsetpos, __sanitizer_FILE *stream, const void *pos) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, fsetpos, stream, pos);\n  if (pos)\n    COMMON_INTERCEPTOR_READ_RANGE(ctx, pos, fpos_t_sz);\n  return REAL(fsetpos)(stream, pos);\n}\n#define INIT_FSEEK \\\n  COMMON_INTERCEPT_FUNCTION(fseek); \\\n  COMMON_INTERCEPT_FUNCTION(fseeko); \\\n  COMMON_INTERCEPT_FUNCTION(ftell); \\\n  COMMON_INTERCEPT_FUNCTION(ftello); \\\n  COMMON_INTERCEPT_FUNCTION(rewind); \\\n  COMMON_INTERCEPT_FUNCTION(fgetpos); \\\n  COMMON_INTERCEPT_FUNCTION(fsetpos)\n#else\n#define INIT_FSEEK\n#endif\n\n#if SANITIZER_INTERCEPT_MD2\nINTERCEPTOR(void, MD2Init, void *context) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, MD2Init, context);\n  REAL(MD2Init)(context);\n  if (context)\n    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, context, MD2_CTX_sz);\n}\n\nINTERCEPTOR(void, MD2Update, void *context, const unsigned char *data,\n            unsigned int len) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, MD2Update, 
context, data, len);\n  if (data && len > 0)\n    COMMON_INTERCEPTOR_READ_RANGE(ctx, data, len);\n  if (context)\n    COMMON_INTERCEPTOR_READ_RANGE(ctx, context, MD2_CTX_sz);\n  REAL(MD2Update)(context, data, len);\n  if (context)\n    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, context, MD2_CTX_sz);\n}\n\nINTERCEPTOR(void, MD2Final, unsigned char digest[16], void *context) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, MD2Final, digest, context);\n  if (context)\n    COMMON_INTERCEPTOR_READ_RANGE(ctx, context, MD2_CTX_sz);\n  REAL(MD2Final)(digest, context);\n  if (digest)\n    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, digest, sizeof(unsigned char) * 16);\n}\n\nINTERCEPTOR(char *, MD2End, void *context, char *buf) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, MD2End, context, buf);\n  if (context)\n    COMMON_INTERCEPTOR_READ_RANGE(ctx, context, MD2_CTX_sz);\n  char *ret = REAL(MD2End)(context, buf);\n  if (ret)\n    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ret, MD2_return_length);\n  return ret;\n}\n\nINTERCEPTOR(char *, MD2File, const char *filename, char *buf) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, MD2File, filename, buf);\n  if (filename)\n    COMMON_INTERCEPTOR_READ_RANGE(ctx, filename, internal_strlen(filename) + 1);\n  char *ret = REAL(MD2File)(filename, buf);\n  if (ret)\n    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ret, MD2_return_length);\n  return ret;\n}\n\nINTERCEPTOR(char *, MD2Data, const unsigned char *data, unsigned int len,\n            char *buf) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, MD2Data, data, len, buf);\n  if (data && len > 0)\n    COMMON_INTERCEPTOR_READ_RANGE(ctx, data, len);\n  char *ret = REAL(MD2Data)(data, len, buf);\n  if (ret)\n    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ret, MD2_return_length);\n  return ret;\n}\n\n#define INIT_MD2                                                               \\\n  COMMON_INTERCEPT_FUNCTION(MD2Init);                                          \\\n  COMMON_INTERCEPT_FUNCTION(MD2Update);                 
                       \\\n  COMMON_INTERCEPT_FUNCTION(MD2Final);                                         \\\n  COMMON_INTERCEPT_FUNCTION(MD2End);                                           \\\n  COMMON_INTERCEPT_FUNCTION(MD2File);                                          \\\n  COMMON_INTERCEPT_FUNCTION(MD2Data)\n#else\n#define INIT_MD2\n#endif\n\n#if SANITIZER_INTERCEPT_SHA2\n#define SHA2_INTERCEPTORS(LEN, SHA2_STATE_T) \\\n  INTERCEPTOR(void, SHA##LEN##_Init, void *context) { \\\n    void *ctx; \\\n    COMMON_INTERCEPTOR_ENTER(ctx, SHA##LEN##_Init, context); \\\n    REAL(SHA##LEN##_Init)(context); \\\n    if (context) \\\n      COMMON_INTERCEPTOR_WRITE_RANGE(ctx, context, SHA##LEN##_CTX_sz); \\\n  } \\\n  INTERCEPTOR(void, SHA##LEN##_Update, void *context, \\\n              const u8 *data, SIZE_T len) { \\\n    void *ctx; \\\n    COMMON_INTERCEPTOR_ENTER(ctx, SHA##LEN##_Update, context, data, len); \\\n    if (data && len > 0) \\\n      COMMON_INTERCEPTOR_READ_RANGE(ctx, data, len); \\\n    if (context) \\\n      COMMON_INTERCEPTOR_READ_RANGE(ctx, context, SHA##LEN##_CTX_sz); \\\n    REAL(SHA##LEN##_Update)(context, data, len); \\\n    if (context) \\\n      COMMON_INTERCEPTOR_WRITE_RANGE(ctx, context, SHA##LEN##_CTX_sz); \\\n  } \\\n  INTERCEPTOR(void, SHA##LEN##_Final, u8 digest[LEN/8], \\\n  void *context) { \\\n    void *ctx; \\\n    CHECK_EQ(SHA##LEN##_digest_length, LEN/8); \\\n    COMMON_INTERCEPTOR_ENTER(ctx, SHA##LEN##_Final, digest, context); \\\n    if (context) \\\n      COMMON_INTERCEPTOR_READ_RANGE(ctx, context, SHA##LEN##_CTX_sz); \\\n    REAL(SHA##LEN##_Final)(digest, context); \\\n    if (digest) \\\n      COMMON_INTERCEPTOR_WRITE_RANGE(ctx, digest, \\\n                                     sizeof(digest[0]) * \\\n  SHA##LEN##_digest_length); \\\n  } \\\n  INTERCEPTOR(char *, SHA##LEN##_End, void *context, char *buf) { \\\n    void *ctx; \\\n    COMMON_INTERCEPTOR_ENTER(ctx, SHA##LEN##_End, context, buf); \\\n    if (context) \\\n      
COMMON_INTERCEPTOR_READ_RANGE(ctx, context, SHA##LEN##_CTX_sz); \\\n    char *ret = REAL(SHA##LEN##_End)(context, buf); \\\n    if (ret) \\\n      COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ret, SHA##LEN##_return_length); \\\n    return ret; \\\n  } \\\n  INTERCEPTOR(char *, SHA##LEN##_File, const char *filename, char *buf) { \\\n    void *ctx; \\\n    COMMON_INTERCEPTOR_ENTER(ctx, SHA##LEN##_File, filename, buf); \\\n    if (filename) \\\n      COMMON_INTERCEPTOR_READ_RANGE(ctx, filename, internal_strlen(filename) + 1);\\\n    char *ret = REAL(SHA##LEN##_File)(filename, buf); \\\n    if (ret) \\\n      COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ret, SHA##LEN##_return_length); \\\n    return ret; \\\n  } \\\n  INTERCEPTOR(char *, SHA##LEN##_FileChunk, const char *filename, char *buf, \\\n              OFF_T offset, OFF_T length) { \\\n    void *ctx; \\\n    COMMON_INTERCEPTOR_ENTER(ctx, SHA##LEN##_FileChunk, filename, buf, offset, \\\n  length); \\\n    if (filename) \\\n      COMMON_INTERCEPTOR_READ_RANGE(ctx, filename, internal_strlen(filename) + 1);\\\n    char *ret = REAL(SHA##LEN##_FileChunk)(filename, buf, offset, length); \\\n    if (ret) \\\n      COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ret, SHA##LEN##_return_length); \\\n    return ret; \\\n  } \\\n  INTERCEPTOR(char *, SHA##LEN##_Data, u8 *data, SIZE_T len, char *buf) { \\\n    void *ctx; \\\n    COMMON_INTERCEPTOR_ENTER(ctx, SHA##LEN##_Data, data, len, buf); \\\n    if (data && len > 0) \\\n      COMMON_INTERCEPTOR_READ_RANGE(ctx, data, len); \\\n    char *ret = REAL(SHA##LEN##_Data)(data, len, buf); \\\n    if (ret) \\\n      COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ret, SHA##LEN##_return_length); \\\n    return ret; \\\n  }\n\nSHA2_INTERCEPTORS(224, u32)\nSHA2_INTERCEPTORS(256, u32)\nSHA2_INTERCEPTORS(384, u64)\nSHA2_INTERCEPTORS(512, u64)\n\n#define INIT_SHA2_INTECEPTORS(LEN) \\\n  COMMON_INTERCEPT_FUNCTION(SHA##LEN##_Init); \\\n  COMMON_INTERCEPT_FUNCTION(SHA##LEN##_Update); \\\n  
COMMON_INTERCEPT_FUNCTION(SHA##LEN##_Final); \\\n  COMMON_INTERCEPT_FUNCTION(SHA##LEN##_End); \\\n  COMMON_INTERCEPT_FUNCTION(SHA##LEN##_File); \\\n  COMMON_INTERCEPT_FUNCTION(SHA##LEN##_FileChunk); \\\n  COMMON_INTERCEPT_FUNCTION(SHA##LEN##_Data)\n\n#define INIT_SHA2 \\\n  INIT_SHA2_INTECEPTORS(224); \\\n  INIT_SHA2_INTECEPTORS(256); \\\n  INIT_SHA2_INTECEPTORS(384); \\\n  INIT_SHA2_INTECEPTORS(512)\n#undef SHA2_INTERCEPTORS\n#else\n#define INIT_SHA2\n#endif\n\n#if SANITIZER_INTERCEPT_VIS\nINTERCEPTOR(char *, vis, char *dst, int c, int flag, int nextc) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, vis, dst, c, flag, nextc);\n  char *end = REAL(vis)(dst, c, flag, nextc);\n  // dst is NULL terminated and end points to the NULL char\n  if (dst && end)\n    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, dst, end - dst + 1);\n  return end;\n}\nINTERCEPTOR(char *, nvis, char *dst, SIZE_T dlen, int c, int flag, int nextc) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, nvis, dst, dlen, c, flag, nextc);\n  char *end = REAL(nvis)(dst, dlen, c, flag, nextc);\n  // nvis cannot make sure the dst is NULL terminated\n  if (dst && end)\n    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, dst, end - dst + 1);\n  return end;\n}\nINTERCEPTOR(int, strvis, char *dst, const char *src, int flag) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, strvis, dst, src, flag);\n  if (src)\n    COMMON_INTERCEPTOR_READ_RANGE(ctx, src, internal_strlen(src) + 1);\n  int len = REAL(strvis)(dst, src, flag);\n  if (dst)\n    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, dst, len + 1);\n  return len;\n}\nINTERCEPTOR(int, stravis, char **dst, const char *src, int flag) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, stravis, dst, src, flag);\n  if (src)\n    COMMON_INTERCEPTOR_READ_RANGE(ctx, src, internal_strlen(src) + 1);\n  int len = REAL(stravis)(dst, src, flag);\n  if (dst) {\n    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, dst, sizeof(char *));\n    if (*dst)\n      COMMON_INTERCEPTOR_WRITE_RANGE(ctx, *dst, len + 1);\n  }\n  return 
len;\n}\nINTERCEPTOR(int, strnvis, char *dst, SIZE_T dlen, const char *src, int flag) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, strnvis, dst, dlen, src, flag);\n  if (src)\n    COMMON_INTERCEPTOR_READ_RANGE(ctx, src, internal_strlen(src) + 1);\n  int len = REAL(strnvis)(dst, dlen, src, flag);\n  // The interface will be valid even if there is no space for NULL char\n  if (dst && len > 0)\n    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, dst, len + 1);\n  return len;\n}\nINTERCEPTOR(int, strvisx, char *dst, const char *src, SIZE_T len, int flag) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, strvisx, dst, src, len, flag);\n  if (src)\n    COMMON_INTERCEPTOR_READ_RANGE(ctx, src, len);\n  int ret = REAL(strvisx)(dst, src, len, flag);\n  if (dst)\n    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, dst, ret + 1);\n  return ret;\n}\nINTERCEPTOR(int, strnvisx, char *dst, SIZE_T dlen, const char *src, SIZE_T len,\n            int flag) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, strnvisx, dst, dlen, src, len, flag);\n  if (src)\n    COMMON_INTERCEPTOR_READ_RANGE(ctx, src, len);\n  int ret = REAL(strnvisx)(dst, dlen, src, len, flag);\n  if (dst && ret >= 0)\n    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, dst, ret + 1);\n  return ret;\n}\nINTERCEPTOR(int, strenvisx, char *dst, SIZE_T dlen, const char *src, SIZE_T len,\n            int flag, int *cerr_ptr) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, strenvisx, dst, dlen, src, len, flag, cerr_ptr);\n  if (src)\n    COMMON_INTERCEPTOR_READ_RANGE(ctx, src, len);\n  // FIXME: only need to be checked when \"flag | VIS_NOLOCALE\" doesn't hold\n  // according to the implementation\n  if (cerr_ptr)\n    COMMON_INTERCEPTOR_READ_RANGE(ctx, cerr_ptr, sizeof(int));\n  int ret = REAL(strenvisx)(dst, dlen, src, len, flag, cerr_ptr);\n  if (dst && ret >= 0)\n    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, dst, ret + 1);\n  if (cerr_ptr)\n    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, cerr_ptr, sizeof(int));\n  return ret;\n}\nINTERCEPTOR(char *, svis, char *dst, 
int c, int flag, int nextc,\n            const char *extra) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, svis, dst, c, flag, nextc, extra);\n  if (extra)\n    COMMON_INTERCEPTOR_READ_RANGE(ctx, extra, internal_strlen(extra) + 1);\n  char *end = REAL(svis)(dst, c, flag, nextc, extra);\n  if (dst && end)\n    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, dst, end - dst + 1);\n  return end;\n}\nINTERCEPTOR(char *, snvis, char *dst, SIZE_T dlen, int c, int flag, int nextc,\n            const char *extra) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, snvis, dst, dlen, c, flag, nextc, extra);\n  if (extra)\n    COMMON_INTERCEPTOR_READ_RANGE(ctx, extra, internal_strlen(extra) + 1);\n  char *end = REAL(snvis)(dst, dlen, c, flag, nextc, extra);\n  if (dst && end)\n    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, dst,\n                                   Min((SIZE_T)(end - dst + 1), dlen));\n  return end;\n}\nINTERCEPTOR(int, strsvis, char *dst, const char *src, int flag,\n            const char *extra) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, strsvis, dst, src, flag, extra);\n  if (src)\n    COMMON_INTERCEPTOR_READ_RANGE(ctx, src, internal_strlen(src) + 1);\n  if (extra)\n    COMMON_INTERCEPTOR_READ_RANGE(ctx, extra, internal_strlen(extra) + 1);\n  int len = REAL(strsvis)(dst, src, flag, extra);\n  if (dst)\n    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, dst, len + 1);\n  return len;\n}\nINTERCEPTOR(int, strsnvis, char *dst, SIZE_T dlen, const char *src, int flag,\n            const char *extra) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, strsnvis, dst, dlen, src, flag, extra);\n  if (src)\n    COMMON_INTERCEPTOR_READ_RANGE(ctx, src, internal_strlen(src) + 1);\n  if (extra)\n    COMMON_INTERCEPTOR_READ_RANGE(ctx, extra, internal_strlen(extra) + 1);\n  int len = REAL(strsnvis)(dst, dlen, src, flag, extra);\n  // The interface will be valid even if there is no space for NULL char\n  if (dst && len >= 0)\n    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, dst, len + 1);\n  return 
len;\n}\nINTERCEPTOR(int, strsvisx, char *dst, const char *src, SIZE_T len, int flag,\n            const char *extra) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, strsvisx, dst, src, len, flag, extra);\n  if (src)\n    COMMON_INTERCEPTOR_READ_RANGE(ctx, src, len);\n  if (extra)\n    COMMON_INTERCEPTOR_READ_RANGE(ctx, extra, internal_strlen(extra) + 1);\n  int ret = REAL(strsvisx)(dst, src, len, flag, extra);\n  if (dst)\n    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, dst, ret + 1);\n  return ret;\n}\nINTERCEPTOR(int, strsnvisx, char *dst, SIZE_T dlen, const char *src, SIZE_T len,\n            int flag, const char *extra) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, strsnvisx, dst, dlen, src, len, flag, extra);\n  if (src)\n    COMMON_INTERCEPTOR_READ_RANGE(ctx, src, len);\n  if (extra)\n    COMMON_INTERCEPTOR_READ_RANGE(ctx, extra, internal_strlen(extra) + 1);\n  int ret = REAL(strsnvisx)(dst, dlen, src, len, flag, extra);\n  if (dst && ret >= 0)\n    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, dst, ret + 1);\n  return ret;\n}\nINTERCEPTOR(int, strsenvisx, char *dst, SIZE_T dlen, const char *src,\n            SIZE_T len, int flag, const char *extra, int *cerr_ptr) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, strsenvisx, dst, dlen, src, len, flag, extra,\n                           cerr_ptr);\n  if (src)\n    COMMON_INTERCEPTOR_READ_RANGE(ctx, src, len);\n  if (extra)\n    COMMON_INTERCEPTOR_READ_RANGE(ctx, extra, internal_strlen(extra) + 1);\n  // FIXME: only need to be checked when \"flag | VIS_NOLOCALE\" doesn't hold\n  // according to the implementation\n  if (cerr_ptr)\n    COMMON_INTERCEPTOR_READ_RANGE(ctx, cerr_ptr, sizeof(int));\n  int ret = REAL(strsenvisx)(dst, dlen, src, len, flag, extra, cerr_ptr);\n  if (dst && ret >= 0)\n    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, dst, ret + 1);\n  if (cerr_ptr)\n    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, cerr_ptr, sizeof(int));\n  return ret;\n}\nINTERCEPTOR(int, unvis, char *cp, int c, int *astate, int flag) {\n  void *ctx;\n  
COMMON_INTERCEPTOR_ENTER(ctx, unvis, cp, c, astate, flag);\n  if (astate)\n    COMMON_INTERCEPTOR_READ_RANGE(ctx, astate, sizeof(*astate));\n  int ret = REAL(unvis)(cp, c, astate, flag);\n  if (ret == unvis_valid || ret == unvis_validpush) {\n    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, cp, sizeof(*cp));\n  }\n  return ret;\n}\nINTERCEPTOR(int, strunvis, char *dst, const char *src) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, strunvis, dst, src);\n  if (src)\n    COMMON_INTERCEPTOR_READ_RANGE(ctx, src, internal_strlen(src) + 1);\n  int ret = REAL(strunvis)(dst, src);\n  if (ret != -1)\n    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, dst, ret + 1);\n  return ret;\n}\nINTERCEPTOR(int, strnunvis, char *dst, SIZE_T dlen, const char *src) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, strnunvis, dst, dlen, src);\n  if (src)\n    COMMON_INTERCEPTOR_READ_RANGE(ctx, src, internal_strlen(src) + 1);\n  int ret = REAL(strnunvis)(dst, dlen, src);\n  if (ret != -1)\n    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, dst, ret + 1);\n  return ret;\n}\nINTERCEPTOR(int, strunvisx, char *dst, const char *src, int flag) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, strunvisx, dst, src, flag);\n  if (src)\n    COMMON_INTERCEPTOR_READ_RANGE(ctx, src, internal_strlen(src) + 1);\n  int ret = REAL(strunvisx)(dst, src, flag);\n  if (ret != -1)\n    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, dst, ret + 1);\n  return ret;\n}\nINTERCEPTOR(int, strnunvisx, char *dst, SIZE_T dlen, const char *src,\n            int flag) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, strnunvisx, dst, dlen, src, flag);\n  if (src)\n    COMMON_INTERCEPTOR_READ_RANGE(ctx, src, internal_strlen(src) + 1);\n  int ret = REAL(strnunvisx)(dst, dlen, src, flag);\n  if (ret != -1)\n    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, dst, ret + 1);\n  return ret;\n}\n#define INIT_VIS                                                               \\\n  COMMON_INTERCEPT_FUNCTION(vis);                                              \\\n  
COMMON_INTERCEPT_FUNCTION(nvis);                                             \\\n  COMMON_INTERCEPT_FUNCTION(strvis);                                           \\\n  COMMON_INTERCEPT_FUNCTION(stravis);                                          \\\n  COMMON_INTERCEPT_FUNCTION(strnvis);                                          \\\n  COMMON_INTERCEPT_FUNCTION(strvisx);                                          \\\n  COMMON_INTERCEPT_FUNCTION(strnvisx);                                         \\\n  COMMON_INTERCEPT_FUNCTION(strenvisx);                                        \\\n  COMMON_INTERCEPT_FUNCTION(svis);                                             \\\n  COMMON_INTERCEPT_FUNCTION(snvis);                                            \\\n  COMMON_INTERCEPT_FUNCTION(strsvis);                                          \\\n  COMMON_INTERCEPT_FUNCTION(strsnvis);                                         \\\n  COMMON_INTERCEPT_FUNCTION(strsvisx);                                         \\\n  COMMON_INTERCEPT_FUNCTION(strsnvisx);                                        \\\n  COMMON_INTERCEPT_FUNCTION(strsenvisx);                                       \\\n  COMMON_INTERCEPT_FUNCTION(unvis);                                            \\\n  COMMON_INTERCEPT_FUNCTION(strunvis);                                         \\\n  COMMON_INTERCEPT_FUNCTION(strnunvis);                                        \\\n  COMMON_INTERCEPT_FUNCTION(strunvisx);                                        \\\n  COMMON_INTERCEPT_FUNCTION(strnunvisx)\n#else\n#define INIT_VIS\n#endif\n\n#if SANITIZER_INTERCEPT_CDB\nINTERCEPTOR(struct __sanitizer_cdbr *, cdbr_open, const char *path, int flags) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, cdbr_open, path, flags);\n  if (path)\n    COMMON_INTERCEPTOR_READ_RANGE(ctx, path, internal_strlen(path) + 1);\n  struct __sanitizer_cdbr *cdbr = REAL(cdbr_open)(path, flags);\n  if (cdbr)\n    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, cdbr, sizeof(*cdbr));\n  return 
cdbr;\n}\n\nINTERCEPTOR(struct __sanitizer_cdbr *, cdbr_open_mem, void *base, SIZE_T size,\n  int flags, void (*unmap)(void *, void *, SIZE_T), void *cookie) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, cdbr_open_mem, base, size, flags, unmap,\n    cookie);\n  if (base && size)\n    COMMON_INTERCEPTOR_READ_RANGE(ctx, base, size);\n  struct __sanitizer_cdbr *cdbr =\n    REAL(cdbr_open_mem)(base, size, flags, unmap, cookie);\n  if (cdbr)\n    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, cdbr, sizeof(*cdbr));\n  return cdbr;\n}\n\nINTERCEPTOR(u32, cdbr_entries, struct __sanitizer_cdbr *cdbr) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, cdbr_entries, cdbr);\n  if (cdbr)\n    COMMON_INTERCEPTOR_READ_RANGE(ctx, cdbr, sizeof(*cdbr));\n  return REAL(cdbr_entries)(cdbr);\n}\n\nINTERCEPTOR(int, cdbr_get, struct __sanitizer_cdbr *cdbr, u32 index,\n            const void **data, SIZE_T *datalen) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, cdbr_get, cdbr, index, data, datalen);\n  if (cdbr)\n    COMMON_INTERCEPTOR_READ_RANGE(ctx, cdbr, sizeof(*cdbr));\n  int ret = REAL(cdbr_get)(cdbr, index, data, datalen);\n  if (!ret) {\n    if (data)\n      COMMON_INTERCEPTOR_WRITE_RANGE(ctx, data, sizeof(*data));\n    if (datalen)\n      COMMON_INTERCEPTOR_WRITE_RANGE(ctx, datalen, sizeof(*datalen));\n    if (data && datalen)\n      COMMON_INTERCEPTOR_WRITE_RANGE(ctx, *data, *datalen);\n  }\n  return ret;\n}\n\nINTERCEPTOR(int, cdbr_find, struct __sanitizer_cdbr *cdbr, const void *key,\n            SIZE_T keylen, const void **data, SIZE_T *datalen) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, cdbr_find, cdbr, key, keylen, data, datalen);\n  if (cdbr)\n    COMMON_INTERCEPTOR_READ_RANGE(ctx, cdbr, sizeof(*cdbr));\n  if (key)\n    COMMON_INTERCEPTOR_READ_RANGE(ctx, key, keylen);\n  int ret = REAL(cdbr_find)(cdbr, key, keylen, data, datalen);\n  if (!ret) {\n    if (data)\n      COMMON_INTERCEPTOR_WRITE_RANGE(ctx, data, sizeof(*data));\n    if (datalen)\n      
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, datalen, sizeof(*datalen));\n    if (data && datalen)\n      COMMON_INTERCEPTOR_WRITE_RANGE(ctx, *data, *datalen);\n  }\n  return ret;\n}\n\nINTERCEPTOR(void, cdbr_close, struct __sanitizer_cdbr *cdbr) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, cdbr_close, cdbr);\n  if (cdbr)\n    COMMON_INTERCEPTOR_READ_RANGE(ctx, cdbr, sizeof(*cdbr));\n  REAL(cdbr_close)(cdbr);\n}\n\nINTERCEPTOR(struct __sanitizer_cdbw *, cdbw_open) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, cdbw_open);\n  struct __sanitizer_cdbw *ret = REAL(cdbw_open)();\n  if (ret)\n    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ret, sizeof(*ret));\n  return ret;\n}\n\nINTERCEPTOR(int, cdbw_put, struct __sanitizer_cdbw *cdbw, const void *key,\n  SIZE_T keylen, const void *data, SIZE_T datalen) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, cdbw_put, cdbw, key, keylen, data, datalen);\n  if (cdbw)\n    COMMON_INTERCEPTOR_READ_RANGE(ctx, cdbw, sizeof(*cdbw));\n  if (data && datalen)\n    COMMON_INTERCEPTOR_READ_RANGE(ctx, data, datalen);\n  if (key && keylen)\n    COMMON_INTERCEPTOR_READ_RANGE(ctx, key, keylen);\n  int ret = REAL(cdbw_put)(cdbw, key, keylen, data, datalen);\n  if (!ret && cdbw)\n    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, cdbw, sizeof(*cdbw));\n  return ret;\n}\n\nINTERCEPTOR(int, cdbw_put_data, struct __sanitizer_cdbw *cdbw, const void *data,\n  SIZE_T datalen, u32 *index) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, cdbw_put_data, cdbw, data, datalen, index);\n  if (cdbw)\n    COMMON_INTERCEPTOR_READ_RANGE(ctx, cdbw, sizeof(*cdbw));\n  if (data && datalen)\n    COMMON_INTERCEPTOR_READ_RANGE(ctx, data, datalen);\n  int ret = REAL(cdbw_put_data)(cdbw, data, datalen, index);\n  if (!ret) {\n    if (index)\n      COMMON_INTERCEPTOR_WRITE_RANGE(ctx, index, sizeof(*index));\n    if (cdbw)\n      COMMON_INTERCEPTOR_WRITE_RANGE(ctx, cdbw, sizeof(*cdbw));\n  }\n  return ret;\n}\n\nINTERCEPTOR(int, cdbw_put_key, struct __sanitizer_cdbw *cdbw, const void *key,\n  
SIZE_T keylen, u32 index) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, cdbw_put_key, cdbw, key, keylen, index);\n  if (cdbw)\n    COMMON_INTERCEPTOR_READ_RANGE(ctx, cdbw, sizeof(*cdbw));\n  if (key && keylen)\n    COMMON_INTERCEPTOR_READ_RANGE(ctx, key, keylen);\n  int ret = REAL(cdbw_put_key)(cdbw, key, keylen, index);\n  if (!ret && cdbw)\n    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, cdbw, sizeof(*cdbw));\n  return ret;\n}\n\nINTERCEPTOR(int, cdbw_output, struct __sanitizer_cdbw *cdbw, int output,\n  const char descr[16], u32 (*seedgen)(void)) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, cdbw_output, cdbw, output, descr, seedgen);\n  COMMON_INTERCEPTOR_FD_ACCESS(ctx, output);\n  if (cdbw)\n    COMMON_INTERCEPTOR_READ_RANGE(ctx, cdbw, sizeof(*cdbw));\n  if (descr)\n    COMMON_INTERCEPTOR_READ_RANGE(ctx, descr, internal_strnlen(descr, 16));\n  if (seedgen)\n    COMMON_INTERCEPTOR_READ_RANGE(ctx, (void *)seedgen, sizeof(seedgen));\n  int ret = REAL(cdbw_output)(cdbw, output, descr, seedgen);\n  if (!ret) {\n    if (cdbw)\n      COMMON_INTERCEPTOR_WRITE_RANGE(ctx, cdbw, sizeof(*cdbw));\n    if (output >= 0)\n      COMMON_INTERCEPTOR_FD_ACQUIRE(ctx, output);\n  }\n  return ret;\n}\n\nINTERCEPTOR(void, cdbw_close, struct __sanitizer_cdbw *cdbw) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, cdbw_close, cdbw);\n  if (cdbw)\n    COMMON_INTERCEPTOR_READ_RANGE(ctx, cdbw, sizeof(*cdbw));\n  REAL(cdbw_close)(cdbw);\n}\n\n#define INIT_CDB \\\n  COMMON_INTERCEPT_FUNCTION(cdbr_open); \\\n  COMMON_INTERCEPT_FUNCTION(cdbr_open_mem); \\\n  COMMON_INTERCEPT_FUNCTION(cdbr_entries); \\\n  COMMON_INTERCEPT_FUNCTION(cdbr_get); \\\n  COMMON_INTERCEPT_FUNCTION(cdbr_find); \\\n  COMMON_INTERCEPT_FUNCTION(cdbr_close); \\\n  COMMON_INTERCEPT_FUNCTION(cdbw_open); \\\n  COMMON_INTERCEPT_FUNCTION(cdbw_put); \\\n  COMMON_INTERCEPT_FUNCTION(cdbw_put_data); \\\n  COMMON_INTERCEPT_FUNCTION(cdbw_put_key); \\\n  COMMON_INTERCEPT_FUNCTION(cdbw_output); \\\n  
COMMON_INTERCEPT_FUNCTION(cdbw_close)\n#else\n#define INIT_CDB\n#endif\n\n#if SANITIZER_INTERCEPT_GETFSENT\nINTERCEPTOR(void *, getfsent) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, getfsent);\n  void *ret = REAL(getfsent)();\n  if (ret)\n    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ret, struct_fstab_sz);\n  return ret;\n}\n\nINTERCEPTOR(void *, getfsspec, const char *spec) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, getfsspec, spec);\n  if (spec)\n    COMMON_INTERCEPTOR_READ_RANGE(ctx, spec, internal_strlen(spec) + 1);\n  void *ret = REAL(getfsspec)(spec);\n  if (ret)\n    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ret, struct_fstab_sz);\n  return ret;\n}\n\nINTERCEPTOR(void *, getfsfile, const char *file) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, getfsfile, file);\n  if (file)\n    COMMON_INTERCEPTOR_READ_RANGE(ctx, file, internal_strlen(file) + 1);\n  void *ret = REAL(getfsfile)(file);\n  if (ret)\n    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ret, struct_fstab_sz);\n  return ret;\n}\n\n#define INIT_GETFSENT \\\n  COMMON_INTERCEPT_FUNCTION(getfsent); \\\n  COMMON_INTERCEPT_FUNCTION(getfsspec); \\\n  COMMON_INTERCEPT_FUNCTION(getfsfile);\n#else\n#define INIT_GETFSENT\n#endif\n\n#if SANITIZER_INTERCEPT_ARC4RANDOM\nINTERCEPTOR(void, arc4random_buf, void *buf, SIZE_T len) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, arc4random_buf, buf, len);\n  REAL(arc4random_buf)(buf, len);\n  if (buf && len)\n    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buf, len);\n}\n\nINTERCEPTOR(void, arc4random_addrandom, u8 *dat, int datlen) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, arc4random_addrandom, dat, datlen);\n  if (dat && datlen)\n    COMMON_INTERCEPTOR_READ_RANGE(ctx, dat, datlen);\n  REAL(arc4random_addrandom)(dat, datlen);\n}\n\n#define INIT_ARC4RANDOM \\\n  COMMON_INTERCEPT_FUNCTION(arc4random_buf); \\\n  COMMON_INTERCEPT_FUNCTION(arc4random_addrandom);\n#else\n#define INIT_ARC4RANDOM\n#endif\n\n#if SANITIZER_INTERCEPT_POPEN\nINTERCEPTOR(__sanitizer_FILE *, popen, const 
char *command, const char *type) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, popen, command, type);\n  if (command)\n    COMMON_INTERCEPTOR_READ_RANGE(ctx, command, internal_strlen(command) + 1);\n  if (type)\n    COMMON_INTERCEPTOR_READ_RANGE(ctx, type, internal_strlen(type) + 1);\n  __sanitizer_FILE *res = REAL(popen)(command, type);\n  COMMON_INTERCEPTOR_FILE_OPEN(ctx, res, nullptr);\n  if (res) unpoison_file(res);\n  return res;\n}\n#define INIT_POPEN COMMON_INTERCEPT_FUNCTION(popen)\n#else\n#define INIT_POPEN\n#endif\n\n#if SANITIZER_INTERCEPT_POPENVE\nINTERCEPTOR(__sanitizer_FILE *, popenve, const char *path,\n            char *const *argv, char *const *envp, const char *type) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, popenve, path, argv, envp, type);\n  if (path)\n    COMMON_INTERCEPTOR_READ_RANGE(ctx, path, internal_strlen(path) + 1);\n  if (argv) {\n    for (char *const *pa = argv; ; ++pa) {\n      COMMON_INTERCEPTOR_READ_RANGE(ctx, pa, sizeof(char **));\n      if (!*pa)\n        break;\n      COMMON_INTERCEPTOR_READ_RANGE(ctx, *pa, internal_strlen(*pa) + 1);\n    }\n  }\n  if (envp) {\n    for (char *const *pa = envp; ; ++pa) {\n      COMMON_INTERCEPTOR_READ_RANGE(ctx, pa, sizeof(char **));\n      if (!*pa)\n        break;\n      COMMON_INTERCEPTOR_READ_RANGE(ctx, *pa, internal_strlen(*pa) + 1);\n    }\n  }\n  if (type)\n    COMMON_INTERCEPTOR_READ_RANGE(ctx, type, internal_strlen(type) + 1);\n  __sanitizer_FILE *res = REAL(popenve)(path, argv, envp, type);\n  COMMON_INTERCEPTOR_FILE_OPEN(ctx, res, nullptr);\n  if (res) unpoison_file(res);\n  return res;\n}\n#define INIT_POPENVE COMMON_INTERCEPT_FUNCTION(popenve)\n#else\n#define INIT_POPENVE\n#endif\n\n#if SANITIZER_INTERCEPT_PCLOSE\nINTERCEPTOR(int, pclose, __sanitizer_FILE *fp) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, pclose, fp);\n  COMMON_INTERCEPTOR_FILE_CLOSE(ctx, fp);\n  const FileMetadata *m = GetInterceptorMetadata(fp);\n  int res = REAL(pclose)(fp);\n  if (m) {\n    
COMMON_INTERCEPTOR_INITIALIZE_RANGE(*m->addr, *m->size);\n    DeleteInterceptorMetadata(fp);\n  }\n  return res;\n}\n#define INIT_PCLOSE COMMON_INTERCEPT_FUNCTION(pclose);\n#else\n#define INIT_PCLOSE\n#endif\n\n#if SANITIZER_INTERCEPT_FUNOPEN\ntypedef int (*funopen_readfn)(void *cookie, char *buf, int len);\ntypedef int (*funopen_writefn)(void *cookie, const char *buf, int len);\ntypedef OFF_T (*funopen_seekfn)(void *cookie, OFF_T offset, int whence);\ntypedef int (*funopen_closefn)(void *cookie);\n\nstruct WrappedFunopenCookie {\n  void *real_cookie;\n  funopen_readfn real_read;\n  funopen_writefn real_write;\n  funopen_seekfn real_seek;\n  funopen_closefn real_close;\n};\n\nstatic int wrapped_funopen_read(void *cookie, char *buf, int len) {\n  COMMON_INTERCEPTOR_UNPOISON_PARAM(3);\n  WrappedFunopenCookie *wrapped_cookie = (WrappedFunopenCookie *)cookie;\n  funopen_readfn real_read = wrapped_cookie->real_read;\n  return real_read(wrapped_cookie->real_cookie, buf, len);\n}\n\nstatic int wrapped_funopen_write(void *cookie, const char *buf, int len) {\n  COMMON_INTERCEPTOR_UNPOISON_PARAM(3);\n  WrappedFunopenCookie *wrapped_cookie = (WrappedFunopenCookie *)cookie;\n  funopen_writefn real_write = wrapped_cookie->real_write;\n  return real_write(wrapped_cookie->real_cookie, buf, len);\n}\n\nstatic OFF_T wrapped_funopen_seek(void *cookie, OFF_T offset, int whence) {\n  COMMON_INTERCEPTOR_UNPOISON_PARAM(3);\n  WrappedFunopenCookie *wrapped_cookie = (WrappedFunopenCookie *)cookie;\n  funopen_seekfn real_seek = wrapped_cookie->real_seek;\n  return real_seek(wrapped_cookie->real_cookie, offset, whence);\n}\n\nstatic int wrapped_funopen_close(void *cookie) {\n  COMMON_INTERCEPTOR_UNPOISON_PARAM(1);\n  WrappedFunopenCookie *wrapped_cookie = (WrappedFunopenCookie *)cookie;\n  funopen_closefn real_close = wrapped_cookie->real_close;\n  int res = real_close(wrapped_cookie->real_cookie);\n  InternalFree(wrapped_cookie);\n  return res;\n}\n\nINTERCEPTOR(__sanitizer_FILE *, 
funopen, void *cookie, funopen_readfn readfn,\n            funopen_writefn writefn, funopen_seekfn seekfn,\n            funopen_closefn closefn) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, funopen, cookie, readfn, writefn, seekfn,\n                           closefn);\n\n  WrappedFunopenCookie *wrapped_cookie =\n      (WrappedFunopenCookie *)InternalAlloc(sizeof(WrappedFunopenCookie));\n  wrapped_cookie->real_cookie = cookie;\n  wrapped_cookie->real_read = readfn;\n  wrapped_cookie->real_write = writefn;\n  wrapped_cookie->real_seek = seekfn;\n  wrapped_cookie->real_close = closefn;\n\n  __sanitizer_FILE *res =\n      REAL(funopen)(wrapped_cookie,\n                    readfn  ? wrapped_funopen_read  : nullptr,\n                    writefn ? wrapped_funopen_write : nullptr,\n                    seekfn  ? wrapped_funopen_seek  : nullptr,\n                    closefn ? wrapped_funopen_close : nullptr);\n  if (res)\n    unpoison_file(res);\n  return res;\n}\n#define INIT_FUNOPEN COMMON_INTERCEPT_FUNCTION(funopen)\n#else\n#define INIT_FUNOPEN\n#endif\n\n#if SANITIZER_INTERCEPT_FUNOPEN2\ntypedef SSIZE_T (*funopen2_readfn)(void *cookie, void *buf, SIZE_T len);\ntypedef SSIZE_T (*funopen2_writefn)(void *cookie, const void *buf, SIZE_T len);\ntypedef OFF_T (*funopen2_seekfn)(void *cookie, OFF_T offset, int whence);\ntypedef int (*funopen2_flushfn)(void *cookie);\ntypedef int (*funopen2_closefn)(void *cookie);\n\nstruct WrappedFunopen2Cookie {\n  void *real_cookie;\n  funopen2_readfn real_read;\n  funopen2_writefn real_write;\n  funopen2_seekfn real_seek;\n  funopen2_flushfn real_flush;\n  funopen2_closefn real_close;\n};\n\nstatic SSIZE_T wrapped_funopen2_read(void *cookie, void *buf, SIZE_T len) {\n  COMMON_INTERCEPTOR_UNPOISON_PARAM(3);\n  WrappedFunopen2Cookie *wrapped_cookie = (WrappedFunopen2Cookie *)cookie;\n  funopen2_readfn real_read = wrapped_cookie->real_read;\n  return real_read(wrapped_cookie->real_cookie, buf, len);\n}\n\nstatic SSIZE_T 
wrapped_funopen2_write(void *cookie, const void *buf,\n                                      SIZE_T len) {\n  COMMON_INTERCEPTOR_UNPOISON_PARAM(3);\n  WrappedFunopen2Cookie *wrapped_cookie = (WrappedFunopen2Cookie *)cookie;\n  funopen2_writefn real_write = wrapped_cookie->real_write;\n  return real_write(wrapped_cookie->real_cookie, buf, len);\n}\n\nstatic OFF_T wrapped_funopen2_seek(void *cookie, OFF_T offset, int whence) {\n  COMMON_INTERCEPTOR_UNPOISON_PARAM(3);\n  WrappedFunopen2Cookie *wrapped_cookie = (WrappedFunopen2Cookie *)cookie;\n  funopen2_seekfn real_seek = wrapped_cookie->real_seek;\n  return real_seek(wrapped_cookie->real_cookie, offset, whence);\n}\n\nstatic int wrapped_funopen2_flush(void *cookie) {\n  COMMON_INTERCEPTOR_UNPOISON_PARAM(1);\n  WrappedFunopen2Cookie *wrapped_cookie = (WrappedFunopen2Cookie *)cookie;\n  funopen2_flushfn real_flush = wrapped_cookie->real_flush;\n  return real_flush(wrapped_cookie->real_cookie);\n}\n\nstatic int wrapped_funopen2_close(void *cookie) {\n  COMMON_INTERCEPTOR_UNPOISON_PARAM(1);\n  WrappedFunopen2Cookie *wrapped_cookie = (WrappedFunopen2Cookie *)cookie;\n  funopen2_closefn real_close = wrapped_cookie->real_close;\n  int res = real_close(wrapped_cookie->real_cookie);\n  InternalFree(wrapped_cookie);\n  return res;\n}\n\nINTERCEPTOR(__sanitizer_FILE *, funopen2, void *cookie, funopen2_readfn readfn,\n            funopen2_writefn writefn, funopen2_seekfn seekfn,\n            funopen2_flushfn flushfn, funopen2_closefn closefn) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, funopen2, cookie, readfn, writefn, seekfn,\n                           flushfn, closefn);\n\n  WrappedFunopen2Cookie *wrapped_cookie =\n      (WrappedFunopen2Cookie *)InternalAlloc(sizeof(WrappedFunopen2Cookie));\n  wrapped_cookie->real_cookie = cookie;\n  wrapped_cookie->real_read = readfn;\n  wrapped_cookie->real_write = writefn;\n  wrapped_cookie->real_seek = seekfn;\n  wrapped_cookie->real_flush = flushfn;\n  wrapped_cookie->real_close = 
closefn;\n\n  __sanitizer_FILE *res =\n      REAL(funopen2)(wrapped_cookie,\n                     readfn  ? wrapped_funopen2_read  : nullptr,\n                     writefn ? wrapped_funopen2_write : nullptr,\n                     seekfn  ? wrapped_funopen2_seek  : nullptr,\n                     flushfn ? wrapped_funopen2_flush : nullptr,\n                     closefn ? wrapped_funopen2_close : nullptr);\n  if (res)\n    unpoison_file(res);\n  return res;\n}\n#define INIT_FUNOPEN2 COMMON_INTERCEPT_FUNCTION(funopen2)\n#else\n#define INIT_FUNOPEN2\n#endif\n\n#if SANITIZER_INTERCEPT_FDEVNAME\nINTERCEPTOR(char *, fdevname,  int fd) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, fdevname, fd);\n  COMMON_INTERCEPTOR_FD_ACCESS(ctx, fd);\n  char *name = REAL(fdevname)(fd);\n  if (name) {\n    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, name, internal_strlen(name) + 1);\n    if (fd > 0)\n      COMMON_INTERCEPTOR_FD_ACQUIRE(ctx, fd);\n  }\n  return name;\n}\n\nINTERCEPTOR(char *, fdevname_r,  int fd, char *buf, SIZE_T len) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, fdevname_r, fd, buf, len);\n  COMMON_INTERCEPTOR_FD_ACCESS(ctx, fd);\n  char *name = REAL(fdevname_r)(fd, buf, len);\n  if (name && buf && len > 0) {\n    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buf, internal_strlen(buf) + 1);\n    if (fd > 0)\n      COMMON_INTERCEPTOR_FD_ACQUIRE(ctx, fd);\n  }\n  return name;\n}\n\n#define INIT_FDEVNAME \\\n  COMMON_INTERCEPT_FUNCTION(fdevname); \\\n  COMMON_INTERCEPT_FUNCTION(fdevname_r);\n#else\n#define INIT_FDEVNAME\n#endif\n\n#if SANITIZER_INTERCEPT_GETUSERSHELL\nINTERCEPTOR(char *, getusershell) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, getusershell);\n  char *res = REAL(getusershell)();\n  if (res)\n    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, res, internal_strlen(res) + 1);\n  return res;\n}\n\n#define INIT_GETUSERSHELL COMMON_INTERCEPT_FUNCTION(getusershell);\n#else\n#define INIT_GETUSERSHELL\n#endif\n\n#if SANITIZER_INTERCEPT_SL_INIT\nINTERCEPTOR(void *, sl_init) {\n  void 
*ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, sl_init);\n  void *res = REAL(sl_init)();\n  if (res)\n    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, res, __sanitizer::struct_StringList_sz);\n  return res;\n}\n\nINTERCEPTOR(int, sl_add, void *sl, char *item) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, sl_add, sl, item);\n  if (sl)\n    COMMON_INTERCEPTOR_READ_RANGE(ctx, sl, __sanitizer::struct_StringList_sz);\n  if (item)\n    COMMON_INTERCEPTOR_READ_RANGE(ctx, item, internal_strlen(item) + 1);\n  int res = REAL(sl_add)(sl, item);\n  if (!res)\n    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, sl, __sanitizer::struct_StringList_sz);\n  return res;\n}\n\nINTERCEPTOR(char *, sl_find, void *sl, const char *item) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, sl_find, sl, item);\n  if (sl)\n    COMMON_INTERCEPTOR_READ_RANGE(ctx, sl, __sanitizer::struct_StringList_sz);\n  if (item)\n    COMMON_INTERCEPTOR_READ_RANGE(ctx, item, internal_strlen(item) + 1);\n  char *res = REAL(sl_find)(sl, item);\n  if (res)\n    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, res, internal_strlen(res) + 1);\n  return res;\n}\n\nINTERCEPTOR(void, sl_free, void *sl, int freeall) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, sl_free, sl, freeall);\n  if (sl)\n    COMMON_INTERCEPTOR_READ_RANGE(ctx, sl, __sanitizer::struct_StringList_sz);\n  REAL(sl_free)(sl, freeall);\n}\n\n#define INIT_SL_INIT                  \\\n  COMMON_INTERCEPT_FUNCTION(sl_init); \\\n  COMMON_INTERCEPT_FUNCTION(sl_add);  \\\n  COMMON_INTERCEPT_FUNCTION(sl_find); \\\n  COMMON_INTERCEPT_FUNCTION(sl_free);\n#else\n#define INIT_SL_INIT\n#endif\n\n#if SANITIZER_INTERCEPT_GETRANDOM\nINTERCEPTOR(SSIZE_T, getrandom, void *buf, SIZE_T buflen, unsigned int flags) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, getrandom, buf, buflen, flags);\n  SSIZE_T n = REAL(getrandom)(buf, buflen, flags);\n  if (n > 0) {\n    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buf, n);\n  }\n  return n;\n}\n#define INIT_GETRANDOM COMMON_INTERCEPT_FUNCTION(getrandom)\n#else\n#define 
INIT_GETRANDOM\n#endif\n\n#if SANITIZER_INTERCEPT_CRYPT\nINTERCEPTOR(char *, crypt, char *key, char *salt) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, crypt, key, salt);\n  COMMON_INTERCEPTOR_READ_RANGE(ctx, key, internal_strlen(key) + 1);\n  COMMON_INTERCEPTOR_READ_RANGE(ctx, salt, internal_strlen(salt) + 1);\n  char *res = REAL(crypt)(key, salt);\n  if (res != nullptr)\n    COMMON_INTERCEPTOR_INITIALIZE_RANGE(res, internal_strlen(res) + 1);\n  return res;\n}\n#define INIT_CRYPT COMMON_INTERCEPT_FUNCTION(crypt);\n#else\n#define INIT_CRYPT\n#endif\n\n#if SANITIZER_INTERCEPT_CRYPT_R\nINTERCEPTOR(char *, crypt_r, char *key, char *salt, void *data) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, crypt_r, key, salt, data);\n  COMMON_INTERCEPTOR_READ_RANGE(ctx, key, internal_strlen(key) + 1);\n  COMMON_INTERCEPTOR_READ_RANGE(ctx, salt, internal_strlen(salt) + 1);\n  char *res = REAL(crypt_r)(key, salt, data);\n  if (res != nullptr) {\n    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, data,\n                                   __sanitizer::struct_crypt_data_sz);\n    COMMON_INTERCEPTOR_INITIALIZE_RANGE(res, internal_strlen(res) + 1);\n  }\n  return res;\n}\n#define INIT_CRYPT_R COMMON_INTERCEPT_FUNCTION(crypt_r);\n#else\n#define INIT_CRYPT_R\n#endif\n\n#if SANITIZER_INTERCEPT_GETENTROPY\nINTERCEPTOR(int, getentropy, void *buf, SIZE_T buflen) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, getentropy, buf, buflen);\n  int r = REAL(getentropy)(buf, buflen);\n  if (r == 0) {\n    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buf, buflen);\n  }\n  return r;\n}\n#define INIT_GETENTROPY COMMON_INTERCEPT_FUNCTION(getentropy)\n#else\n#define INIT_GETENTROPY\n#endif\n\n#if SANITIZER_INTERCEPT_QSORT_R\ntypedef int (*qsort_r_compar_f)(const void *, const void *, void *);\nstruct qsort_r_compar_params {\n  SIZE_T size;\n  qsort_r_compar_f compar;\n  void *arg;\n};\nstatic int wrapped_qsort_r_compar(const void *a, const void *b, void *arg) {\n  qsort_r_compar_params *params = 
(qsort_r_compar_params *)arg;\n  COMMON_INTERCEPTOR_UNPOISON_PARAM(3);\n  COMMON_INTERCEPTOR_INITIALIZE_RANGE(a, params->size);\n  COMMON_INTERCEPTOR_INITIALIZE_RANGE(b, params->size);\n  return params->compar(a, b, params->arg);\n}\n\nINTERCEPTOR(void, qsort_r, void *base, SIZE_T nmemb, SIZE_T size,\n            qsort_r_compar_f compar, void *arg) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, qsort_r, base, nmemb, size, compar, arg);\n  // Run the comparator over all array elements to detect any memory issues.\n  if (nmemb > 1) {\n    for (SIZE_T i = 0; i < nmemb - 1; ++i) {\n      void *p = (void *)((char *)base + i * size);\n      void *q = (void *)((char *)base + (i + 1) * size);\n      COMMON_INTERCEPTOR_UNPOISON_PARAM(3);\n      compar(p, q, arg);\n    }\n  }\n  qsort_r_compar_params params = {size, compar, arg};\n  REAL(qsort_r)(base, nmemb, size, wrapped_qsort_r_compar, &params);\n  COMMON_INTERCEPTOR_WRITE_RANGE(ctx, base, nmemb * size);\n}\n#  define INIT_QSORT_R COMMON_INTERCEPT_FUNCTION(qsort_r)\n#else\n#  define INIT_QSORT_R\n#endif\n\n#if SANITIZER_INTERCEPT_QSORT && SANITIZER_INTERCEPT_QSORT_R\nINTERCEPTOR(void, qsort, void *base, SIZE_T nmemb, SIZE_T size,\n            qsort_r_compar_f compar) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, qsort, base, nmemb, size, compar);\n  WRAP(qsort_r)(base, nmemb, size, compar, nullptr);\n}\n#  define INIT_QSORT COMMON_INTERCEPT_FUNCTION(qsort)\n#elif SANITIZER_INTERCEPT_QSORT && !SANITIZER_INTERCEPT_QSORT_R\n// Glibc qsort uses a temporary buffer allocated either on stack or on heap.\n// Poisoned memory from there may get copied into the comparator arguments,\n// where it needs to be dealt with. But even that is not enough - the results of\n// the sort may be copied into the input/output array based on the results of\n// the comparator calls, but directly from the temp memory, bypassing the\n// unpoisoning done in wrapped_qsort_compar. 
We deal with this by, again,\n// unpoisoning the entire array after the sort is done.\n//\n// We can not check that the entire array is initialized at the beginning. IMHO,\n// it's fine for parts of the sorted objects to contain uninitialized memory,\n// ex. as padding in structs.\ntypedef int (*qsort_compar_f)(const void *, const void *);\nstatic THREADLOCAL qsort_compar_f qsort_compar;\nstatic THREADLOCAL SIZE_T qsort_size;\nstatic int wrapped_qsort_compar(const void *a, const void *b) {\n  COMMON_INTERCEPTOR_UNPOISON_PARAM(2);\n  COMMON_INTERCEPTOR_INITIALIZE_RANGE(a, qsort_size);\n  COMMON_INTERCEPTOR_INITIALIZE_RANGE(b, qsort_size);\n  return qsort_compar(a, b);\n}\n\nINTERCEPTOR(void, qsort, void *base, SIZE_T nmemb, SIZE_T size,\n            qsort_compar_f compar) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, qsort, base, nmemb, size, compar);\n  // Run the comparator over all array elements to detect any memory issues.\n  if (nmemb > 1) {\n    for (SIZE_T i = 0; i < nmemb - 1; ++i) {\n      void *p = (void *)((char *)base + i * size);\n      void *q = (void *)((char *)base + (i + 1) * size);\n      COMMON_INTERCEPTOR_UNPOISON_PARAM(2);\n      compar(p, q);\n    }\n  }\n  qsort_compar_f old_compar = qsort_compar;\n  SIZE_T old_size = qsort_size;\n  // Handle qsort() implementations that recurse using an\n  // interposable function call:\n  bool already_wrapped = compar == wrapped_qsort_compar;\n  if (already_wrapped) {\n    // This case should only happen if the qsort() implementation calls itself\n    // using a preemptible function call (e.g. 
the FreeBSD libc version).\n    // Check that the size and comparator arguments are as expected.\n    CHECK_NE(compar, qsort_compar);\n    CHECK_EQ(qsort_size, size);\n  } else {\n    qsort_compar = compar;\n    qsort_size = size;\n  }\n  REAL(qsort)(base, nmemb, size, wrapped_qsort_compar);\n  if (!already_wrapped) {\n    qsort_compar = old_compar;\n    qsort_size = old_size;\n  }\n  COMMON_INTERCEPTOR_WRITE_RANGE(ctx, base, nmemb * size);\n}\n#  define INIT_QSORT COMMON_INTERCEPT_FUNCTION(qsort)\n#else\n#  define INIT_QSORT\n#endif\n\n#if SANITIZER_INTERCEPT_BSEARCH\ntypedef int (*bsearch_compar_f)(const void *, const void *);\nstruct bsearch_compar_params {\n  const void *key;\n  bsearch_compar_f compar;\n};\n\nstatic int wrapped_bsearch_compar(const void *key, const void *b) {\n  const bsearch_compar_params *params = (const bsearch_compar_params *)key;\n  COMMON_INTERCEPTOR_UNPOISON_PARAM(2);\n  return params->compar(params->key, b);\n}\n\nINTERCEPTOR(void *, bsearch, const void *key, const void *base, SIZE_T nmemb,\n            SIZE_T size, bsearch_compar_f compar) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, bsearch, key, base, nmemb, size, compar);\n  bsearch_compar_params params = {key, compar};\n  return REAL(bsearch)(&params, base, nmemb, size, wrapped_bsearch_compar);\n}\n#  define INIT_BSEARCH COMMON_INTERCEPT_FUNCTION(bsearch)\n#else\n#  define INIT_BSEARCH\n#endif\n\n#if SANITIZER_INTERCEPT_SIGALTSTACK\nINTERCEPTOR(int, sigaltstack, void *ss, void *oss) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, sigaltstack, ss, oss);\n  int r = REAL(sigaltstack)(ss, oss);\n  if (r == 0 && oss != nullptr) {\n    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, oss, struct_stack_t_sz);\n  }\n  return r;\n}\n#define INIT_SIGALTSTACK COMMON_INTERCEPT_FUNCTION(sigaltstack)\n#else\n#define INIT_SIGALTSTACK\n#endif\n\n#if SANITIZER_INTERCEPT_UNAME\nINTERCEPTOR(int, uname, struct utsname *utsname) {\n#if SANITIZER_LINUX\n  if (COMMON_INTERCEPTOR_NOTHING_IS_INITIALIZED)\n    
return internal_uname(utsname);\n#endif\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, uname, utsname);\n  int res = REAL(uname)(utsname);\n  if (!res)\n    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, utsname,\n                                   __sanitizer::struct_utsname_sz);\n  return res;\n}\n#define INIT_UNAME COMMON_INTERCEPT_FUNCTION(uname)\n#else\n#define INIT_UNAME\n#endif\n\n#if SANITIZER_INTERCEPT___XUNAME\n// FreeBSD's <sys/utsname.h> define uname() as\n// static __inline int uname(struct utsname *name) {\n//   return __xuname(SYS_NMLN, (void*)name);\n// }\nINTERCEPTOR(int, __xuname, int size, void *utsname) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, __xuname, size, utsname);\n  int res = REAL(__xuname)(size, utsname);\n  if (!res)\n    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, utsname,\n                                   __sanitizer::struct_utsname_sz);\n  return res;\n}\n#define INIT___XUNAME COMMON_INTERCEPT_FUNCTION(__xuname)\n#else\n#define INIT___XUNAME\n#endif\n\n#include \"sanitizer_common_interceptors_netbsd_compat.inc\"\n\nstatic void InitializeCommonInterceptors() {\n#if SI_POSIX\n  static u64 metadata_mem[sizeof(MetadataHashMap) / sizeof(u64) + 1];\n  interceptor_metadata_map = new ((void *)&metadata_mem) MetadataHashMap();\n#endif\n\n  INIT_MMAP;\n  INIT_MMAP64;\n  INIT_TEXTDOMAIN;\n  INIT_STRLEN;\n  INIT_STRNLEN;\n  INIT_STRNDUP;\n  INIT___STRNDUP;\n  INIT_STRCMP;\n  INIT_STRNCMP;\n  INIT_STRCASECMP;\n  INIT_STRNCASECMP;\n  INIT_STRSTR;\n  INIT_STRCASESTR;\n  INIT_STRCHR;\n  INIT_STRCHRNUL;\n  INIT_STRRCHR;\n  INIT_STRSPN;\n  INIT_STRTOK;\n  INIT_STRPBRK;\n  INIT_STRXFRM;\n  INIT___STRXFRM_L;\n  INIT_MEMSET;\n  INIT_MEMMOVE;\n  INIT_MEMCPY;\n  INIT_MEMCHR;\n  INIT_MEMCMP;\n  INIT_BCMP;\n  INIT_MEMRCHR;\n  INIT_MEMMEM;\n  INIT_READ;\n  INIT_FREAD;\n  INIT_PREAD;\n  INIT_PREAD64;\n  INIT_READV;\n  INIT_PREADV;\n  INIT_PREADV64;\n  INIT_WRITE;\n  INIT_FWRITE;\n  INIT_PWRITE;\n  INIT_PWRITE64;\n  INIT_WRITEV;\n  INIT_PWRITEV;\n  INIT_PWRITEV64;\n  
INIT_FGETS;\n  INIT_FPUTS;\n  INIT_PUTS;\n  INIT_PRCTL;\n  INIT_LOCALTIME_AND_FRIENDS;\n  INIT_STRPTIME;\n  INIT_SCANF;\n  INIT_ISOC99_SCANF;\n  INIT_PRINTF;\n  INIT_PRINTF_L;\n  INIT_ISOC99_PRINTF;\n  INIT_FREXP;\n  INIT_FREXPF_FREXPL;\n  INIT_GETPWNAM_AND_FRIENDS;\n  INIT_GETPWNAM_R_AND_FRIENDS;\n  INIT_GETPWENT;\n  INIT_FGETPWENT;\n  INIT_GETPWENT_R;\n  INIT_FGETPWENT_R;\n  INIT_FGETGRENT_R;\n  INIT_SETPWENT;\n  INIT_CLOCK_GETTIME;\n  INIT_CLOCK_GETCPUCLOCKID;\n  INIT_GETITIMER;\n  INIT_TIME;\n  INIT_GLOB;\n  INIT_GLOB64;\n  INIT_POSIX_SPAWN;\n  INIT_WAIT;\n  INIT_WAIT4;\n  INIT_INET;\n  INIT_PTHREAD_GETSCHEDPARAM;\n  INIT_GETADDRINFO;\n  INIT_GETNAMEINFO;\n  INIT_GETSOCKNAME;\n  INIT_GETHOSTBYNAME;\n  INIT_GETHOSTBYNAME2;\n  INIT_GETHOSTBYNAME_R;\n  INIT_GETHOSTBYNAME2_R;\n  INIT_GETHOSTBYADDR_R;\n  INIT_GETHOSTENT_R;\n  INIT_GETSOCKOPT;\n  INIT_ACCEPT;\n  INIT_ACCEPT4;\n  INIT_PACCEPT;\n  INIT_MODF;\n  INIT_RECVMSG;\n  INIT_SENDMSG;\n  INIT_RECVMMSG;\n  INIT_SENDMMSG;\n  INIT_SYSMSG;\n  INIT_GETPEERNAME;\n  INIT_IOCTL;\n  INIT_INET_ATON;\n  INIT_SYSINFO;\n  INIT_READDIR;\n  INIT_READDIR64;\n  INIT_PTRACE;\n  INIT_SETLOCALE;\n  INIT_GETCWD;\n  INIT_GET_CURRENT_DIR_NAME;\n  INIT_STRTOIMAX;\n  INIT_MBSTOWCS;\n  INIT_MBSNRTOWCS;\n  INIT_WCSTOMBS;\n  INIT_WCSNRTOMBS;\n  INIT_WCRTOMB;\n  INIT_WCTOMB;\n  INIT_TCGETATTR;\n  INIT_REALPATH;\n  INIT_CANONICALIZE_FILE_NAME;\n  INIT_CONFSTR;\n  INIT_SCHED_GETAFFINITY;\n  INIT_SCHED_GETPARAM;\n  INIT_STRERROR;\n  INIT_STRERROR_R;\n  INIT_XPG_STRERROR_R;\n  INIT_SCANDIR;\n  INIT_SCANDIR64;\n  INIT_GETGROUPS;\n  INIT_POLL;\n  INIT_PPOLL;\n  INIT_WORDEXP;\n  INIT_SIGWAIT;\n  INIT_SIGWAITINFO;\n  INIT_SIGTIMEDWAIT;\n  INIT_SIGSETOPS;\n  INIT_SIGSET_LOGICOPS;\n  INIT_SIGPENDING;\n  INIT_SIGPROCMASK;\n  INIT_PTHREAD_SIGMASK;\n  INIT_BACKTRACE;\n  INIT__EXIT;\n  INIT_PTHREAD_MUTEX_LOCK;\n  INIT_PTHREAD_MUTEX_UNLOCK;\n  INIT___PTHREAD_MUTEX_LOCK;\n  INIT___PTHREAD_MUTEX_UNLOCK;\n  INIT___LIBC_MUTEX_LOCK;\n  
INIT___LIBC_MUTEX_UNLOCK;\n  INIT___LIBC_THR_SETCANCELSTATE;\n  INIT_GETMNTENT;\n  INIT_GETMNTENT_R;\n  INIT_STATFS;\n  INIT_STATFS64;\n  INIT_STATVFS;\n  INIT_STATVFS64;\n  INIT_INITGROUPS;\n  INIT_ETHER_NTOA_ATON;\n  INIT_ETHER_HOST;\n  INIT_ETHER_R;\n  INIT_SHMCTL;\n  INIT_RANDOM_R;\n  INIT_PTHREAD_ATTR_GET;\n  INIT_PTHREAD_ATTR_GET_SCHED;\n  INIT_PTHREAD_ATTR_GETINHERITSCHED;\n  INIT_PTHREAD_ATTR_GETAFFINITY_NP;\n  INIT_PTHREAD_MUTEXATTR_GETPSHARED;\n  INIT_PTHREAD_MUTEXATTR_GETTYPE;\n  INIT_PTHREAD_MUTEXATTR_GETPROTOCOL;\n  INIT_PTHREAD_MUTEXATTR_GETPRIOCEILING;\n  INIT_PTHREAD_MUTEXATTR_GETROBUST;\n  INIT_PTHREAD_MUTEXATTR_GETROBUST_NP;\n  INIT_PTHREAD_RWLOCKATTR_GETPSHARED;\n  INIT_PTHREAD_RWLOCKATTR_GETKIND_NP;\n  INIT_PTHREAD_CONDATTR_GETPSHARED;\n  INIT_PTHREAD_CONDATTR_GETCLOCK;\n  INIT_PTHREAD_BARRIERATTR_GETPSHARED;\n  INIT_TMPNAM;\n  INIT_TMPNAM_R;\n  INIT_PTSNAME;\n  INIT_PTSNAME_R;\n  INIT_TTYNAME;\n  INIT_TTYNAME_R;\n  INIT_TEMPNAM;\n  INIT_PTHREAD_SETNAME_NP;\n  INIT_PTHREAD_GETNAME_NP;\n  INIT_SINCOS;\n  INIT_REMQUO;\n  INIT_REMQUOL;\n  INIT_LGAMMA;\n  INIT_LGAMMAL;\n  INIT_LGAMMA_R;\n  INIT_LGAMMAL_R;\n  INIT_DRAND48_R;\n  INIT_RAND_R;\n  INIT_GETLINE;\n  INIT_ICONV;\n  INIT_TIMES;\n  INIT_TLS_GET_ADDR;\n  INIT_LISTXATTR;\n  INIT_GETXATTR;\n  INIT_GETRESID;\n  INIT_GETIFADDRS;\n  INIT_IF_INDEXTONAME;\n  INIT_CAPGET;\n  INIT_AEABI_MEM;\n  INIT___BZERO;\n  INIT_BZERO;\n  INIT_FTIME;\n  INIT_XDR;\n  INIT_XDRREC_LINUX;\n  INIT_TSEARCH;\n  INIT_LIBIO_INTERNALS;\n  INIT_FOPEN;\n  INIT_FOPEN64;\n  INIT_FLOPEN;\n  INIT_OPEN_MEMSTREAM;\n  INIT_OBSTACK;\n  INIT_FFLUSH;\n  INIT_FCLOSE;\n  INIT_DLOPEN_DLCLOSE;\n  INIT_GETPASS;\n  INIT_TIMERFD;\n  INIT_MLOCKX;\n  INIT_FOPENCOOKIE;\n  INIT_SEM;\n  INIT_PTHREAD_SETCANCEL;\n  INIT_MINCORE;\n  INIT_PROCESS_VM_READV;\n  INIT_CTERMID;\n  INIT_CTERMID_R;\n  INIT_RECV_RECVFROM;\n  INIT_SEND_SENDTO;\n  INIT_STAT;\n  INIT_EVENTFD_READ_WRITE;\n  INIT_LSTAT;\n  INIT___XSTAT;\n  INIT___XSTAT64;\n  INIT___LXSTAT;\n  
INIT___LXSTAT64;\n  // FIXME: add other *stat interceptors.\n  INIT_UTMP;\n  INIT_UTMPX;\n  INIT_GETLOADAVG;\n  INIT_WCSLEN;\n  INIT_WCSCAT;\n  INIT_WCSDUP;\n  INIT_WCSXFRM;\n  INIT___WCSXFRM_L;\n  INIT_ACCT;\n  INIT_USER_FROM_UID;\n  INIT_UID_FROM_USER;\n  INIT_GROUP_FROM_GID;\n  INIT_GID_FROM_GROUP;\n  INIT_ACCESS;\n  INIT_FACCESSAT;\n  INIT_GETGROUPLIST;\n  INIT_GETGROUPMEMBERSHIP;\n  INIT_READLINK;\n  INIT_READLINKAT;\n  INIT_NAME_TO_HANDLE_AT;\n  INIT_OPEN_BY_HANDLE_AT;\n  INIT_STRLCPY;\n  INIT_DEVNAME;\n  INIT_DEVNAME_R;\n  INIT_FGETLN;\n  INIT_STRMODE;\n  INIT_TTYENT;\n  INIT_PROTOENT;\n  INIT_PROTOENT_R;\n  INIT_NETENT;\n  INIT_GETMNTINFO;\n  INIT_MI_VECTOR_HASH;\n  INIT_SETVBUF;\n  INIT_GETVFSSTAT;\n  INIT_REGEX;\n  INIT_REGEXSUB;\n  INIT_FTS;\n  INIT_SYSCTL;\n  INIT_ASYSCTL;\n  INIT_SYSCTLGETMIBINFO;\n  INIT_NL_LANGINFO;\n  INIT_MODCTL;\n  INIT_STRTONUM;\n  INIT_FPARSELN;\n  INIT_STATVFS1;\n  INIT_STRTOI;\n  INIT_CAPSICUM;\n  INIT_SHA1;\n  INIT_MD4;\n  INIT_RMD160;\n  INIT_MD5;\n  INIT_FSEEK;\n  INIT_MD2;\n  INIT_SHA2;\n  INIT_VIS;\n  INIT_CDB;\n  INIT_GETFSENT;\n  INIT_ARC4RANDOM;\n  INIT_POPEN;\n  INIT_POPENVE;\n  INIT_PCLOSE;\n  INIT_FUNOPEN;\n  INIT_FUNOPEN2;\n  INIT_FDEVNAME;\n  INIT_GETUSERSHELL;\n  INIT_SL_INIT;\n  INIT_GETRANDOM;\n  INIT_CRYPT;\n  INIT_CRYPT_R;\n  INIT_GETENTROPY;\n  INIT_QSORT;\n  INIT_QSORT_R;\n  INIT_BSEARCH;\n  INIT_SIGALTSTACK;\n  INIT_UNAME;\n  INIT___XUNAME;\n\n  INIT___PRINTF_CHK;\n}\n"
  },
  {
    "path": "runtime/sanitizer_common/sanitizer_common_interceptors_format.inc",
    "content": "//===-- sanitizer_common_interceptors_format.inc ----------------*- C++ -*-===//\n//\n// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n// See https://llvm.org/LICENSE.txt for license information.\n// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n//\n//===----------------------------------------------------------------------===//\n//\n// Scanf/printf implementation for use in *Sanitizer interceptors.\n// Follows http://pubs.opengroup.org/onlinepubs/9699919799/functions/fscanf.html\n// and http://pubs.opengroup.org/onlinepubs/9699919799/functions/fprintf.html\n// with a few common GNU extensions.\n//\n//===----------------------------------------------------------------------===//\n\n#include <stdarg.h>\n\nstatic const char *parse_number(const char *p, int *out) {\n  *out = internal_atoll(p);\n  while (*p >= '0' && *p <= '9')\n    ++p;\n  return p;\n}\n\nstatic const char *maybe_parse_param_index(const char *p, int *out) {\n  // n$\n  if (*p >= '0' && *p <= '9') {\n    int number;\n    const char *q = parse_number(p, &number);\n    CHECK(q);\n    if (*q == '$') {\n      *out = number;\n      p = q + 1;\n    }\n  }\n\n  // Otherwise, do not change p. 
This will be re-parsed later as the field\n  // width.\n  return p;\n}\n\nstatic bool char_is_one_of(char c, const char *s) {\n  return !!internal_strchr(s, c);\n}\n\nstatic const char *maybe_parse_length_modifier(const char *p, char ll[2]) {\n  if (char_is_one_of(*p, \"jztLq\")) {\n    ll[0] = *p;\n    ++p;\n  } else if (*p == 'h') {\n    ll[0] = 'h';\n    ++p;\n    if (*p == 'h') {\n      ll[1] = 'h';\n      ++p;\n    }\n  } else if (*p == 'l') {\n    ll[0] = 'l';\n    ++p;\n    if (*p == 'l') {\n      ll[1] = 'l';\n      ++p;\n    }\n  }\n  return p;\n}\n\n// Returns true if the character is an integer conversion specifier.\nstatic bool format_is_integer_conv(char c) {\n  return char_is_one_of(c, \"diouxXn\");\n}\n\n// Returns true if the character is an floating point conversion specifier.\nstatic bool format_is_float_conv(char c) {\n  return char_is_one_of(c, \"aAeEfFgG\");\n}\n\n// Returns string output character size for string-like conversions,\n// or 0 if the conversion is invalid.\nstatic int format_get_char_size(char convSpecifier,\n                                const char lengthModifier[2]) {\n  if (char_is_one_of(convSpecifier, \"CS\")) {\n    return sizeof(wchar_t);\n  }\n\n  if (char_is_one_of(convSpecifier, \"cs[\")) {\n    if (lengthModifier[0] == 'l' && lengthModifier[1] == '\\0')\n      return sizeof(wchar_t);\n    else if (lengthModifier[0] == '\\0')\n      return sizeof(char);\n  }\n\n  return 0;\n}\n\nenum FormatStoreSize {\n  // Store size not known in advance; can be calculated as wcslen() of the\n  // destination buffer.\n  FSS_WCSLEN = -2,\n  // Store size not known in advance; can be calculated as strlen() of the\n  // destination buffer.\n  FSS_STRLEN = -1,\n  // Invalid conversion specifier.\n  FSS_INVALID = 0\n};\n\n// Returns the memory size of a format directive (if >0), or a value of\n// FormatStoreSize.\nstatic int format_get_value_size(char convSpecifier,\n                                 const char lengthModifier[2],\n          
                       bool promote_float) {\n  if (format_is_integer_conv(convSpecifier)) {\n    switch (lengthModifier[0]) {\n    case 'h':\n      return lengthModifier[1] == 'h' ? sizeof(char) : sizeof(short);\n    case 'l':\n      return lengthModifier[1] == 'l' ? sizeof(long long) : sizeof(long);\n    case 'q':\n      return sizeof(long long);\n    case 'L':\n      return sizeof(long long);\n    case 'j':\n      return sizeof(INTMAX_T);\n    case 'z':\n      return sizeof(SIZE_T);\n    case 't':\n      return sizeof(PTRDIFF_T);\n    case 0:\n      return sizeof(int);\n    default:\n      return FSS_INVALID;\n    }\n  }\n\n  if (format_is_float_conv(convSpecifier)) {\n    switch (lengthModifier[0]) {\n    case 'L':\n    case 'q':\n      return sizeof(long double);\n    case 'l':\n      return lengthModifier[1] == 'l' ? sizeof(long double)\n                                           : sizeof(double);\n    case 0:\n      // Printf promotes floats to doubles but scanf does not\n      return promote_float ? sizeof(double) : sizeof(float);\n    default:\n      return FSS_INVALID;\n    }\n  }\n\n  if (convSpecifier == 'p') {\n    if (lengthModifier[0] != 0)\n      return FSS_INVALID;\n    return sizeof(void *);\n  }\n\n  return FSS_INVALID;\n}\n\nstruct ScanfDirective {\n  int argIdx; // argument index, or -1 if not specified (\"%n$\")\n  int fieldWidth;\n  const char *begin;\n  const char *end;\n  bool suppressed; // suppress assignment (\"*\")\n  bool allocate;   // allocate space (\"m\")\n  char lengthModifier[2];\n  char convSpecifier;\n  bool maybeGnuMalloc;\n};\n\n// Parse scanf format string. If a valid directive in encountered, it is\n// returned in dir. 
This function returns the pointer to the first\n// unprocessed character, or 0 in case of error.\n// In case of the end-of-string, a pointer to the closing \\0 is returned.\nstatic const char *scanf_parse_next(const char *p, bool allowGnuMalloc,\n                                    ScanfDirective *dir) {\n  internal_memset(dir, 0, sizeof(*dir));\n  dir->argIdx = -1;\n\n  while (*p) {\n    if (*p != '%') {\n      ++p;\n      continue;\n    }\n    dir->begin = p;\n    ++p;\n    // %%\n    if (*p == '%') {\n      ++p;\n      continue;\n    }\n    if (*p == '\\0') {\n      return nullptr;\n    }\n    // %n$\n    p = maybe_parse_param_index(p, &dir->argIdx);\n    CHECK(p);\n    // *\n    if (*p == '*') {\n      dir->suppressed = true;\n      ++p;\n    }\n    // Field width\n    if (*p >= '0' && *p <= '9') {\n      p = parse_number(p, &dir->fieldWidth);\n      CHECK(p);\n      if (dir->fieldWidth <= 0)  // Width if at all must be non-zero\n        return nullptr;\n    }\n    // m\n    if (*p == 'm') {\n      dir->allocate = true;\n      ++p;\n    }\n    // Length modifier.\n    p = maybe_parse_length_modifier(p, dir->lengthModifier);\n    // Conversion specifier.\n    dir->convSpecifier = *p++;\n    // Consume %[...] expression.\n    if (dir->convSpecifier == '[') {\n      if (*p == '^')\n        ++p;\n      if (*p == ']')\n        ++p;\n      while (*p && *p != ']')\n        ++p;\n      if (*p == 0)\n        return nullptr; // unexpected end of string\n                        // Consume the closing ']'.\n      ++p;\n    }\n    // This is unfortunately ambiguous between old GNU extension\n    // of %as, %aS and %a[...] and newer POSIX %a followed by\n    // letters s, S or [.\n    if (allowGnuMalloc && dir->convSpecifier == 'a' &&\n        !dir->lengthModifier[0]) {\n      if (*p == 's' || *p == 'S') {\n        dir->maybeGnuMalloc = true;\n        ++p;\n      } else if (*p == '[') {\n        // Watch for %a[h-j%d], if % appears in the\n        // [...] 
range, then we need to give up, we don't know\n        // if scanf will parse it as POSIX %a [h-j %d ] or\n        // GNU allocation of string with range dh-j plus %.\n        const char *q = p + 1;\n        if (*q == '^')\n          ++q;\n        if (*q == ']')\n          ++q;\n        while (*q && *q != ']' && *q != '%')\n          ++q;\n        if (*q == 0 || *q == '%')\n          return nullptr;\n        p = q + 1; // Consume the closing ']'.\n        dir->maybeGnuMalloc = true;\n      }\n    }\n    dir->end = p;\n    break;\n  }\n  return p;\n}\n\nstatic int scanf_get_value_size(ScanfDirective *dir) {\n  if (dir->allocate) {\n    if (!char_is_one_of(dir->convSpecifier, \"cCsS[\"))\n      return FSS_INVALID;\n    return sizeof(char *);\n  }\n\n  if (dir->maybeGnuMalloc) {\n    if (dir->convSpecifier != 'a' || dir->lengthModifier[0])\n      return FSS_INVALID;\n    // This is ambiguous, so check the smaller size of char * (if it is\n    // a GNU extension of %as, %aS or %a[...]) and float (if it is\n    // POSIX %a followed by s, S or [ letters).\n    return sizeof(char *) < sizeof(float) ? sizeof(char *) : sizeof(float);\n  }\n\n  if (char_is_one_of(dir->convSpecifier, \"cCsS[\")) {\n    bool needsTerminator = char_is_one_of(dir->convSpecifier, \"sS[\");\n    unsigned charSize =\n        format_get_char_size(dir->convSpecifier, dir->lengthModifier);\n    if (charSize == 0)\n      return FSS_INVALID;\n    if (dir->fieldWidth == 0) {\n      if (!needsTerminator)\n        return charSize;\n      return (charSize == sizeof(char)) ? 
FSS_STRLEN : FSS_WCSLEN;\n    }\n    return (dir->fieldWidth + needsTerminator) * charSize;\n  }\n\n  return format_get_value_size(dir->convSpecifier, dir->lengthModifier, false);\n}\n\n// Common part of *scanf interceptors.\n// Process format string and va_list, and report all store ranges.\n// Stops when \"consuming\" n_inputs input items.\nstatic void scanf_common(void *ctx, int n_inputs, bool allowGnuMalloc,\n                         const char *format, va_list aq) {\n  CHECK_GT(n_inputs, 0);\n  const char *p = format;\n\n  COMMON_INTERCEPTOR_READ_RANGE(ctx, format, internal_strlen(format) + 1);\n\n  while (*p) {\n    ScanfDirective dir;\n    p = scanf_parse_next(p, allowGnuMalloc, &dir);\n    if (!p)\n      break;\n    if (dir.convSpecifier == 0) {\n      // This can only happen at the end of the format string.\n      CHECK_EQ(*p, 0);\n      break;\n    }\n    // Here the directive is valid. Do what it says.\n    if (dir.argIdx != -1) {\n      // Unsupported.\n      break;\n    }\n    if (dir.suppressed)\n      continue;\n    int size = scanf_get_value_size(&dir);\n    if (size == FSS_INVALID) {\n      Report(\"%s: WARNING: unexpected format specifier in scanf interceptor: %.*s\\n\",\n             SanitizerToolName, static_cast<int>(dir.end - dir.begin), dir.begin);\n      break;\n    }\n    void *argp = va_arg(aq, void *);\n    if (dir.convSpecifier != 'n')\n      --n_inputs;\n    if (n_inputs < 0)\n      break;\n    if (size == FSS_STRLEN) {\n      size = internal_strlen((const char *)argp) + 1;\n    } else if (size == FSS_WCSLEN) {\n      // FIXME: actually use wcslen() to calculate it.\n      size = 0;\n    }\n    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, argp, size);\n    // For %ms/%mc, write the allocated output buffer as well.\n    if (dir.allocate) {\n      char *buf = *(char **)argp;\n      if (buf)\n        COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buf, internal_strlen(buf) + 1);\n    }\n  }\n}\n\n#if SANITIZER_INTERCEPT_PRINTF\n\nstruct PrintfDirective {\n  
int fieldWidth;\n  int fieldPrecision;\n  int argIdx; // width argument index, or -1 if not specified (\"%*n$\")\n  int precisionIdx; // precision argument index, or -1 if not specified (\".*n$\")\n  const char *begin;\n  const char *end;\n  bool starredWidth;\n  bool starredPrecision;\n  char lengthModifier[2];\n  char convSpecifier;\n};\n\nstatic const char *maybe_parse_number(const char *p, int *out) {\n  if (*p >= '0' && *p <= '9')\n    p = parse_number(p, out);\n  return p;\n}\n\nstatic const char *maybe_parse_number_or_star(const char *p, int *out,\n                                              bool *star) {\n  if (*p == '*') {\n    *star = true;\n    ++p;\n  } else {\n    *star = false;\n    p = maybe_parse_number(p, out);\n  }\n  return p;\n}\n\n// Parse printf format string. Same as scanf_parse_next.\nstatic const char *printf_parse_next(const char *p, PrintfDirective *dir) {\n  internal_memset(dir, 0, sizeof(*dir));\n  dir->argIdx = -1;\n  dir->precisionIdx = -1;\n\n  while (*p) {\n    if (*p != '%') {\n      ++p;\n      continue;\n    }\n    dir->begin = p;\n    ++p;\n    // %%\n    if (*p == '%') {\n      ++p;\n      continue;\n    }\n    if (*p == '\\0') {\n      return nullptr;\n    }\n    // %n$\n    p = maybe_parse_param_index(p, &dir->argIdx);\n    CHECK(p);\n    // Flags\n    while (char_is_one_of(*p, \"'-+ #0\")) {\n      ++p;\n    }\n    // Field width\n    p = maybe_parse_number_or_star(p, &dir->fieldWidth,\n                                   &dir->starredWidth);\n    if (!p)\n      return nullptr;\n    // Precision\n    if (*p == '.') {\n      ++p;\n      // Actual precision is optional (surprise!)\n      p = maybe_parse_number_or_star(p, &dir->fieldPrecision,\n                                     &dir->starredPrecision);\n      if (!p)\n        return nullptr;\n      // m$\n      if (dir->starredPrecision) {\n        p = maybe_parse_param_index(p, &dir->precisionIdx);\n        CHECK(p);\n      }\n    }\n    // Length modifier.\n    p = 
maybe_parse_length_modifier(p, dir->lengthModifier);\n    // Conversion specifier.\n    dir->convSpecifier = *p++;\n    dir->end = p;\n    break;\n  }\n  return p;\n}\n\nstatic int printf_get_value_size(PrintfDirective *dir) {\n  if (char_is_one_of(dir->convSpecifier, \"cCsS\")) {\n    unsigned charSize =\n        format_get_char_size(dir->convSpecifier, dir->lengthModifier);\n    if (charSize == 0)\n      return FSS_INVALID;\n    if (char_is_one_of(dir->convSpecifier, \"sS\")) {\n      return (charSize == sizeof(char)) ? FSS_STRLEN : FSS_WCSLEN;\n    }\n    return charSize;\n  }\n\n  return format_get_value_size(dir->convSpecifier, dir->lengthModifier, true);\n}\n\n#define SKIP_SCALAR_ARG(aq, convSpecifier, size)                   \\\n  do {                                                             \\\n    if (format_is_float_conv(convSpecifier)) {                     \\\n      switch (size) {                                              \\\n      case 8:                                                      \\\n        va_arg(*aq, double);                                       \\\n        break;                                                     \\\n      case 12:                                                     \\\n        va_arg(*aq, long double);                                  \\\n        break;                                                     \\\n      case 16:                                                     \\\n        va_arg(*aq, long double);                                  \\\n        break;                                                     \\\n      default:                                                     \\\n        Report(\"WARNING: unexpected floating-point arg size\"       \\\n               \" in printf interceptor: %zu\\n\", static_cast<uptr>(size));             \\\n        return;                                                    \\\n      }                                                            \\\n    } else {           
                                            \\\n      switch (size) {                                              \\\n      case 1:                                                      \\\n      case 2:                                                      \\\n      case 4:                                                      \\\n        va_arg(*aq, u32);                                          \\\n        break;                                                     \\\n      case 8:                                                      \\\n        va_arg(*aq, u64);                                          \\\n        break;                                                     \\\n      default:                                                     \\\n        Report(\"WARNING: unexpected arg size\"                      \\\n               \" in printf interceptor: %zu\\n\", static_cast<uptr>(size));             \\\n        return;                                                    \\\n      }                                                            \\\n    }                                                              \\\n  } while (0)\n\n// Common part of *printf interceptors.\n// Process format string and va_list, and report all load ranges.\nstatic void printf_common(void *ctx, const char *format, va_list aq) {\n  COMMON_INTERCEPTOR_READ_RANGE(ctx, format, internal_strlen(format) + 1);\n\n  const char *p = format;\n\n  while (*p) {\n    PrintfDirective dir;\n    p = printf_parse_next(p, &dir);\n    if (!p)\n      break;\n    if (dir.convSpecifier == 0) {\n      // This can only happen at the end of the format string.\n      CHECK_EQ(*p, 0);\n      break;\n    }\n    // Here the directive is valid. 
Do what it says.\n    if (dir.argIdx != -1 || dir.precisionIdx != -1) {\n      // Unsupported.\n      break;\n    }\n    if (dir.starredWidth) {\n      // Dynamic width\n      SKIP_SCALAR_ARG(&aq, 'd', sizeof(int));\n    }\n    if (dir.starredPrecision) {\n      // Dynamic precision\n      SKIP_SCALAR_ARG(&aq, 'd', sizeof(int));\n    }\n    // %m does not require an argument: strlen(errno).\n    if (dir.convSpecifier == 'm')\n      continue;\n    int size = printf_get_value_size(&dir);\n    if (size == FSS_INVALID) {\n      static int ReportedOnce;\n      if (!ReportedOnce++)\n        Report(\n            \"%s: WARNING: unexpected format specifier in printf \"\n            \"interceptor: %.*s (reported once per process)\\n\",\n            SanitizerToolName, static_cast<int>(dir.end - dir.begin), dir.begin);\n      break;\n    }\n    if (dir.convSpecifier == 'n') {\n      void *argp = va_arg(aq, void *);\n      COMMON_INTERCEPTOR_WRITE_RANGE(ctx, argp, size);\n      continue;\n    } else if (size == FSS_STRLEN) {\n      if (void *argp = va_arg(aq, void *)) {\n        if (dir.starredPrecision) {\n          // FIXME: properly support starred precision for strings.\n          size = 0;\n        } else if (dir.fieldPrecision > 0) {\n          // Won't read more than \"precision\" symbols.\n          size = internal_strnlen((const char *)argp, dir.fieldPrecision);\n          if (size < dir.fieldPrecision) size++;\n        } else {\n          // Whole string will be accessed.\n          size = internal_strlen((const char *)argp) + 1;\n        }\n        COMMON_INTERCEPTOR_READ_RANGE(ctx, argp, size);\n      }\n    } else if (size == FSS_WCSLEN) {\n      if (void *argp = va_arg(aq, void *)) {\n        // FIXME: Properly support wide-character strings (via wcsrtombs).\n        size = 0;\n        COMMON_INTERCEPTOR_READ_RANGE(ctx, argp, size);\n      }\n    } else {\n      // Skip non-pointer args\n      SKIP_SCALAR_ARG(&aq, dir.convSpecifier, size);\n    }\n  }\n}\n\n#endif 
// SANITIZER_INTERCEPT_PRINTF\n"
  },
  {
    "path": "runtime/sanitizer_common/sanitizer_common_interceptors_ioctl.inc",
    "content": "//===-- sanitizer_common_interceptors_ioctl.inc -----------------*- C++ -*-===//\n//\n// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n// See https://llvm.org/LICENSE.txt for license information.\n// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n//\n//===----------------------------------------------------------------------===//\n//\n// Ioctl handling in common sanitizer interceptors.\n//===----------------------------------------------------------------------===//\n\n#if !SANITIZER_NETBSD\n\n#include \"sanitizer_flags.h\"\n\nstruct ioctl_desc {\n  unsigned req;\n  // FIXME: support read+write arguments. Currently READWRITE and WRITE do the\n  // same thing.\n  // XXX: The declarations below may use WRITE instead of READWRITE, unless\n  // explicitly noted.\n  enum {\n    NONE,\n    READ,\n    WRITE,\n    READWRITE,\n    CUSTOM\n  } type : 3;\n  unsigned size : 29;\n  const char* name;\n};\n\nconst unsigned ioctl_table_max = 500;\nstatic ioctl_desc ioctl_table[ioctl_table_max];\nstatic unsigned ioctl_table_size = 0;\n\n// This can not be declared as a global, because references to struct_*_sz\n// require a global initializer. 
And this table must be available before global\n// initializers are run.\nstatic void ioctl_table_fill() {\n#define _(rq, tp, sz)                                    \\\n  if (IOCTL_##rq != IOCTL_NOT_PRESENT) {                 \\\n    CHECK(ioctl_table_size < ioctl_table_max);           \\\n    ioctl_table[ioctl_table_size].req = IOCTL_##rq;      \\\n    ioctl_table[ioctl_table_size].type = ioctl_desc::tp; \\\n    ioctl_table[ioctl_table_size].size = sz;             \\\n    ioctl_table[ioctl_table_size].name = #rq;            \\\n    ++ioctl_table_size;                                  \\\n  }\n\n  _(FIOASYNC, READ, sizeof(int));\n  _(FIOCLEX, NONE, 0);\n  _(FIOGETOWN, WRITE, sizeof(int));\n  _(FIONBIO, READ, sizeof(int));\n  _(FIONCLEX, NONE, 0);\n  _(FIOSETOWN, READ, sizeof(int));\n  _(SIOCATMARK, WRITE, sizeof(int));\n  _(SIOCGIFCONF, CUSTOM, 0);\n  _(SIOCGPGRP, WRITE, sizeof(int));\n  _(SIOCSPGRP, READ, sizeof(int));\n#if !SANITIZER_SOLARIS\n  _(TIOCCONS, NONE, 0);\n#endif\n  _(TIOCEXCL, NONE, 0);\n  _(TIOCGETD, WRITE, sizeof(int));\n  _(TIOCGPGRP, WRITE, pid_t_sz);\n  _(TIOCGWINSZ, WRITE, struct_winsize_sz);\n  _(TIOCMBIC, READ, sizeof(int));\n  _(TIOCMBIS, READ, sizeof(int));\n  _(TIOCMGET, WRITE, sizeof(int));\n  _(TIOCMSET, READ, sizeof(int));\n  _(TIOCNOTTY, NONE, 0);\n  _(TIOCNXCL, NONE, 0);\n  _(TIOCOUTQ, WRITE, sizeof(int));\n  _(TIOCPKT, READ, sizeof(int));\n  _(TIOCSCTTY, NONE, 0);\n  _(TIOCSETD, READ, sizeof(int));\n  _(TIOCSPGRP, READ, pid_t_sz);\n  _(TIOCSTI, READ, sizeof(char));\n  _(TIOCSWINSZ, READ, struct_winsize_sz);\n\n#if !SANITIZER_IOS\n  _(SIOCADDMULTI, READ, struct_ifreq_sz);\n  _(SIOCDELMULTI, READ, struct_ifreq_sz);\n  _(SIOCGIFADDR, WRITE, struct_ifreq_sz);\n  _(SIOCGIFBRDADDR, WRITE, struct_ifreq_sz);\n  _(SIOCGIFDSTADDR, WRITE, struct_ifreq_sz);\n  _(SIOCGIFFLAGS, WRITE, struct_ifreq_sz);\n  _(SIOCGIFMETRIC, WRITE, struct_ifreq_sz);\n  _(SIOCGIFMTU, WRITE, struct_ifreq_sz);\n  _(SIOCGIFNETMASK, WRITE, struct_ifreq_sz);\n  
_(SIOCSIFADDR, READ, struct_ifreq_sz);\n  _(SIOCSIFBRDADDR, READ, struct_ifreq_sz);\n  _(SIOCSIFDSTADDR, READ, struct_ifreq_sz);\n  _(SIOCSIFFLAGS, READ, struct_ifreq_sz);\n  _(SIOCSIFMETRIC, READ, struct_ifreq_sz);\n  _(SIOCSIFMTU, READ, struct_ifreq_sz);\n  _(SIOCSIFNETMASK, READ, struct_ifreq_sz);\n#endif\n\n#if (SANITIZER_LINUX && !SANITIZER_ANDROID)\n  _(SIOCGETSGCNT, WRITE, struct_sioc_sg_req_sz);\n  _(SIOCGETVIFCNT, WRITE, struct_sioc_vif_req_sz);\n#endif\n\n#if SANITIZER_LINUX\n  // Conflicting request ids.\n  // _(CDROMAUDIOBUFSIZ, NONE, 0);\n  // _(SNDCTL_TMR_CONTINUE, NONE, 0);\n  // _(SNDCTL_TMR_START, NONE, 0);\n  // _(SNDCTL_TMR_STOP, NONE, 0);\n  // _(SOUND_MIXER_READ_LOUD, WRITE, sizeof(int)); // same as ...READ_ENHANCE\n  // _(SOUND_MIXER_READ_MUTE, WRITE, sizeof(int)); // same as ...READ_ENHANCE\n  // _(SOUND_MIXER_WRITE_LOUD, WRITE, sizeof(int)); // same as ...WRITE_ENHANCE\n  // _(SOUND_MIXER_WRITE_MUTE, WRITE, sizeof(int)); // same as ...WRITE_ENHANCE\n  _(BLKFLSBUF, NONE, 0);\n  _(BLKGETSIZE, WRITE, sizeof(uptr));\n  _(BLKRAGET, WRITE, sizeof(int));\n  _(BLKRASET, NONE, 0);\n  _(BLKROGET, WRITE, sizeof(int));\n  _(BLKROSET, READ, sizeof(int));\n  _(BLKRRPART, NONE, 0);\n  _(CDROMEJECT, NONE, 0);\n  _(CDROMEJECT_SW, NONE, 0);\n  _(CDROMMULTISESSION, WRITE, struct_cdrom_multisession_sz);\n  _(CDROMPAUSE, NONE, 0);\n  _(CDROMPLAYMSF, READ, struct_cdrom_msf_sz);\n  _(CDROMPLAYTRKIND, READ, struct_cdrom_ti_sz);\n  _(CDROMREADAUDIO, READ, struct_cdrom_read_audio_sz);\n  _(CDROMREADCOOKED, READ, struct_cdrom_msf_sz);\n  _(CDROMREADMODE1, READ, struct_cdrom_msf_sz);\n  _(CDROMREADMODE2, READ, struct_cdrom_msf_sz);\n  _(CDROMREADRAW, READ, struct_cdrom_msf_sz);\n  _(CDROMREADTOCENTRY, WRITE, struct_cdrom_tocentry_sz);\n  _(CDROMREADTOCHDR, WRITE, struct_cdrom_tochdr_sz);\n  _(CDROMRESET, NONE, 0);\n  _(CDROMRESUME, NONE, 0);\n  _(CDROMSEEK, READ, struct_cdrom_msf_sz);\n  _(CDROMSTART, NONE, 0);\n  _(CDROMSTOP, NONE, 0);\n  _(CDROMSUBCHNL, WRITE, 
struct_cdrom_subchnl_sz);\n  _(CDROMVOLCTRL, READ, struct_cdrom_volctrl_sz);\n  _(CDROMVOLREAD, WRITE, struct_cdrom_volctrl_sz);\n  _(CDROM_GET_UPC, WRITE, 8);\n  _(EVIOCGABS, WRITE, struct_input_absinfo_sz); // fixup\n  _(EVIOCGBIT, WRITE, struct_input_id_sz); // fixup\n  _(EVIOCGEFFECTS, WRITE, sizeof(int));\n  _(EVIOCGID, WRITE, struct_input_id_sz);\n  _(EVIOCGKEY, WRITE, 0);\n  _(EVIOCGKEYCODE, WRITE, sizeof(int) * 2);\n  _(EVIOCGLED, WRITE, 0);\n  _(EVIOCGNAME, WRITE, 0);\n  _(EVIOCGPHYS, WRITE, 0);\n  _(EVIOCGRAB, READ, sizeof(int));\n  _(EVIOCGREP, WRITE, sizeof(int) * 2);\n  _(EVIOCGSND, WRITE, 0);\n  _(EVIOCGSW, WRITE, 0);\n  _(EVIOCGUNIQ, WRITE, 0);\n  _(EVIOCGVERSION, WRITE, sizeof(int));\n  _(EVIOCRMFF, READ, sizeof(int));\n  _(EVIOCSABS, READ, struct_input_absinfo_sz); // fixup\n  _(EVIOCSFF, READ, struct_ff_effect_sz);\n  _(EVIOCSKEYCODE, READ, sizeof(int) * 2);\n  _(EVIOCSREP, READ, sizeof(int) * 2);\n  _(FDCLRPRM, NONE, 0);\n  _(FDDEFPRM, READ, struct_floppy_struct_sz);\n  _(FDFLUSH, NONE, 0);\n  _(FDFMTBEG, NONE, 0);\n  _(FDFMTEND, NONE, 0);\n  _(FDFMTTRK, READ, struct_format_descr_sz);\n  _(FDGETDRVPRM, WRITE, struct_floppy_drive_params_sz);\n  _(FDGETDRVSTAT, WRITE, struct_floppy_drive_struct_sz);\n  _(FDGETDRVTYP, WRITE, 16);\n  _(FDGETFDCSTAT, WRITE, struct_floppy_fdc_state_sz);\n  _(FDGETMAXERRS, WRITE, struct_floppy_max_errors_sz);\n  _(FDGETPRM, WRITE, struct_floppy_struct_sz);\n  _(FDMSGOFF, NONE, 0);\n  _(FDMSGON, NONE, 0);\n  _(FDPOLLDRVSTAT, WRITE, struct_floppy_drive_struct_sz);\n  _(FDRAWCMD, WRITE, struct_floppy_raw_cmd_sz);\n  _(FDRESET, NONE, 0);\n  _(FDSETDRVPRM, READ, struct_floppy_drive_params_sz);\n  _(FDSETEMSGTRESH, NONE, 0);\n  _(FDSETMAXERRS, READ, struct_floppy_max_errors_sz);\n  _(FDSETPRM, READ, struct_floppy_struct_sz);\n  _(FDTWADDLE, NONE, 0);\n  _(FDWERRORCLR, NONE, 0);\n  _(FDWERRORGET, WRITE, struct_floppy_write_errors_sz);\n  _(HDIO_DRIVE_CMD, WRITE, sizeof(int));\n  _(HDIO_GETGEO, WRITE, struct_hd_geometry_sz);\n  
_(HDIO_GET_32BIT, WRITE, sizeof(int));\n  _(HDIO_GET_DMA, WRITE, sizeof(int));\n  _(HDIO_GET_IDENTITY, WRITE, struct_hd_driveid_sz);\n  _(HDIO_GET_KEEPSETTINGS, WRITE, sizeof(int));\n  _(HDIO_GET_MULTCOUNT, WRITE, sizeof(int));\n  _(HDIO_GET_NOWERR, WRITE, sizeof(int));\n  _(HDIO_GET_UNMASKINTR, WRITE, sizeof(int));\n  _(HDIO_SET_32BIT, NONE, 0);\n  _(HDIO_SET_DMA, NONE, 0);\n  _(HDIO_SET_KEEPSETTINGS, NONE, 0);\n  _(HDIO_SET_MULTCOUNT, NONE, 0);\n  _(HDIO_SET_NOWERR, NONE, 0);\n  _(HDIO_SET_UNMASKINTR, NONE, 0);\n  _(MTIOCGET, WRITE, struct_mtget_sz);\n  _(MTIOCPOS, WRITE, struct_mtpos_sz);\n  _(MTIOCTOP, READ, struct_mtop_sz);\n  _(PPPIOCGASYNCMAP, WRITE, sizeof(int));\n  _(PPPIOCGDEBUG, WRITE, sizeof(int));\n  _(PPPIOCGFLAGS, WRITE, sizeof(int));\n  _(PPPIOCGUNIT, WRITE, sizeof(int));\n  _(PPPIOCGXASYNCMAP, WRITE, sizeof(int) * 8);\n  _(PPPIOCSASYNCMAP, READ, sizeof(int));\n  _(PPPIOCSDEBUG, READ, sizeof(int));\n  _(PPPIOCSFLAGS, READ, sizeof(int));\n  _(PPPIOCSMAXCID, READ, sizeof(int));\n  _(PPPIOCSMRU, READ, sizeof(int));\n  _(PPPIOCSXASYNCMAP, READ, sizeof(int) * 8);\n  _(SIOCADDRT, READ, struct_rtentry_sz);\n  _(SIOCDARP, READ, struct_arpreq_sz);\n  _(SIOCDELRT, READ, struct_rtentry_sz);\n  _(SIOCDRARP, READ, struct_arpreq_sz);\n  _(SIOCGARP, WRITE, struct_arpreq_sz);\n  _(SIOCGIFENCAP, WRITE, sizeof(int));\n  _(SIOCGIFHWADDR, WRITE, struct_ifreq_sz);\n  _(SIOCGIFMAP, WRITE, struct_ifreq_sz);\n  _(SIOCGIFMEM, WRITE, struct_ifreq_sz);\n  _(SIOCGIFNAME, NONE, 0);\n  _(SIOCGIFSLAVE, NONE, 0);\n  _(SIOCGRARP, WRITE, struct_arpreq_sz);\n  _(SIOCGSTAMP, WRITE, timeval_sz);\n  _(SIOCSARP, READ, struct_arpreq_sz);\n  _(SIOCSIFENCAP, READ, sizeof(int));\n  _(SIOCSIFHWADDR, READ, struct_ifreq_sz);\n  _(SIOCSIFLINK, NONE, 0);\n  _(SIOCSIFMAP, READ, struct_ifreq_sz);\n  _(SIOCSIFMEM, READ, struct_ifreq_sz);\n  _(SIOCSIFSLAVE, NONE, 0);\n  _(SIOCSRARP, READ, struct_arpreq_sz);\n  _(SNDCTL_COPR_HALT, WRITE, struct_copr_debug_buf_sz);\n  _(SNDCTL_COPR_LOAD, READ, 
struct_copr_buffer_sz);\n  _(SNDCTL_COPR_RCODE, WRITE, struct_copr_debug_buf_sz);\n  _(SNDCTL_COPR_RCVMSG, WRITE, struct_copr_msg_sz);\n  _(SNDCTL_COPR_RDATA, WRITE, struct_copr_debug_buf_sz);\n  _(SNDCTL_COPR_RESET, NONE, 0);\n  _(SNDCTL_COPR_RUN, WRITE, struct_copr_debug_buf_sz);\n  _(SNDCTL_COPR_SENDMSG, READ, struct_copr_msg_sz);\n  _(SNDCTL_COPR_WCODE, READ, struct_copr_debug_buf_sz);\n  _(SNDCTL_COPR_WDATA, READ, struct_copr_debug_buf_sz);\n  _(SNDCTL_DSP_GETBLKSIZE, WRITE, sizeof(int));\n  _(SNDCTL_DSP_GETFMTS, WRITE, sizeof(int));\n  _(SNDCTL_DSP_NONBLOCK, NONE, 0);\n  _(SNDCTL_DSP_POST, NONE, 0);\n  _(SNDCTL_DSP_RESET, NONE, 0);\n  _(SNDCTL_DSP_SETFMT, WRITE, sizeof(int));\n  _(SNDCTL_DSP_SETFRAGMENT, WRITE, sizeof(int));\n  _(SNDCTL_DSP_SPEED, WRITE, sizeof(int));\n  _(SNDCTL_DSP_STEREO, WRITE, sizeof(int));\n  _(SNDCTL_DSP_SUBDIVIDE, WRITE, sizeof(int));\n  _(SNDCTL_DSP_SYNC, NONE, 0);\n  _(SNDCTL_FM_4OP_ENABLE, READ, sizeof(int));\n  _(SNDCTL_FM_LOAD_INSTR, READ, struct_sbi_instrument_sz);\n  _(SNDCTL_MIDI_INFO, WRITE, struct_midi_info_sz);\n  _(SNDCTL_MIDI_PRETIME, WRITE, sizeof(int));\n  _(SNDCTL_SEQ_CTRLRATE, WRITE, sizeof(int));\n  _(SNDCTL_SEQ_GETINCOUNT, WRITE, sizeof(int));\n  _(SNDCTL_SEQ_GETOUTCOUNT, WRITE, sizeof(int));\n  _(SNDCTL_SEQ_NRMIDIS, WRITE, sizeof(int));\n  _(SNDCTL_SEQ_NRSYNTHS, WRITE, sizeof(int));\n  _(SNDCTL_SEQ_OUTOFBAND, READ, struct_seq_event_rec_sz);\n  _(SNDCTL_SEQ_PANIC, NONE, 0);\n  _(SNDCTL_SEQ_PERCMODE, NONE, 0);\n  _(SNDCTL_SEQ_RESET, NONE, 0);\n  _(SNDCTL_SEQ_RESETSAMPLES, READ, sizeof(int));\n  _(SNDCTL_SEQ_SYNC, NONE, 0);\n  _(SNDCTL_SEQ_TESTMIDI, READ, sizeof(int));\n  _(SNDCTL_SEQ_THRESHOLD, READ, sizeof(int));\n  _(SNDCTL_SYNTH_INFO, WRITE, struct_synth_info_sz);\n  _(SNDCTL_SYNTH_MEMAVL, WRITE, sizeof(int));\n  _(SNDCTL_TMR_METRONOME, READ, sizeof(int));\n  _(SNDCTL_TMR_SELECT, WRITE, sizeof(int));\n  _(SNDCTL_TMR_SOURCE, WRITE, sizeof(int));\n  _(SNDCTL_TMR_TEMPO, WRITE, sizeof(int));\n  _(SNDCTL_TMR_TIMEBASE, 
WRITE, sizeof(int));\n  _(SOUND_MIXER_READ_ALTPCM, WRITE, sizeof(int));\n  _(SOUND_MIXER_READ_BASS, WRITE, sizeof(int));\n  _(SOUND_MIXER_READ_CAPS, WRITE, sizeof(int));\n  _(SOUND_MIXER_READ_CD, WRITE, sizeof(int));\n  _(SOUND_MIXER_READ_DEVMASK, WRITE, sizeof(int));\n  _(SOUND_MIXER_READ_ENHANCE, WRITE, sizeof(int));\n  _(SOUND_MIXER_READ_IGAIN, WRITE, sizeof(int));\n  _(SOUND_MIXER_READ_IMIX, WRITE, sizeof(int));\n  _(SOUND_MIXER_READ_LINE, WRITE, sizeof(int));\n  _(SOUND_MIXER_READ_LINE1, WRITE, sizeof(int));\n  _(SOUND_MIXER_READ_LINE2, WRITE, sizeof(int));\n  _(SOUND_MIXER_READ_LINE3, WRITE, sizeof(int));\n  _(SOUND_MIXER_READ_MIC, WRITE, sizeof(int));\n  _(SOUND_MIXER_READ_OGAIN, WRITE, sizeof(int));\n  _(SOUND_MIXER_READ_PCM, WRITE, sizeof(int));\n  _(SOUND_MIXER_READ_RECLEV, WRITE, sizeof(int));\n  _(SOUND_MIXER_READ_RECMASK, WRITE, sizeof(int));\n  _(SOUND_MIXER_READ_RECSRC, WRITE, sizeof(int));\n  _(SOUND_MIXER_READ_SPEAKER, WRITE, sizeof(int));\n  _(SOUND_MIXER_READ_STEREODEVS, WRITE, sizeof(int));\n  _(SOUND_MIXER_READ_SYNTH, WRITE, sizeof(int));\n  _(SOUND_MIXER_READ_TREBLE, WRITE, sizeof(int));\n  _(SOUND_MIXER_READ_VOLUME, WRITE, sizeof(int));\n  _(SOUND_MIXER_WRITE_ALTPCM, WRITE, sizeof(int));\n  _(SOUND_MIXER_WRITE_BASS, WRITE, sizeof(int));\n  _(SOUND_MIXER_WRITE_CD, WRITE, sizeof(int));\n  _(SOUND_MIXER_WRITE_ENHANCE, WRITE, sizeof(int));\n  _(SOUND_MIXER_WRITE_IGAIN, WRITE, sizeof(int));\n  _(SOUND_MIXER_WRITE_IMIX, WRITE, sizeof(int));\n  _(SOUND_MIXER_WRITE_LINE, WRITE, sizeof(int));\n  _(SOUND_MIXER_WRITE_LINE1, WRITE, sizeof(int));\n  _(SOUND_MIXER_WRITE_LINE2, WRITE, sizeof(int));\n  _(SOUND_MIXER_WRITE_LINE3, WRITE, sizeof(int));\n  _(SOUND_MIXER_WRITE_MIC, WRITE, sizeof(int));\n  _(SOUND_MIXER_WRITE_OGAIN, WRITE, sizeof(int));\n  _(SOUND_MIXER_WRITE_PCM, WRITE, sizeof(int));\n  _(SOUND_MIXER_WRITE_RECLEV, WRITE, sizeof(int));\n  _(SOUND_MIXER_WRITE_RECSRC, WRITE, sizeof(int));\n  _(SOUND_MIXER_WRITE_SPEAKER, WRITE, sizeof(int));\n  
_(SOUND_MIXER_WRITE_SYNTH, WRITE, sizeof(int));\n  _(SOUND_MIXER_WRITE_TREBLE, WRITE, sizeof(int));\n  _(SOUND_MIXER_WRITE_VOLUME, WRITE, sizeof(int));\n  _(SOUND_PCM_READ_BITS, WRITE, sizeof(int));\n  _(SOUND_PCM_READ_CHANNELS, WRITE, sizeof(int));\n  _(SOUND_PCM_READ_FILTER, WRITE, sizeof(int));\n  _(SOUND_PCM_READ_RATE, WRITE, sizeof(int));\n  _(SOUND_PCM_WRITE_CHANNELS, WRITE, sizeof(int));\n  _(SOUND_PCM_WRITE_FILTER, WRITE, sizeof(int));\n  _(TCFLSH, NONE, 0);\n#if SANITIZER_GLIBC\n  _(TCGETA, WRITE, struct_termio_sz);\n#endif\n  _(TCGETS, WRITE, struct_termios_sz);\n  _(TCSBRK, NONE, 0);\n  _(TCSBRKP, NONE, 0);\n#if SANITIZER_GLIBC\n  _(TCSETA, READ, struct_termio_sz);\n  _(TCSETAF, READ, struct_termio_sz);\n  _(TCSETAW, READ, struct_termio_sz);\n#endif\n  _(TCSETS, READ, struct_termios_sz);\n  _(TCSETSF, READ, struct_termios_sz);\n  _(TCSETSW, READ, struct_termios_sz);\n  _(TCXONC, NONE, 0);\n  _(TIOCGLCKTRMIOS, WRITE, struct_termios_sz);\n  _(TIOCGSOFTCAR, WRITE, sizeof(int));\n  _(TIOCINQ, WRITE, sizeof(int));\n  _(TIOCLINUX, READ, sizeof(char));\n  _(TIOCSERCONFIG, NONE, 0);\n  _(TIOCSERGETLSR, WRITE, sizeof(int));\n  _(TIOCSERGWILD, WRITE, sizeof(int));\n  _(TIOCSERSWILD, READ, sizeof(int));\n  _(TIOCSLCKTRMIOS, READ, struct_termios_sz);\n  _(TIOCSSOFTCAR, READ, sizeof(int));\n  _(VT_ACTIVATE, NONE, 0);\n  _(VT_DISALLOCATE, NONE, 0);\n  _(VT_GETMODE, WRITE, struct_vt_mode_sz);\n  _(VT_GETSTATE, WRITE, struct_vt_stat_sz);\n  _(VT_OPENQRY, WRITE, sizeof(int));\n  _(VT_RELDISP, NONE, 0);\n  _(VT_RESIZE, READ, struct_vt_sizes_sz);\n  _(VT_RESIZEX, READ, struct_vt_consize_sz);\n  _(VT_SENDSIG, NONE, 0);\n  _(VT_SETMODE, READ, struct_vt_mode_sz);\n  _(VT_WAITACTIVE, NONE, 0);\n#endif\n\n#if SANITIZER_GLIBC\n  // _(SIOCDEVPLIP, WRITE, struct_ifreq_sz); // the same as EQL_ENSLAVE\n  _(EQL_EMANCIPATE, WRITE, struct_ifreq_sz);\n  _(EQL_ENSLAVE, WRITE, struct_ifreq_sz);\n  _(EQL_GETMASTRCFG, WRITE, struct_ifreq_sz);\n  _(EQL_GETSLAVECFG, WRITE, struct_ifreq_sz);\n 
 _(EQL_SETMASTRCFG, WRITE, struct_ifreq_sz);\n  _(EQL_SETSLAVECFG, WRITE, struct_ifreq_sz);\n  _(EVIOCGKEYCODE_V2, WRITE, struct_input_keymap_entry_sz);\n  _(EVIOCGPROP, WRITE, 0);\n  _(EVIOCSKEYCODE_V2, READ, struct_input_keymap_entry_sz);\n  _(FS_IOC_GETFLAGS, WRITE, sizeof(int));\n  _(FS_IOC_GETVERSION, WRITE, sizeof(int));\n  _(FS_IOC_SETFLAGS, READ, sizeof(int));\n  _(FS_IOC_SETVERSION, READ, sizeof(int));\n  _(GIO_CMAP, WRITE, 48);\n  _(GIO_FONT, WRITE, 8192);\n  _(GIO_SCRNMAP, WRITE, e_tabsz);\n  _(GIO_UNIMAP, WRITE, struct_unimapdesc_sz);\n  _(GIO_UNISCRNMAP, WRITE, sizeof(short) * e_tabsz);\n  _(KDADDIO, NONE, 0);\n  _(KDDELIO, NONE, 0);\n  _(KDDISABIO, NONE, 0);\n  _(KDENABIO, NONE, 0);\n  _(KDGETKEYCODE, WRITE, struct_kbkeycode_sz);\n  _(KDGETLED, WRITE, 1);\n  _(KDGETMODE, WRITE, sizeof(int));\n  _(KDGKBDIACR, WRITE, struct_kbdiacrs_sz);\n  _(KDGKBENT, WRITE, struct_kbentry_sz);\n  _(KDGKBLED, WRITE, sizeof(int));\n  _(KDGKBMETA, WRITE, sizeof(int));\n  _(KDGKBMODE, WRITE, sizeof(int));\n  _(KDGKBSENT, WRITE, struct_kbsentry_sz);\n  _(KDGKBTYPE, WRITE, 1);\n  _(KDMAPDISP, NONE, 0);\n  _(KDMKTONE, NONE, 0);\n  _(KDSETKEYCODE, READ, struct_kbkeycode_sz);\n  _(KDSETLED, NONE, 0);\n  _(KDSETMODE, NONE, 0);\n  _(KDSIGACCEPT, NONE, 0);\n  _(KDSKBDIACR, READ, struct_kbdiacrs_sz);\n  _(KDSKBENT, READ, struct_kbentry_sz);\n  _(KDSKBLED, NONE, 0);\n  _(KDSKBMETA, NONE, 0);\n  _(KDSKBMODE, NONE, 0);\n  _(KDSKBSENT, READ, struct_kbsentry_sz);\n  _(KDUNMAPDISP, NONE, 0);\n  _(KIOCSOUND, NONE, 0);\n  _(LPABORT, NONE, 0);\n  _(LPABORTOPEN, NONE, 0);\n  _(LPCAREFUL, NONE, 0);\n  _(LPCHAR, NONE, 0);\n  _(LPGETIRQ, WRITE, sizeof(int));\n  _(LPGETSTATUS, WRITE, sizeof(int));\n  _(LPRESET, NONE, 0);\n  _(LPSETIRQ, NONE, 0);\n  _(LPTIME, NONE, 0);\n  _(LPWAIT, NONE, 0);\n  _(MTIOCGETCONFIG, WRITE, struct_mtconfiginfo_sz);\n  _(MTIOCSETCONFIG, READ, struct_mtconfiginfo_sz);\n  _(PIO_CMAP, NONE, 0);\n  _(PIO_FONT, READ, 8192);\n  _(PIO_SCRNMAP, READ, e_tabsz);\n  
_(PIO_UNIMAP, READ, struct_unimapdesc_sz);\n  _(PIO_UNIMAPCLR, READ, struct_unimapinit_sz);\n  _(PIO_UNISCRNMAP, READ, sizeof(short) * e_tabsz);\n  _(SCSI_IOCTL_PROBE_HOST, READ, sizeof(int));\n  _(SCSI_IOCTL_TAGGED_DISABLE, NONE, 0);\n  _(SCSI_IOCTL_TAGGED_ENABLE, NONE, 0);\n  _(SNDCTL_DSP_GETISPACE, WRITE, struct_audio_buf_info_sz);\n  _(SNDCTL_DSP_GETOSPACE, WRITE, struct_audio_buf_info_sz);\n  _(TIOCGSERIAL, WRITE, struct_serial_struct_sz);\n  _(TIOCSERGETMULTI, WRITE, struct_serial_multiport_struct_sz);\n  _(TIOCSERSETMULTI, READ, struct_serial_multiport_struct_sz);\n  _(TIOCSSERIAL, READ, struct_serial_struct_sz);\n\n  // The following ioctl requests are shared between AX25, IPX, netrom and\n  // mrouted.\n  // _(SIOCAIPXITFCRT, READ, sizeof(char));\n  // _(SIOCAX25GETUID, READ, struct_sockaddr_ax25_sz);\n  // _(SIOCNRGETPARMS, WRITE, struct_nr_parms_struct_sz);\n  // _(SIOCAIPXPRISLT, READ, sizeof(char));\n  // _(SIOCNRSETPARMS, READ, struct_nr_parms_struct_sz);\n  // _(SIOCAX25ADDUID, READ, struct_sockaddr_ax25_sz);\n  // _(SIOCNRDECOBS, NONE, 0);\n  // _(SIOCAX25DELUID, READ, struct_sockaddr_ax25_sz);\n  // _(SIOCIPXCFGDATA, WRITE, struct_ipx_config_data_sz);\n  // _(SIOCAX25NOUID, READ, sizeof(int));\n  // _(SIOCNRRTCTL, READ, sizeof(int));\n  // _(SIOCAX25DIGCTL, READ, sizeof(int));\n  // _(SIOCAX25GETPARMS, WRITE, struct_ax25_parms_struct_sz);\n  // _(SIOCAX25SETPARMS, READ, struct_ax25_parms_struct_sz);\n#endif\n#undef _\n}\n\nstatic bool ioctl_initialized = false;\n\nstruct ioctl_desc_compare {\n  bool operator()(const ioctl_desc& left, const ioctl_desc& right) const {\n    return left.req < right.req;\n  }\n};\n\nstatic void ioctl_init() {\n  ioctl_table_fill();\n  Sort(ioctl_table, ioctl_table_size, ioctl_desc_compare());\n\n  bool bad = false;\n  for (unsigned i = 0; i < ioctl_table_size - 1; ++i) {\n    if (ioctl_table[i].req >= ioctl_table[i + 1].req) {\n      Printf(\"Duplicate or unsorted ioctl request id %x >= %x (%s vs %s)\\n\",\n             
ioctl_table[i].req, ioctl_table[i + 1].req, ioctl_table[i].name,\n             ioctl_table[i + 1].name);\n      bad = true;\n    }\n  }\n\n  if (bad) Die();\n\n  ioctl_initialized = true;\n}\n\n// Handle the most evil ioctls that encode argument value as part of request id.\nstatic unsigned ioctl_request_fixup(unsigned req) {\n#if SANITIZER_LINUX\n  // Strip size and event number.\n  const unsigned kEviocgbitMask =\n      (IOC_SIZEMASK << IOC_SIZESHIFT) | EVIOC_EV_MAX;\n  if ((req & ~kEviocgbitMask) == IOCTL_EVIOCGBIT)\n    return IOCTL_EVIOCGBIT;\n  // Strip absolute axis number.\n  if ((req & ~EVIOC_ABS_MAX) == IOCTL_EVIOCGABS)\n    return IOCTL_EVIOCGABS;\n  if ((req & ~EVIOC_ABS_MAX) == IOCTL_EVIOCSABS)\n    return IOCTL_EVIOCSABS;\n#endif\n  return req;\n}\n\nstatic const ioctl_desc *ioctl_table_lookup(unsigned req) {\n  int left = 0;\n  int right = ioctl_table_size;\n  while (left < right) {\n    int mid = (left + right) / 2;\n    if (ioctl_table[mid].req < req)\n      left = mid + 1;\n    else\n      right = mid;\n  }\n  if (left == right && ioctl_table[left].req == req)\n    return ioctl_table + left;\n  else\n    return nullptr;\n}\n\nstatic bool ioctl_decode(unsigned req, ioctl_desc *desc) {\n  CHECK(desc);\n  desc->req = req;\n  desc->name = \"<DECODED_IOCTL>\";\n  desc->size = IOC_SIZE(req);\n  // Sanity check.\n  if (desc->size > 0xFFFF) return false;\n  unsigned dir = IOC_DIR(req);\n  switch (dir) {\n    case IOC_NONE:\n      desc->type = ioctl_desc::NONE;\n      break;\n    case IOC_READ | IOC_WRITE:\n      desc->type = ioctl_desc::READWRITE;\n      break;\n    case IOC_READ:\n      desc->type = ioctl_desc::WRITE;\n      break;\n    case IOC_WRITE:\n      desc->type = ioctl_desc::READ;\n      break;\n    default:\n      return false;\n  }\n  // Size can be 0 iff type is NONE.\n  if ((desc->type == IOC_NONE) != (desc->size == 0)) return false;\n  // Sanity check.\n  if (IOC_TYPE(req) == 0) return false;\n  return true;\n}\n\nstatic const ioctl_desc 
*ioctl_lookup(unsigned req) {\n  req = ioctl_request_fixup(req);\n  const ioctl_desc *desc = ioctl_table_lookup(req);\n  if (desc) return desc;\n\n  // Try stripping access size from the request id.\n  desc = ioctl_table_lookup(req & ~(IOC_SIZEMASK << IOC_SIZESHIFT));\n  // Sanity check: requests that encode access size are either read or write and\n  // have size of 0 in the table.\n  if (desc && desc->size == 0 &&\n      (desc->type == ioctl_desc::READWRITE || desc->type == ioctl_desc::WRITE ||\n       desc->type == ioctl_desc::READ))\n    return desc;\n  return nullptr;\n}\n\nstatic void ioctl_common_pre(void *ctx, const ioctl_desc *desc, int d,\n                             unsigned request, void *arg) {\n  if (desc->type == ioctl_desc::READ || desc->type == ioctl_desc::READWRITE) {\n    unsigned size = desc->size ? desc->size : IOC_SIZE(request);\n    COMMON_INTERCEPTOR_READ_RANGE(ctx, arg, size);\n  }\n  if (desc->type != ioctl_desc::CUSTOM)\n    return;\n  if (request == IOCTL_SIOCGIFCONF) {\n    struct __sanitizer_ifconf *ifc = (__sanitizer_ifconf *)arg;\n    COMMON_INTERCEPTOR_READ_RANGE(ctx, (char*)&ifc->ifc_len,\n                                  sizeof(ifc->ifc_len));\n  }\n}\n\nstatic void ioctl_common_post(void *ctx, const ioctl_desc *desc, int res, int d,\n                              unsigned request, void *arg) {\n  if (desc->type == ioctl_desc::WRITE || desc->type == ioctl_desc::READWRITE) {\n    // FIXME: add verbose output\n    unsigned size = desc->size ? desc->size : IOC_SIZE(request);\n    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, arg, size);\n  }\n  if (desc->type != ioctl_desc::CUSTOM)\n    return;\n  if (request == IOCTL_SIOCGIFCONF) {\n    struct __sanitizer_ifconf *ifc = (__sanitizer_ifconf *)arg;\n    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ifc->ifc_ifcu.ifcu_req, ifc->ifc_len);\n  }\n}\n\n#endif\n"
  },
  {
    "path": "runtime/sanitizer_common/sanitizer_common_interceptors_netbsd_compat.inc",
    "content": "//===-- sanitizer_common_interceptors_netbsd_compat.inc ---------*- C++ -*-===//\n//\n// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n// See https://llvm.org/LICENSE.txt for license information.\n// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n//\n//===----------------------------------------------------------------------===//\n//\n// Common function interceptors for tools like AddressSanitizer,\n// ThreadSanitizer, MemorySanitizer, etc.\n//\n// Interceptors for NetBSD old function calls that have been versioned.\n//\n// NetBSD minimal version supported 9.0.\n// NetBSD current version supported 9.99.26.\n//\n//===----------------------------------------------------------------------===//\n\n#if SANITIZER_NETBSD\n\n// First undef all mangled symbols.\n// Next, define compat interceptors.\n// Finally, undef INIT_ and redefine it.\n// This allows to avoid preprocessor issues.\n\n#undef fstatvfs\n#undef fstatvfs1\n#undef getmntinfo\n#undef getvfsstat\n#undef statvfs\n#undef statvfs1\n\nINTERCEPTOR(int, statvfs, char *path, void *buf) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, statvfs, path, buf);\n  if (path) COMMON_INTERCEPTOR_READ_RANGE(ctx, path, internal_strlen(path) + 1);\n  // FIXME: under ASan the call below may write to freed memory and corrupt\n  // its metadata. See\n  // https://github.com/google/sanitizers/issues/321.\n  int res = REAL(statvfs)(path, buf);\n  if (!res) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buf, struct_statvfs90_sz);\n  return res;\n}\n\nINTERCEPTOR(int, fstatvfs, int fd, void *buf) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, fstatvfs, fd, buf);\n  COMMON_INTERCEPTOR_FD_ACCESS(ctx, fd);\n  // FIXME: under ASan the call below may write to freed memory and corrupt\n  // its metadata. 
See\n  // https://github.com/google/sanitizers/issues/321.\n  int res = REAL(fstatvfs)(fd, buf);\n  if (!res) {\n    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buf, struct_statvfs90_sz);\n    if (fd >= 0)\n      COMMON_INTERCEPTOR_FD_ACQUIRE(ctx, fd);\n  }\n  return res;\n}\n\n#undef INIT_STATVFS\n#define INIT_STATVFS \\\n  COMMON_INTERCEPT_FUNCTION(statvfs); \\\n  COMMON_INTERCEPT_FUNCTION(fstatvfs); \\\n  COMMON_INTERCEPT_FUNCTION(__statvfs90); \\\n  COMMON_INTERCEPT_FUNCTION(__fstatvfs90)\n\nINTERCEPTOR(int, __getmntinfo13, void **mntbufp, int flags) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, __getmntinfo13, mntbufp, flags);\n  int cnt = REAL(__getmntinfo13)(mntbufp, flags);\n  if (cnt > 0 && mntbufp) {\n    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, mntbufp, sizeof(void *));\n    if (*mntbufp)\n      COMMON_INTERCEPTOR_WRITE_RANGE(ctx, *mntbufp, cnt * struct_statvfs90_sz);\n  }\n  return cnt;\n}\n\n#undef INIT_GETMNTINFO\n#define INIT_GETMNTINFO \\\n  COMMON_INTERCEPT_FUNCTION(__getmntinfo13); \\\n  COMMON_INTERCEPT_FUNCTION(__getmntinfo90)\n\nINTERCEPTOR(int, getvfsstat, void *buf, SIZE_T bufsize, int flags) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, getvfsstat, buf, bufsize, flags);\n  int ret = REAL(getvfsstat)(buf, bufsize, flags);\n  if (buf && ret > 0)\n    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buf, ret * struct_statvfs90_sz);\n  return ret;\n}\n\n#undef INIT_GETVFSSTAT\n#define INIT_GETVFSSTAT \\\n  COMMON_INTERCEPT_FUNCTION(getvfsstat); \\\n  COMMON_INTERCEPT_FUNCTION(__getvfsstat90)\n\nINTERCEPTOR(int, statvfs1, const char *path, void *buf, int flags) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, statvfs1, path, buf, flags);\n  if (path) COMMON_INTERCEPTOR_READ_RANGE(ctx, path, internal_strlen(path) + 1);\n  int res = REAL(statvfs1)(path, buf, flags);\n  if (!res) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buf, struct_statvfs90_sz);\n  return res;\n}\n\nINTERCEPTOR(int, fstatvfs1, int fd, void *buf, int flags) {\n  void *ctx;\n  COMMON_INTERCEPTOR_ENTER(ctx, 
fstatvfs1, fd, buf, flags);\n  COMMON_INTERCEPTOR_FD_ACCESS(ctx, fd);\n  int res = REAL(fstatvfs1)(fd, buf, flags);\n  if (!res) {\n    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buf, struct_statvfs90_sz);\n    if (fd >= 0)\n      COMMON_INTERCEPTOR_FD_ACQUIRE(ctx, fd);\n  }\n  return res;\n}\n\n#undef INIT_STATVFS1\n#define INIT_STATVFS1 \\\n  COMMON_INTERCEPT_FUNCTION(statvfs1); \\\n  COMMON_INTERCEPT_FUNCTION(fstatvfs1); \\\n  COMMON_INTERCEPT_FUNCTION(__statvfs190); \\\n  COMMON_INTERCEPT_FUNCTION(__fstatvfs190)\n\n#endif\n"
  },
  {
    "path": "runtime/sanitizer_common/sanitizer_common_interceptors_vfork_aarch64.inc.S",
    "content": "#if defined(__aarch64__) && defined(__linux__)\n\n#include \"sanitizer_common/sanitizer_asm.h\"\n#include \"builtins/assembly.h\"\n\nASM_HIDDEN(COMMON_INTERCEPTOR_SPILL_AREA)\n\n.comm _ZN14__interception10real_vforkE,8,8\n.globl ASM_WRAPPER_NAME(vfork)\nASM_TYPE_FUNCTION(ASM_WRAPPER_NAME(vfork))\nASM_WRAPPER_NAME(vfork):\n        // Save x30 in the off-stack spill area.\n        hint    #25 // paciasp\n        stp     xzr, x30, [sp, #-16]!\n        bl      COMMON_INTERCEPTOR_SPILL_AREA\n        ldp     xzr, x30, [sp], 16\n        str     x30, [x0]\n\n        // Call real vfork. This may return twice. User code that runs between the first and the second return\n        // may clobber the stack frame of the interceptor; that's why it does not have a frame.\n        adrp    x0, _ZN14__interception10real_vforkE\n        ldr     x0, [x0, :lo12:_ZN14__interception10real_vforkE]\n        blr     x0\n\n        stp     x0, xzr, [sp, #-16]!\n        cmp     x0, #0\n        b.eq   .L_exit\n\n        // x0 != 0 => parent process. Clear stack shadow.\n        add    x0, sp, #16\n        bl     COMMON_INTERCEPTOR_HANDLE_VFORK\n\n.L_exit:\n        // Restore x30.\n        bl     COMMON_INTERCEPTOR_SPILL_AREA\n        ldr    x30, [x0]\n        ldp    x0, xzr, [sp], 16\n        hint   #29 // autiasp\n\n        ret\nASM_SIZE(vfork)\n\n.weak vfork\n.set vfork, ASM_WRAPPER_NAME(vfork)\n\nGNU_PROPERTY_BTI_PAC\n\n#endif\n"
  },
  {
    "path": "runtime/sanitizer_common/sanitizer_common_interceptors_vfork_arm.inc.S",
    "content": "#if defined(__arm__) && defined(__linux__)\n\n#include \"sanitizer_common/sanitizer_asm.h\"\n\nASM_HIDDEN(COMMON_INTERCEPTOR_SPILL_AREA)\n\n.comm _ZN14__interception10real_vforkE,4,4\n.globl ASM_WRAPPER_NAME(vfork)\nASM_TYPE_FUNCTION(ASM_WRAPPER_NAME(vfork))\nASM_WRAPPER_NAME(vfork):\n        // Save LR in the off-stack spill area.\n        push    {r4, lr}\n        bl      COMMON_INTERCEPTOR_SPILL_AREA\n        pop     {r4, lr}\n        str     lr, [r0]\n\n        // Call real vfork. This may return twice. User code that runs between the first and the second return\n        // may clobber the stack frame of the interceptor; that's why it does not have a frame.\n        ldr     r0, .LCPI0_0\n.LPC0_0:\n        ldr     r0, [pc, r0]\n        mov     lr, pc\n        bx      r0\n\n        push    {r0, r4}\n        cmp     r0, #0\n        beq     .L_exit\n\n        // r0 != 0 => parent process. Clear stack shadow.\n        add     r0, sp, #8\n        bl      COMMON_INTERCEPTOR_HANDLE_VFORK\n\n.L_exit:\n        // Restore LR.\n        bl      COMMON_INTERCEPTOR_SPILL_AREA\n        ldr     lr, [r0]\n        pop     {r0, r4}\n\n        mov     pc, lr\n\n.LCPI0_0:\n        .long   _ZN14__interception10real_vforkE - (.LPC0_0+8)\n\nASM_SIZE(vfork)\n\n.weak vfork\n.set vfork, ASM_WRAPPER_NAME(vfork)\n\n#endif\n"
  },
  {
    "path": "runtime/sanitizer_common/sanitizer_common_interceptors_vfork_i386.inc.S",
    "content": "#if defined(__i386__) && defined(__linux__)\n\n#include \"sanitizer_common/sanitizer_asm.h\"\n\n.comm _ZN14__interception10real_vforkE,4,4\n.globl ASM_WRAPPER_NAME(vfork)\nASM_TYPE_FUNCTION(ASM_WRAPPER_NAME(vfork))\nASM_WRAPPER_NAME(vfork):\n        _CET_ENDBR\n        // Store return address in the spill area and tear down the stack frame.\n        sub     $12, %esp\n        call    COMMON_INTERCEPTOR_SPILL_AREA\n        mov     12(%esp), %ecx\n        mov     %ecx, (%eax)\n        add     $16, %esp\n\n        call    .L0$pb\n.L0$pb:\n        pop     %eax\n.Ltmp0:\n        add     $_GLOBAL_OFFSET_TABLE_+(.Ltmp0-.L0$pb), %eax\n        call    *_ZN14__interception10real_vforkE@GOTOFF(%eax)\n\n        // Restore the stack frame.\n        // 12(%esp) return address\n        // 8(%esp) spill %ebx\n        // 4(%esp) spill REAL(vfork) return value\n        // (%esp) call frame (arg0) for __*_handle_vfork\n        sub     $16, %esp\n        mov     %ebx, 8(%esp)\n        mov     %eax, 4(%esp)\n\n        // Form GOT address in %ebx.\n        call    .L1$pb\n.L1$pb:\n        pop     %ebx\n.Ltmp1:\n        add     $_GLOBAL_OFFSET_TABLE_+(.Ltmp1-.L1$pb), %ebx\n\n        // Restore original return address.\n        call    COMMON_INTERCEPTOR_SPILL_AREA\n        mov     (%eax), %ecx\n        mov     %ecx, 12(%esp)\n        mov     4(%esp), %eax\n\n        // Call handle_vfork in the parent process (%rax != 0).\n        test    %eax, %eax\n        je      .L_exit\n\n        lea     16(%esp), %ecx\n        mov     %ecx, (%esp)\n        call    COMMON_INTERCEPTOR_HANDLE_VFORK@PLT\n\n.L_exit:\n        mov     4(%esp), %eax\n        mov     8(%esp), %ebx\n        add     $12, %esp\n        ret\nASM_SIZE(vfork)\n\n.weak vfork\n.set vfork, ASM_WRAPPER_NAME(vfork)\n\n#endif\n"
  },
  {
    "path": "runtime/sanitizer_common/sanitizer_common_interceptors_vfork_riscv64.inc.S",
    "content": "#if (defined(__riscv) && (__riscv_xlen == 64)) && defined(__linux__)\n\n#include \"sanitizer_common/sanitizer_asm.h\"\n\nASM_HIDDEN(COMMON_INTERCEPTOR_SPILL_AREA)\n\n.comm _ZN14__interception10real_vforkE,8,8\n.globl ASM_WRAPPER_NAME(vfork)\nASM_TYPE_FUNCTION(ASM_WRAPPER_NAME(vfork))\nASM_WRAPPER_NAME(vfork):\n        // Save ra in the off-stack spill area.\n        // allocate space on stack\n        addi    sp, sp, -16\n        // store ra value \n        sd      ra, 8(sp)\n        call    COMMON_INTERCEPTOR_SPILL_AREA\n        // restore previous values from stack\n        ld      ra, 8(sp)\n        // adjust stack\n        addi    sp, sp, 16\n        // store ra by x10\n        sd      ra, 0(x10)\n\n        // Call real vfork. This may return twice. User code that runs between the first and the second return\n        // may clobber the stack frame of the interceptor; that's why it does not have a frame.\n        la x10, _ZN14__interception10real_vforkE\n        ld x10, 0(x10)\n        jalr x10\n\n        // adjust stack\n        addi    sp, sp, -16\n        // store x10 by adjusted stack\n        sd      x10, 8(sp)\n        // jump to exit label if x10 is 0\n        beqz    x10, .L_exit\n\n        // x0 != 0 => parent process. Clear stack shadow.\n        // put old sp to x10\n        addi   x10, sp, 16\n        call   COMMON_INTERCEPTOR_HANDLE_VFORK\n\n.L_exit:\n        // Restore ra\n        call   COMMON_INTERCEPTOR_SPILL_AREA\n        ld     ra, 0(x10)\n        // load value by stack\n        ld     x10, 8(sp)\n        // adjust stack\n        addi   sp, sp, 16\n        ret\nASM_SIZE(vfork)\n\n.weak vfork\n.set vfork, ASM_WRAPPER_NAME(vfork)\n\n#endif\n"
  },
  {
    "path": "runtime/sanitizer_common/sanitizer_common_interceptors_vfork_x86_64.inc.S",
    "content": "#if defined(__x86_64__) && defined(__linux__)\n\n#include \"sanitizer_common/sanitizer_asm.h\"\n\n.comm _ZN14__interception10real_vforkE,8,8\n.globl ASM_WRAPPER_NAME(vfork)\nASM_TYPE_FUNCTION(ASM_WRAPPER_NAME(vfork))\nASM_WRAPPER_NAME(vfork):\n        _CET_ENDBR\n        // Store return address in the spill area and tear down the stack frame.\n        push    %rcx\n        call    COMMON_INTERCEPTOR_SPILL_AREA\n        pop     %rcx\n        pop     %rdi\n        mov     %rdi, (%rax)\n\n        call    *_ZN14__interception10real_vforkE(%rip)\n\n        // Restore return address from the spill area.\n        push    %rcx\n        push    %rax\n        call    COMMON_INTERCEPTOR_SPILL_AREA\n        mov     (%rax), %rdx\n        mov     %rdx, 8(%rsp)\n        mov     (%rsp), %rax\n\n        // Call handle_vfork in the parent process (%rax != 0).\n        test    %rax, %rax\n        je      .L_exit\n\n        lea     16(%rsp), %rdi\n        call    COMMON_INTERCEPTOR_HANDLE_VFORK@PLT\n\n.L_exit:\n        pop     %rax\n        ret\nASM_SIZE(vfork)\n\n.weak vfork\n.set vfork, ASM_WRAPPER_NAME(vfork)\n\n#endif\n"
  },
  {
    "path": "runtime/sanitizer_common/sanitizer_common_interface.inc",
    "content": "//===-- sanitizer_common_interface.inc ------------------------------------===//\n//\n// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n// See https://llvm.org/LICENSE.txt for license information.\n// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n//\n//===----------------------------------------------------------------------===//\n// Sanitizer Common interface list.\n//===----------------------------------------------------------------------===//\nINTERFACE_FUNCTION(__sanitizer_acquire_crash_state)\nINTERFACE_FUNCTION(__sanitizer_annotate_contiguous_container)\nINTERFACE_FUNCTION(__sanitizer_contiguous_container_find_bad_address)\nINTERFACE_FUNCTION(__sanitizer_set_death_callback)\nINTERFACE_FUNCTION(__sanitizer_set_report_path)\nINTERFACE_FUNCTION(__sanitizer_set_report_fd)\nINTERFACE_FUNCTION(__sanitizer_get_report_path)\nINTERFACE_FUNCTION(__sanitizer_verify_contiguous_container)\nINTERFACE_WEAK_FUNCTION(__sanitizer_on_print)\nINTERFACE_WEAK_FUNCTION(__sanitizer_report_error_summary)\nINTERFACE_WEAK_FUNCTION(__sanitizer_sandbox_on_notify)\n// Sanitizer weak hooks\nINTERFACE_WEAK_FUNCTION(__sanitizer_weak_hook_memcmp)\nINTERFACE_WEAK_FUNCTION(__sanitizer_weak_hook_strcmp)\nINTERFACE_WEAK_FUNCTION(__sanitizer_weak_hook_strncmp)\nINTERFACE_WEAK_FUNCTION(__sanitizer_weak_hook_strstr)\n// Stacktrace interface.\nINTERFACE_FUNCTION(__sanitizer_get_module_and_offset_for_pc)\nINTERFACE_FUNCTION(__sanitizer_symbolize_global)\nINTERFACE_FUNCTION(__sanitizer_symbolize_pc)\n// Allocator 
interface.\nINTERFACE_FUNCTION(__sanitizer_get_allocated_size)\nINTERFACE_FUNCTION(__sanitizer_get_current_allocated_bytes)\nINTERFACE_FUNCTION(__sanitizer_get_estimated_allocated_size)\nINTERFACE_FUNCTION(__sanitizer_get_free_bytes)\nINTERFACE_FUNCTION(__sanitizer_get_heap_size)\nINTERFACE_FUNCTION(__sanitizer_get_ownership)\nINTERFACE_FUNCTION(__sanitizer_get_unmapped_bytes)\nINTERFACE_FUNCTION(__sanitizer_install_malloc_and_free_hooks)\nINTERFACE_FUNCTION(__sanitizer_purge_allocator)\nINTERFACE_FUNCTION(__sanitizer_print_memory_profile)\nINTERFACE_WEAK_FUNCTION(__sanitizer_free_hook)\nINTERFACE_WEAK_FUNCTION(__sanitizer_malloc_hook)\n"
  },
  {
    "path": "runtime/sanitizer_common/sanitizer_common_interface_posix.inc",
    "content": "//===-- sanitizer_common_interface_posix.inc ------------------------------===//\n//\n// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n// See https://llvm.org/LICENSE.txt for license information.\n// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n//\n//===----------------------------------------------------------------------===//\n// Sanitizer Common interface list only available for Posix systems.\n//===----------------------------------------------------------------------===//\nINTERFACE_WEAK_FUNCTION(__sanitizer_symbolize_code)\nINTERFACE_WEAK_FUNCTION(__sanitizer_symbolize_data)\nINTERFACE_WEAK_FUNCTION(__sanitizer_symbolize_demangle)\nINTERFACE_WEAK_FUNCTION(__sanitizer_symbolize_flush)\nINTERFACE_WEAK_FUNCTION(__sanitizer_symbolize_set_demangle)\nINTERFACE_WEAK_FUNCTION(__sanitizer_symbolize_set_inline_frames)\n"
  },
  {
    "path": "runtime/sanitizer_common/sanitizer_common_libcdep.cpp",
    "content": "//===-- sanitizer_common_libcdep.cpp --------------------------------------===//\n//\n// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n// See https://llvm.org/LICENSE.txt for license information.\n// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n//\n//===----------------------------------------------------------------------===//\n//\n// This file is shared between AddressSanitizer and ThreadSanitizer\n// run-time libraries.\n//===----------------------------------------------------------------------===//\n\n#include \"sanitizer_allocator.h\"\n#include \"sanitizer_allocator_interface.h\"\n#include \"sanitizer_common.h\"\n#include \"sanitizer_flags.h\"\n#include \"sanitizer_procmaps.h\"\n#include \"sanitizer_stackdepot.h\"\n\nnamespace __sanitizer {\n\n#if (SANITIZER_LINUX || SANITIZER_NETBSD) && !SANITIZER_GO\n// Weak default implementation for when sanitizer_stackdepot is not linked in.\nSANITIZER_WEAK_ATTRIBUTE StackDepotStats StackDepotGetStats() { return {}; }\n\nvoid *BackgroundThread(void *arg) {\n  VPrintf(1, \"%s: Started BackgroundThread\\n\", SanitizerToolName);\n  const uptr hard_rss_limit_mb = common_flags()->hard_rss_limit_mb;\n  const uptr soft_rss_limit_mb = common_flags()->soft_rss_limit_mb;\n  const bool heap_profile = common_flags()->heap_profile;\n  uptr prev_reported_rss = 0;\n  uptr prev_reported_stack_depot_size = 0;\n  bool reached_soft_rss_limit = false;\n  uptr rss_during_last_reported_profile = 0;\n  while (true) {\n    SleepForMillis(100);\n    const uptr current_rss_mb = GetRSS() >> 20;\n    if (Verbosity()) {\n      // If RSS has grown 10% since last time, print some information.\n      if (prev_reported_rss * 11 / 10 < current_rss_mb) {\n        Printf(\"%s: RSS: %zdMb\\n\", SanitizerToolName, current_rss_mb);\n        prev_reported_rss = current_rss_mb;\n      }\n      // If stack depot has grown 10% since last time, print it too.\n      StackDepotStats stack_depot_stats = 
StackDepotGetStats();\n      if (prev_reported_stack_depot_size * 11 / 10 <\n          stack_depot_stats.allocated) {\n        Printf(\"%s: StackDepot: %zd ids; %zdM allocated\\n\", SanitizerToolName,\n               stack_depot_stats.n_uniq_ids, stack_depot_stats.allocated >> 20);\n        prev_reported_stack_depot_size = stack_depot_stats.allocated;\n      }\n    }\n    // Check RSS against the limit.\n    if (hard_rss_limit_mb && hard_rss_limit_mb < current_rss_mb) {\n      Report(\"%s: hard rss limit exhausted (%zdMb vs %zdMb)\\n\",\n             SanitizerToolName, hard_rss_limit_mb, current_rss_mb);\n      DumpProcessMap();\n      Die();\n    }\n    if (soft_rss_limit_mb) {\n      if (soft_rss_limit_mb < current_rss_mb && !reached_soft_rss_limit) {\n        reached_soft_rss_limit = true;\n        Report(\"%s: soft rss limit exhausted (%zdMb vs %zdMb)\\n\",\n               SanitizerToolName, soft_rss_limit_mb, current_rss_mb);\n        SetRssLimitExceeded(true);\n      } else if (soft_rss_limit_mb >= current_rss_mb &&\n                 reached_soft_rss_limit) {\n        reached_soft_rss_limit = false;\n        SetRssLimitExceeded(false);\n      }\n    }\n    if (heap_profile &&\n        current_rss_mb > rss_during_last_reported_profile * 1.1) {\n      Printf(\"\\n\\nHEAP PROFILE at RSS %zdMb\\n\", current_rss_mb);\n      __sanitizer_print_memory_profile(90, 20);\n      rss_during_last_reported_profile = current_rss_mb;\n    }\n  }\n}\n\nvoid MaybeStartBackgroudThread() {\n  // Need to implement/test on other platforms.\n  // Start the background thread if one of the rss limits is given.\n  if (!common_flags()->hard_rss_limit_mb &&\n      !common_flags()->soft_rss_limit_mb &&\n      !common_flags()->heap_profile) return;\n  if (!&real_pthread_create) {\n    VPrintf(1, \"%s: real_pthread_create undefined\\n\", SanitizerToolName);\n    return;  // Can't spawn the thread anyway.\n  }\n\n  static bool started = false;\n  if (!started) {\n    started = true;\n    
internal_start_thread(BackgroundThread, nullptr);\n  }\n}\n\n#  if !SANITIZER_START_BACKGROUND_THREAD_IN_ASAN_INTERNAL\n#    pragma clang diagnostic push\n// We avoid global-constructors to be sure that globals are ready when\n// sanitizers need them. This can happend before global constructors executed.\n// Here we don't mind if thread is started on later stages.\n#    pragma clang diagnostic ignored \"-Wglobal-constructors\"\nstatic struct BackgroudThreadStarted {\n  BackgroudThreadStarted() { MaybeStartBackgroudThread(); }\n} background_thread_strarter UNUSED;\n#    pragma clang diagnostic pop\n#  endif\n#else\nvoid MaybeStartBackgroudThread() {}\n#endif\n\nvoid WriteToSyslog(const char *msg) {\n  InternalScopedString msg_copy;\n  msg_copy.append(\"%s\", msg);\n  const char *p = msg_copy.data();\n\n  // Print one line at a time.\n  // syslog, at least on Android, has an implicit message length limit.\n  while (char* q = internal_strchr(p, '\\n')) {\n    *q = '\\0';\n    WriteOneLineToSyslog(p);\n    p = q + 1;\n  }\n  // Print remaining characters, if there are any.\n  // Note that this will add an extra newline at the end.\n  // FIXME: buffer extra output. This would need a thread-local buffer, which\n  // on Android requires plugging into the tools (ex. 
ASan's) Thread class.\n  if (*p)\n    WriteOneLineToSyslog(p);\n}\n\nstatic void (*sandboxing_callback)();\nvoid SetSandboxingCallback(void (*f)()) {\n  sandboxing_callback = f;\n}\n\nuptr ReservedAddressRange::InitAligned(uptr size, uptr align,\n                                       const char *name) {\n  CHECK(IsPowerOfTwo(align));\n  if (align <= GetPageSizeCached())\n    return Init(size, name);\n  uptr start = Init(size + align, name);\n  start += align - (start & (align - 1));\n  return start;\n}\n\n#if !SANITIZER_FUCHSIA\n\n// Reserve memory range [beg, end].\n// We need to use inclusive range because end+1 may not be representable.\nvoid ReserveShadowMemoryRange(uptr beg, uptr end, const char *name,\n                              bool madvise_shadow) {\n  CHECK_EQ((beg % GetMmapGranularity()), 0);\n  CHECK_EQ(((end + 1) % GetMmapGranularity()), 0);\n  uptr size = end - beg + 1;\n  DecreaseTotalMmap(size);  // Don't count the shadow against mmap_limit_mb.\n  if (madvise_shadow ? !MmapFixedSuperNoReserve(beg, size, name)\n                     : !MmapFixedNoReserve(beg, size, name)) {\n    Report(\n        \"ReserveShadowMemoryRange failed while trying to map 0x%zx bytes. 
\"\n        \"Perhaps you're using ulimit -v\\n\",\n        size);\n    Abort();\n  }\n  if (madvise_shadow && common_flags()->use_madv_dontdump)\n    DontDumpShadowMemory(beg, size);\n}\n\nvoid ProtectGap(uptr addr, uptr size, uptr zero_base_shadow_start,\n                uptr zero_base_max_shadow_start) {\n  if (!size)\n    return;\n  void *res = MmapFixedNoAccess(addr, size, \"shadow gap\");\n  if (addr == (uptr)res)\n    return;\n  // A few pages at the start of the address space can not be protected.\n  // But we really want to protect as much as possible, to prevent this memory\n  // being returned as a result of a non-FIXED mmap().\n  if (addr == zero_base_shadow_start) {\n    uptr step = GetMmapGranularity();\n    while (size > step && addr < zero_base_max_shadow_start) {\n      addr += step;\n      size -= step;\n      void *res = MmapFixedNoAccess(addr, size, \"shadow gap\");\n      if (addr == (uptr)res)\n        return;\n    }\n  }\n\n  Report(\n      \"ERROR: Failed to protect the shadow gap. \"\n      \"%s cannot proceed correctly. ABORTING.\\n\",\n      SanitizerToolName);\n  DumpProcessMap();\n  Die();\n}\n\n#endif  // !SANITIZER_FUCHSIA\n\n#if !SANITIZER_WINDOWS && !SANITIZER_GO\n// Weak default implementation for when sanitizer_stackdepot is not linked in.\nSANITIZER_WEAK_ATTRIBUTE void StackDepotStopBackgroundThread() {}\nstatic void StopStackDepotBackgroundThread() {\n  StackDepotStopBackgroundThread();\n}\n#else\n// SANITIZER_WEAK_ATTRIBUTE is unsupported.\nstatic void StopStackDepotBackgroundThread() {}\n#endif\n\n}  // namespace __sanitizer\n\nSANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_sandbox_on_notify,\n                             __sanitizer_sandbox_arguments *args) {\n  __sanitizer::StopStackDepotBackgroundThread();\n  __sanitizer::PlatformPrepareForSandboxing(args);\n  if (__sanitizer::sandboxing_callback)\n    __sanitizer::sandboxing_callback();\n}\n"
  },
  {
    "path": "runtime/sanitizer_common/sanitizer_common_nolibc.cpp",
    "content": "//===-- sanitizer_common_nolibc.cpp ---------------------------------------===//\n//\n// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n// See https://llvm.org/LICENSE.txt for license information.\n// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n//\n//===----------------------------------------------------------------------===//\n//\n// This file contains stubs for libc function to facilitate optional use of\n// libc in no-libcdep sources.\n//===----------------------------------------------------------------------===//\n\n#include \"sanitizer_common.h\"\n#include \"sanitizer_flags.h\"\n#include \"sanitizer_libc.h\"\n#include \"sanitizer_platform.h\"\n\nnamespace __sanitizer {\n\n// The Windows implementations of these functions use the win32 API directly,\n// bypassing libc.\n#if !SANITIZER_WINDOWS\n#if SANITIZER_LINUX\nvoid LogMessageOnPrintf(const char *str) {}\n#endif\nvoid WriteToSyslog(const char *buffer) {}\nvoid Abort() { internal__exit(1); }\nbool CreateDir(const char *pathname) { return false; }\n#endif // !SANITIZER_WINDOWS\n\n#if !SANITIZER_WINDOWS && !SANITIZER_MAC\nvoid ListOfModules::init() {}\nvoid InitializePlatformCommonFlags(CommonFlags *cf) {}\n#endif\n\n}  // namespace __sanitizer\n"
  },
  {
    "path": "runtime/sanitizer_common/sanitizer_common_syscalls.inc",
    "content": "//===-- sanitizer_common_syscalls.inc ---------------------------*- C++ -*-===//\n//\n// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n// See https://llvm.org/LICENSE.txt for license information.\n// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n//\n//===----------------------------------------------------------------------===//\n//\n// Common syscalls handlers for tools like AddressSanitizer,\n// ThreadSanitizer, MemorySanitizer, etc.\n//\n// This file should be included into the tool's interceptor file,\n// which has to define it's own macros:\n//   COMMON_SYSCALL_PRE_READ_RANGE\n//          Called in prehook for regions that will be read by the kernel and\n//          must be initialized.\n//   COMMON_SYSCALL_PRE_WRITE_RANGE\n//          Called in prehook for regions that will be written to by the kernel\n//          and must be addressable. The actual write range may be smaller than\n//          reported in the prehook. See POST_WRITE_RANGE.\n//   COMMON_SYSCALL_POST_READ_RANGE\n//          Called in posthook for regions that were read by the kernel. 
Does\n//          not make much sense.\n//   COMMON_SYSCALL_POST_WRITE_RANGE\n//          Called in posthook for regions that were written to by the kernel\n//          and are now initialized.\n//   COMMON_SYSCALL_ACQUIRE(addr)\n//          Acquire memory visibility from addr.\n//   COMMON_SYSCALL_RELEASE(addr)\n//          Release memory visibility to addr.\n//   COMMON_SYSCALL_FD_CLOSE(fd)\n//          Called before closing file descriptor fd.\n//   COMMON_SYSCALL_FD_ACQUIRE(fd)\n//          Acquire memory visibility from fd.\n//   COMMON_SYSCALL_FD_RELEASE(fd)\n//          Release memory visibility to fd.\n//   COMMON_SYSCALL_PRE_FORK()\n//          Called before fork syscall.\n//   COMMON_SYSCALL_POST_FORK(long res)\n//          Called after fork syscall.\n//===----------------------------------------------------------------------===//\n\n#include \"sanitizer_platform.h\"\n#if SANITIZER_LINUX\n\n#  include \"sanitizer_libc.h\"\n\n#  define PRE_SYSCALL(name) \\\n    SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_syscall_pre_impl_##name\n#  define PRE_READ(p, s) COMMON_SYSCALL_PRE_READ_RANGE(p, s)\n#  define PRE_WRITE(p, s) COMMON_SYSCALL_PRE_WRITE_RANGE(p, s)\n\n#  define POST_SYSCALL(name) \\\n    SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_syscall_post_impl_##name\n#  define POST_READ(p, s) COMMON_SYSCALL_POST_READ_RANGE(p, s)\n#  define POST_WRITE(p, s) COMMON_SYSCALL_POST_WRITE_RANGE(p, s)\n\n#  ifndef COMMON_SYSCALL_ACQUIRE\n#    define COMMON_SYSCALL_ACQUIRE(addr) ((void)(addr))\n#  endif\n\n#  ifndef COMMON_SYSCALL_RELEASE\n#    define COMMON_SYSCALL_RELEASE(addr) ((void)(addr))\n#  endif\n\n#  ifndef COMMON_SYSCALL_FD_CLOSE\n#    define COMMON_SYSCALL_FD_CLOSE(fd) ((void)(fd))\n#  endif\n\n#  ifndef COMMON_SYSCALL_FD_ACQUIRE\n#    define COMMON_SYSCALL_FD_ACQUIRE(fd) ((void)(fd))\n#  endif\n\n#  ifndef COMMON_SYSCALL_FD_RELEASE\n#    define COMMON_SYSCALL_FD_RELEASE(fd) ((void)(fd))\n#  endif\n\n#  ifndef COMMON_SYSCALL_PRE_FORK\n#    define 
COMMON_SYSCALL_PRE_FORK() \\\n      {}\n#  endif\n\n#  ifndef COMMON_SYSCALL_POST_FORK\n#    define COMMON_SYSCALL_POST_FORK(res) \\\n      {}\n#  endif\n\n// FIXME: do some kind of PRE_READ for all syscall arguments (int(s) and such).\n\nextern \"C\" {\nstruct sanitizer_kernel_iovec {\n  void *iov_base;\n  unsigned long iov_len;\n};\n\nstruct sanitizer_kernel_msghdr {\n  void *msg_name;\n  int msg_namelen;\n  struct sanitizer_kernel_iovec *msg_iov;\n  unsigned long msg_iovlen;\n  void *msg_control;\n  unsigned long msg_controllen;\n  unsigned msg_flags;\n};\n\nstruct sanitizer_kernel_mmsghdr {\n  struct sanitizer_kernel_msghdr msg_hdr;\n  unsigned msg_len;\n};\n\nstruct sanitizer_kernel_timespec {\n  long tv_sec;\n  long tv_nsec;\n};\n\nstruct sanitizer_kernel_timeval {\n  long tv_sec;\n  long tv_usec;\n};\n\nstruct sanitizer_kernel_rusage {\n  struct sanitizer_kernel_timeval ru_timeval[2];\n  long ru_long[14];\n};\n\nstruct sanitizer_kernel_sockaddr {\n  unsigned short sa_family;\n  char sa_data[14];\n};\n\n// Real sigset size is always passed as a syscall argument.\n// Declare it \"void\" to catch sizeof(kernel_sigset_t).\ntypedef void kernel_sigset_t;\n\nstatic void kernel_write_iovec(const __sanitizer_iovec *iovec, SIZE_T iovlen,\n                               SIZE_T maxlen) {\n  for (SIZE_T i = 0; i < iovlen && maxlen; ++i) {\n    SSIZE_T sz = Min(iovec[i].iov_len, maxlen);\n    POST_WRITE(iovec[i].iov_base, sz);\n    maxlen -= sz;\n  }\n}\n\n// This functions uses POST_READ, because it needs to run after syscall to know\n// the real read range.\nstatic void kernel_read_iovec(const __sanitizer_iovec *iovec, SIZE_T iovlen,\n                              SIZE_T maxlen) {\n  POST_READ(iovec, sizeof(*iovec) * iovlen);\n  for (SIZE_T i = 0; i < iovlen && maxlen; ++i) {\n    SSIZE_T sz = Min(iovec[i].iov_len, maxlen);\n    POST_READ(iovec[i].iov_base, sz);\n    maxlen -= sz;\n  }\n}\n\nPRE_SYSCALL(recvmsg)(long sockfd, sanitizer_kernel_msghdr *msg, long flags) {\n 
 PRE_READ(msg, sizeof(*msg));\n}\n\nPOST_SYSCALL(recvmsg)\n(long res, long sockfd, sanitizer_kernel_msghdr *msg, long flags) {\n  if (res >= 0) {\n    if (msg) {\n      for (unsigned long i = 0; i < msg->msg_iovlen; ++i) {\n        POST_WRITE(msg->msg_iov[i].iov_base, msg->msg_iov[i].iov_len);\n      }\n      POST_WRITE(msg->msg_control, msg->msg_controllen);\n    }\n  }\n}\n\nPRE_SYSCALL(recvmmsg)\n(long fd, sanitizer_kernel_mmsghdr *msg, long vlen, long flags, void *timeout) {\n  PRE_READ(msg, vlen * sizeof(*msg));\n}\n\nPOST_SYSCALL(recvmmsg)\n(long res, long fd, sanitizer_kernel_mmsghdr *msg, long vlen, long flags,\n void *timeout) {\n  if (res >= 0) {\n    if (msg) {\n      for (unsigned long i = 0; i < msg->msg_hdr.msg_iovlen; ++i) {\n        POST_WRITE(msg->msg_hdr.msg_iov[i].iov_base,\n                   msg->msg_hdr.msg_iov[i].iov_len);\n      }\n      POST_WRITE(msg->msg_hdr.msg_control, msg->msg_hdr.msg_controllen);\n      POST_WRITE(&msg->msg_len, sizeof(msg->msg_len));\n    }\n    if (timeout)\n      POST_WRITE(timeout, struct_timespec_sz);\n  }\n}\n\nPRE_SYSCALL(read)(long fd, void *buf, uptr count) {\n  if (buf) {\n    PRE_WRITE(buf, count);\n  }\n}\n\nPOST_SYSCALL(read)(long res, long fd, void *buf, uptr count) {\n  if (res > 0 && buf) {\n    POST_WRITE(buf, res);\n  }\n}\n\nPRE_SYSCALL(time)(void *tloc) {}\n\nPOST_SYSCALL(time)(long res, void *tloc) {\n  if (res >= 0) {\n    if (tloc)\n      POST_WRITE(tloc, sizeof(long));\n  }\n}\n\nPRE_SYSCALL(stime)(void *tptr) {}\n\nPOST_SYSCALL(stime)(long res, void *tptr) {\n  if (res >= 0) {\n    if (tptr)\n      POST_WRITE(tptr, sizeof(long));\n  }\n}\n\nPRE_SYSCALL(gettimeofday)(void *tv, void *tz) {}\n\nPOST_SYSCALL(gettimeofday)(long res, void *tv, void *tz) {\n  if (res >= 0) {\n    if (tv)\n      POST_WRITE(tv, timeval_sz);\n    if (tz)\n      POST_WRITE(tz, struct_timezone_sz);\n  }\n}\n\nPRE_SYSCALL(settimeofday)(void *tv, void *tz) {}\n\nPOST_SYSCALL(settimeofday)(long res, void *tv, void *tz) {\n  
if (res >= 0) {\n    if (tv)\n      POST_WRITE(tv, timeval_sz);\n    if (tz)\n      POST_WRITE(tz, struct_timezone_sz);\n  }\n}\n\n#  if !SANITIZER_ANDROID\nPRE_SYSCALL(adjtimex)(void *txc_p) {}\n\nPOST_SYSCALL(adjtimex)(long res, void *txc_p) {\n  if (res >= 0) {\n    if (txc_p)\n      POST_WRITE(txc_p, struct_timex_sz);\n  }\n}\n#  endif\n\nPRE_SYSCALL(times)(void *tbuf) {}\n\nPOST_SYSCALL(times)(long res, void *tbuf) {\n  if (res >= 0) {\n    if (tbuf)\n      POST_WRITE(tbuf, struct_tms_sz);\n  }\n}\n\nPRE_SYSCALL(gettid)() {}\n\nPOST_SYSCALL(gettid)(long res) {}\n\nPRE_SYSCALL(nanosleep)(void *rqtp, void *rmtp) {}\n\nPOST_SYSCALL(nanosleep)(long res, void *rqtp, void *rmtp) {\n  if (res >= 0) {\n    if (rqtp)\n      POST_WRITE(rqtp, struct_timespec_sz);\n    if (rmtp)\n      POST_WRITE(rmtp, struct_timespec_sz);\n  }\n}\n\nPRE_SYSCALL(alarm)(long seconds) {}\n\nPOST_SYSCALL(alarm)(long res, long seconds) {}\n\nPRE_SYSCALL(getpid)() {}\n\nPOST_SYSCALL(getpid)(long res) {}\n\nPRE_SYSCALL(getppid)() {}\n\nPOST_SYSCALL(getppid)(long res) {}\n\nPRE_SYSCALL(getuid)() {}\n\nPOST_SYSCALL(getuid)(long res) {}\n\nPRE_SYSCALL(geteuid)() {}\n\nPOST_SYSCALL(geteuid)(long res) {}\n\nPRE_SYSCALL(getgid)() {}\n\nPOST_SYSCALL(getgid)(long res) {}\n\nPRE_SYSCALL(getegid)() {}\n\nPOST_SYSCALL(getegid)(long res) {}\n\nPRE_SYSCALL(getresuid)(void *ruid, void *euid, void *suid) {}\n\nPOST_SYSCALL(getresuid)(long res, void *ruid, void *euid, void *suid) {\n  if (res >= 0) {\n    if (ruid)\n      POST_WRITE(ruid, sizeof(unsigned));\n    if (euid)\n      POST_WRITE(euid, sizeof(unsigned));\n    if (suid)\n      POST_WRITE(suid, sizeof(unsigned));\n  }\n}\n\nPRE_SYSCALL(getresgid)(void *rgid, void *egid, void *sgid) {}\n\nPOST_SYSCALL(getresgid)(long res, void *rgid, void *egid, void *sgid) {\n  if (res >= 0) {\n    if (rgid)\n      POST_WRITE(rgid, sizeof(unsigned));\n    if (egid)\n      POST_WRITE(egid, sizeof(unsigned));\n    if (sgid)\n      POST_WRITE(sgid, sizeof(unsigned));\n  
}\n}\n\nPRE_SYSCALL(getpgid)(long pid) {}\n\nPOST_SYSCALL(getpgid)(long res, long pid) {}\n\nPRE_SYSCALL(getpgrp)() {}\n\nPOST_SYSCALL(getpgrp)(long res) {}\n\nPRE_SYSCALL(getsid)(long pid) {}\n\nPOST_SYSCALL(getsid)(long res, long pid) {}\n\nPRE_SYSCALL(getgroups)(long gidsetsize, void *grouplist) {}\n\nPOST_SYSCALL(getgroups)\n(long res, long gidsetsize, __sanitizer___kernel_gid_t *grouplist) {\n  if (res >= 0) {\n    if (grouplist)\n      POST_WRITE(grouplist, res * sizeof(*grouplist));\n  }\n}\n\nPRE_SYSCALL(setregid)(long rgid, long egid) {}\n\nPOST_SYSCALL(setregid)(long res, long rgid, long egid) {}\n\nPRE_SYSCALL(setgid)(long gid) {}\n\nPOST_SYSCALL(setgid)(long res, long gid) {}\n\nPRE_SYSCALL(setreuid)(long ruid, long euid) {}\n\nPOST_SYSCALL(setreuid)(long res, long ruid, long euid) {}\n\nPRE_SYSCALL(setuid)(long uid) {}\n\nPOST_SYSCALL(setuid)(long res, long uid) {}\n\nPRE_SYSCALL(setresuid)(long ruid, long euid, long suid) {}\n\nPOST_SYSCALL(setresuid)(long res, long ruid, long euid, long suid) {}\n\nPRE_SYSCALL(setresgid)(long rgid, long egid, long sgid) {}\n\nPOST_SYSCALL(setresgid)(long res, long rgid, long egid, long sgid) {}\n\nPRE_SYSCALL(setfsuid)(long uid) {}\n\nPOST_SYSCALL(setfsuid)(long res, long uid) {}\n\nPRE_SYSCALL(setfsgid)(long gid) {}\n\nPOST_SYSCALL(setfsgid)(long res, long gid) {}\n\nPRE_SYSCALL(setpgid)(long pid, long pgid) {}\n\nPOST_SYSCALL(setpgid)(long res, long pid, long pgid) {}\n\nPRE_SYSCALL(setsid)() {}\n\nPOST_SYSCALL(setsid)(long res) {}\n\nPRE_SYSCALL(setgroups)(long gidsetsize, __sanitizer___kernel_gid_t *grouplist) {\n  if (grouplist)\n    POST_WRITE(grouplist, gidsetsize * sizeof(*grouplist));\n}\n\nPOST_SYSCALL(setgroups)\n(long res, long gidsetsize, __sanitizer___kernel_gid_t *grouplist) {}\n\nPRE_SYSCALL(acct)(const void *name) {\n  if (name)\n    PRE_READ(name, __sanitizer::internal_strlen((const char *)name) + 1);\n}\n\nPOST_SYSCALL(acct)(long res, const void *name) {}\n\nPRE_SYSCALL(capget)(void *header, void 
*dataptr) {\n  if (header)\n    PRE_READ(header, __user_cap_header_struct_sz);\n}\n\nPOST_SYSCALL(capget)(long res, void *header, void *dataptr) {\n  if (res >= 0)\n    if (dataptr)\n      POST_WRITE(dataptr, __user_cap_data_struct_sz);\n}\n\nPRE_SYSCALL(capset)(void *header, const void *data) {\n  if (header)\n    PRE_READ(header, __user_cap_header_struct_sz);\n  if (data)\n    PRE_READ(data, __user_cap_data_struct_sz);\n}\n\nPOST_SYSCALL(capset)(long res, void *header, const void *data) {}\n\nPRE_SYSCALL(personality)(long personality) {}\n\nPOST_SYSCALL(personality)(long res, long personality) {}\n\nPRE_SYSCALL(sigpending)(void *set) {}\n\nPOST_SYSCALL(sigpending)(long res, void *set) {\n  if (res >= 0) {\n    if (set)\n      POST_WRITE(set, old_sigset_t_sz);\n  }\n}\n\nPRE_SYSCALL(sigprocmask)(long how, void *set, void *oset) {}\n\nPOST_SYSCALL(sigprocmask)(long res, long how, void *set, void *oset) {\n  if (res >= 0) {\n    if (set)\n      POST_WRITE(set, old_sigset_t_sz);\n    if (oset)\n      POST_WRITE(oset, old_sigset_t_sz);\n  }\n}\n\nPRE_SYSCALL(getitimer)(long which, void *value) {}\n\nPOST_SYSCALL(getitimer)(long res, long which, void *value) {\n  if (res >= 0) {\n    if (value)\n      POST_WRITE(value, struct_itimerval_sz);\n  }\n}\n\nPRE_SYSCALL(setitimer)(long which, void *value, void *ovalue) {}\n\nPOST_SYSCALL(setitimer)(long res, long which, void *value, void *ovalue) {\n  if (res >= 0) {\n    if (value)\n      POST_WRITE(value, struct_itimerval_sz);\n    if (ovalue)\n      POST_WRITE(ovalue, struct_itimerval_sz);\n  }\n}\n\nPRE_SYSCALL(timer_create)\n(long which_clock, void *timer_event_spec, void *created_timer_id) {}\n\nPOST_SYSCALL(timer_create)\n(long res, long which_clock, void *timer_event_spec, void *created_timer_id) {\n  if (res >= 0) {\n    if (timer_event_spec)\n      POST_WRITE(timer_event_spec, struct_sigevent_sz);\n    if (created_timer_id)\n      POST_WRITE(created_timer_id, sizeof(long));\n  }\n}\n\nPRE_SYSCALL(timer_gettime)(long 
timer_id, void *setting) {}\n\nPOST_SYSCALL(timer_gettime)(long res, long timer_id, void *setting) {\n  if (res >= 0) {\n    if (setting)\n      POST_WRITE(setting, struct_itimerspec_sz);\n  }\n}\n\nPRE_SYSCALL(timer_getoverrun)(long timer_id) {}\n\nPOST_SYSCALL(timer_getoverrun)(long res, long timer_id) {}\n\nPRE_SYSCALL(timer_settime)\n(long timer_id, long flags, const void *new_setting, void *old_setting) {\n  if (new_setting)\n    PRE_READ(new_setting, struct_itimerspec_sz);\n}\n\nPOST_SYSCALL(timer_settime)\n(long res, long timer_id, long flags, const void *new_setting,\n void *old_setting) {\n  if (res >= 0) {\n    if (old_setting)\n      POST_WRITE(old_setting, struct_itimerspec_sz);\n  }\n}\n\nPRE_SYSCALL(timer_delete)(long timer_id) {}\n\nPOST_SYSCALL(timer_delete)(long res, long timer_id) {}\n\nPRE_SYSCALL(clock_settime)(long which_clock, const void *tp) {\n  if (tp)\n    PRE_READ(tp, struct_timespec_sz);\n}\n\nPOST_SYSCALL(clock_settime)(long res, long which_clock, const void *tp) {}\n\nPRE_SYSCALL(clock_gettime)(long which_clock, void *tp) {}\n\nPOST_SYSCALL(clock_gettime)(long res, long which_clock, void *tp) {\n  if (res >= 0) {\n    if (tp)\n      POST_WRITE(tp, struct_timespec_sz);\n  }\n}\n\n#  if !SANITIZER_ANDROID\nPRE_SYSCALL(clock_adjtime)(long which_clock, void *tx) {}\n\nPOST_SYSCALL(clock_adjtime)(long res, long which_clock, void *tx) {\n  if (res >= 0) {\n    if (tx)\n      POST_WRITE(tx, struct_timex_sz);\n  }\n}\n#  endif\n\nPRE_SYSCALL(clock_getres)(long which_clock, void *tp) {}\n\nPOST_SYSCALL(clock_getres)(long res, long which_clock, void *tp) {\n  if (res >= 0) {\n    if (tp)\n      POST_WRITE(tp, struct_timespec_sz);\n  }\n}\n\nPRE_SYSCALL(clock_nanosleep)\n(long which_clock, long flags, const void *rqtp, void *rmtp) {\n  if (rqtp)\n    PRE_READ(rqtp, struct_timespec_sz);\n}\n\nPOST_SYSCALL(clock_nanosleep)\n(long res, long which_clock, long flags, const void *rqtp, void *rmtp) {\n  if (res >= 0) {\n    if (rmtp)\n      
POST_WRITE(rmtp, struct_timespec_sz);\n  }\n}\n\nPRE_SYSCALL(nice)(long increment) {}\n\nPOST_SYSCALL(nice)(long res, long increment) {}\n\nPRE_SYSCALL(sched_setscheduler)(long pid, long policy, void *param) {}\n\nPOST_SYSCALL(sched_setscheduler)(long res, long pid, long policy, void *param) {\n  if (res >= 0) {\n    if (param)\n      POST_WRITE(param, struct_sched_param_sz);\n  }\n}\n\nPRE_SYSCALL(sched_setparam)(long pid, void *param) {\n  if (param)\n    PRE_READ(param, struct_sched_param_sz);\n}\n\nPOST_SYSCALL(sched_setparam)(long res, long pid, void *param) {}\n\nPRE_SYSCALL(sched_getscheduler)(long pid) {}\n\nPOST_SYSCALL(sched_getscheduler)(long res, long pid) {}\n\nPRE_SYSCALL(sched_getparam)(long pid, void *param) {}\n\nPOST_SYSCALL(sched_getparam)(long res, long pid, void *param) {\n  if (res >= 0) {\n    if (param)\n      POST_WRITE(param, struct_sched_param_sz);\n  }\n}\n\nPRE_SYSCALL(sched_setaffinity)(long pid, long len, void *user_mask_ptr) {\n  if (user_mask_ptr)\n    PRE_READ(user_mask_ptr, len);\n}\n\nPOST_SYSCALL(sched_setaffinity)\n(long res, long pid, long len, void *user_mask_ptr) {}\n\nPRE_SYSCALL(sched_getaffinity)(long pid, long len, void *user_mask_ptr) {}\n\nPOST_SYSCALL(sched_getaffinity)\n(long res, long pid, long len, void *user_mask_ptr) {\n  if (res >= 0) {\n    if (user_mask_ptr)\n      POST_WRITE(user_mask_ptr, len);\n  }\n}\n\nPRE_SYSCALL(sched_yield)() {}\n\nPOST_SYSCALL(sched_yield)(long res) {}\n\nPRE_SYSCALL(sched_get_priority_max)(long policy) {}\n\nPOST_SYSCALL(sched_get_priority_max)(long res, long policy) {}\n\nPRE_SYSCALL(sched_get_priority_min)(long policy) {}\n\nPOST_SYSCALL(sched_get_priority_min)(long res, long policy) {}\n\nPRE_SYSCALL(sched_rr_get_interval)(long pid, void *interval) {}\n\nPOST_SYSCALL(sched_rr_get_interval)(long res, long pid, void *interval) {\n  if (res >= 0) {\n    if (interval)\n      POST_WRITE(interval, struct_timespec_sz);\n  }\n}\n\nPRE_SYSCALL(setpriority)(long which, long who, long 
niceval) {}\n\nPOST_SYSCALL(setpriority)(long res, long which, long who, long niceval) {}\n\nPRE_SYSCALL(getpriority)(long which, long who) {}\n\nPOST_SYSCALL(getpriority)(long res, long which, long who) {}\n\nPRE_SYSCALL(shutdown)(long arg0, long arg1) {}\n\nPOST_SYSCALL(shutdown)(long res, long arg0, long arg1) {}\n\nPRE_SYSCALL(reboot)(long magic1, long magic2, long cmd, void *arg) {}\n\nPOST_SYSCALL(reboot)(long res, long magic1, long magic2, long cmd, void *arg) {}\n\nPRE_SYSCALL(restart_syscall)() {}\n\nPOST_SYSCALL(restart_syscall)(long res) {}\n\nPRE_SYSCALL(kexec_load)\n(long entry, long nr_segments, void *segments, long flags) {}\n\nPOST_SYSCALL(kexec_load)\n(long res, long entry, long nr_segments, void *segments, long flags) {\n  if (res >= 0) {\n    if (segments)\n      POST_WRITE(segments, struct_kexec_segment_sz);\n  }\n}\n\nPRE_SYSCALL(exit)(long error_code) {}\n\nPOST_SYSCALL(exit)(long res, long error_code) {}\n\nPRE_SYSCALL(exit_group)(long error_code) {}\n\nPOST_SYSCALL(exit_group)(long res, long error_code) {}\n\nPRE_SYSCALL(wait4)(long pid, void *stat_addr, long options, void *ru) {}\n\nPOST_SYSCALL(wait4)\n(long res, long pid, void *stat_addr, long options, void *ru) {\n  if (res >= 0) {\n    if (stat_addr)\n      POST_WRITE(stat_addr, sizeof(int));\n    if (ru)\n      POST_WRITE(ru, struct_rusage_sz);\n  }\n}\n\nPRE_SYSCALL(waitid)\n(long which, long pid, void *infop, long options, void *ru) {}\n\nPOST_SYSCALL(waitid)\n(long res, long which, long pid, void *infop, long options, void *ru) {\n  if (res >= 0) {\n    if (infop)\n      POST_WRITE(infop, siginfo_t_sz);\n    if (ru)\n      POST_WRITE(ru, struct_rusage_sz);\n  }\n}\n\nPRE_SYSCALL(waitpid)(long pid, void *stat_addr, long options) {}\n\nPOST_SYSCALL(waitpid)(long res, long pid, void *stat_addr, long options) {\n  if (res >= 0) {\n    if (stat_addr)\n      POST_WRITE(stat_addr, sizeof(int));\n  }\n}\n\nPRE_SYSCALL(set_tid_address)(void *tidptr) {}\n\nPOST_SYSCALL(set_tid_address)(long 
res, void *tidptr) {\n  if (res >= 0) {\n    if (tidptr)\n      POST_WRITE(tidptr, sizeof(int));\n  }\n}\n\nPRE_SYSCALL(init_module)(void *umod, long len, const void *uargs) {\n  if (uargs)\n    PRE_READ(uargs, __sanitizer::internal_strlen((const char *)uargs) + 1);\n}\n\nPOST_SYSCALL(init_module)(long res, void *umod, long len, const void *uargs) {}\n\nPRE_SYSCALL(delete_module)(const void *name_user, long flags) {\n  if (name_user)\n    PRE_READ(name_user,\n             __sanitizer::internal_strlen((const char *)name_user) + 1);\n}\n\nPOST_SYSCALL(delete_module)(long res, const void *name_user, long flags) {}\n\nPRE_SYSCALL(rt_sigprocmask)(long how, void *set, void *oset, long sigsetsize) {}\n\nPOST_SYSCALL(rt_sigprocmask)\n(long res, long how, kernel_sigset_t *set, kernel_sigset_t *oset,\n long sigsetsize) {\n  if (res >= 0) {\n    if (set)\n      POST_WRITE(set, sigsetsize);\n    if (oset)\n      POST_WRITE(oset, sigsetsize);\n  }\n}\n\nPRE_SYSCALL(rt_sigpending)(void *set, long sigsetsize) {}\n\nPOST_SYSCALL(rt_sigpending)(long res, kernel_sigset_t *set, long sigsetsize) {\n  if (res >= 0) {\n    if (set)\n      POST_WRITE(set, sigsetsize);\n  }\n}\n\nPRE_SYSCALL(rt_sigtimedwait)\n(const kernel_sigset_t *uthese, void *uinfo, const void *uts, long sigsetsize) {\n  if (uthese)\n    PRE_READ(uthese, sigsetsize);\n  if (uts)\n    PRE_READ(uts, struct_timespec_sz);\n}\n\nPOST_SYSCALL(rt_sigtimedwait)\n(long res, const void *uthese, void *uinfo, const void *uts, long sigsetsize) {\n  if (res >= 0) {\n    if (uinfo)\n      POST_WRITE(uinfo, siginfo_t_sz);\n  }\n}\n\nPRE_SYSCALL(rt_tgsigqueueinfo)(long tgid, long pid, long sig, void *uinfo) {}\n\nPOST_SYSCALL(rt_tgsigqueueinfo)\n(long res, long tgid, long pid, long sig, void *uinfo) {\n  if (res >= 0) {\n    if (uinfo)\n      POST_WRITE(uinfo, siginfo_t_sz);\n  }\n}\n\nPRE_SYSCALL(kill)(long pid, long sig) {}\n\nPOST_SYSCALL(kill)(long res, long pid, long sig) {}\n\nPRE_SYSCALL(tgkill)(long tgid, long pid, long sig) 
{}\n\nPOST_SYSCALL(tgkill)(long res, long tgid, long pid, long sig) {}\n\nPRE_SYSCALL(tkill)(long pid, long sig) {}\n\nPOST_SYSCALL(tkill)(long res, long pid, long sig) {}\n\nPRE_SYSCALL(rt_sigqueueinfo)(long pid, long sig, void *uinfo) {}\n\nPOST_SYSCALL(rt_sigqueueinfo)(long res, long pid, long sig, void *uinfo) {\n  if (res >= 0) {\n    if (uinfo)\n      POST_WRITE(uinfo, siginfo_t_sz);\n  }\n}\n\nPRE_SYSCALL(sgetmask)() {}\n\nPOST_SYSCALL(sgetmask)(long res) {}\n\nPRE_SYSCALL(ssetmask)(long newmask) {}\n\nPOST_SYSCALL(ssetmask)(long res, long newmask) {}\n\nPRE_SYSCALL(signal)(long sig, long handler) {}\n\nPOST_SYSCALL(signal)(long res, long sig, long handler) {}\n\nPRE_SYSCALL(pause)() {}\n\nPOST_SYSCALL(pause)(long res) {}\n\nPRE_SYSCALL(sync)() {}\n\nPOST_SYSCALL(sync)(long res) {}\n\nPRE_SYSCALL(fsync)(long fd) {}\n\nPOST_SYSCALL(fsync)(long res, long fd) {}\n\nPRE_SYSCALL(fdatasync)(long fd) {}\n\nPOST_SYSCALL(fdatasync)(long res, long fd) {}\n\nPRE_SYSCALL(bdflush)(long func, long data) {}\n\nPOST_SYSCALL(bdflush)(long res, long func, long data) {}\n\nPRE_SYSCALL(mount)\n(void *dev_name, void *dir_name, void *type, long flags, void *data) {}\n\nPOST_SYSCALL(mount)\n(long res, void *dev_name, void *dir_name, void *type, long flags, void *data) {\n  if (res >= 0) {\n    if (dev_name)\n      POST_WRITE(dev_name,\n                 __sanitizer::internal_strlen((const char *)dev_name) + 1);\n    if (dir_name)\n      POST_WRITE(dir_name,\n                 __sanitizer::internal_strlen((const char *)dir_name) + 1);\n    if (type)\n      POST_WRITE(type, __sanitizer::internal_strlen((const char *)type) + 1);\n  }\n}\n\nPRE_SYSCALL(umount)(void *name, long flags) {}\n\nPOST_SYSCALL(umount)(long res, void *name, long flags) {\n  if (res >= 0) {\n    if (name)\n      POST_WRITE(name, __sanitizer::internal_strlen((const char *)name) + 1);\n  }\n}\n\nPRE_SYSCALL(oldumount)(void *name) {}\n\nPOST_SYSCALL(oldumount)(long res, void *name) {\n  if (res >= 0) {\n    if 
(name)\n      POST_WRITE(name, __sanitizer::internal_strlen((const char *)name) + 1);\n  }\n}\n\nPRE_SYSCALL(truncate)(const void *path, long length) {\n  if (path)\n    PRE_READ(path, __sanitizer::internal_strlen((const char *)path) + 1);\n}\n\nPOST_SYSCALL(truncate)(long res, const void *path, long length) {}\n\nPRE_SYSCALL(ftruncate)(long fd, long length) {}\n\nPOST_SYSCALL(ftruncate)(long res, long fd, long length) {}\n\nPRE_SYSCALL(stat)(const void *filename, void *statbuf) {\n  if (filename)\n    PRE_READ(filename,\n             __sanitizer::internal_strlen((const char *)filename) + 1);\n}\n\nPOST_SYSCALL(stat)(long res, const void *filename, void *statbuf) {\n  if (res >= 0) {\n    if (statbuf)\n      POST_WRITE(statbuf, struct___old_kernel_stat_sz);\n  }\n}\n\n#  if !SANITIZER_ANDROID\nPRE_SYSCALL(statfs)(const void *path, void *buf) {\n  if (path)\n    PRE_READ(path, __sanitizer::internal_strlen((const char *)path) + 1);\n}\n\nPOST_SYSCALL(statfs)(long res, const void *path, void *buf) {\n  if (res >= 0) {\n    if (buf)\n      POST_WRITE(buf, struct_statfs_sz);\n  }\n}\n\nPRE_SYSCALL(statfs64)(const void *path, long sz, void *buf) {\n  if (path)\n    PRE_READ(path, __sanitizer::internal_strlen((const char *)path) + 1);\n}\n\nPOST_SYSCALL(statfs64)(long res, const void *path, long sz, void *buf) {\n  if (res >= 0) {\n    if (buf)\n      POST_WRITE(buf, struct_statfs64_sz);\n  }\n}\n\nPRE_SYSCALL(fstatfs)(long fd, void *buf) {}\n\nPOST_SYSCALL(fstatfs)(long res, long fd, void *buf) {\n  if (res >= 0) {\n    if (buf)\n      POST_WRITE(buf, struct_statfs_sz);\n  }\n}\n\nPRE_SYSCALL(fstatfs64)(long fd, long sz, void *buf) {}\n\nPOST_SYSCALL(fstatfs64)(long res, long fd, long sz, void *buf) {\n  if (res >= 0) {\n    if (buf)\n      POST_WRITE(buf, struct_statfs64_sz);\n  }\n}\n#  endif  // !SANITIZER_ANDROID\n\nPRE_SYSCALL(lstat)(const void *filename, void *statbuf) {\n  if (filename)\n    PRE_READ(filename,\n             __sanitizer::internal_strlen((const char 
*)filename) + 1);\n}\n\nPOST_SYSCALL(lstat)(long res, const void *filename, void *statbuf) {\n  if (res >= 0) {\n    if (statbuf)\n      POST_WRITE(statbuf, struct___old_kernel_stat_sz);\n  }\n}\n\nPRE_SYSCALL(fstat)(long fd, void *statbuf) {}\n\nPOST_SYSCALL(fstat)(long res, long fd, void *statbuf) {\n  if (res >= 0) {\n    if (statbuf)\n      POST_WRITE(statbuf, struct___old_kernel_stat_sz);\n  }\n}\n\nPRE_SYSCALL(newstat)(const void *filename, void *statbuf) {\n  if (filename)\n    PRE_READ(filename,\n             __sanitizer::internal_strlen((const char *)filename) + 1);\n}\n\nPOST_SYSCALL(newstat)(long res, const void *filename, void *statbuf) {\n  if (res >= 0) {\n    if (statbuf)\n      POST_WRITE(statbuf, struct_kernel_stat_sz);\n  }\n}\n\nPRE_SYSCALL(newlstat)(const void *filename, void *statbuf) {\n  if (filename)\n    PRE_READ(filename,\n             __sanitizer::internal_strlen((const char *)filename) + 1);\n}\n\nPOST_SYSCALL(newlstat)(long res, const void *filename, void *statbuf) {\n  if (res >= 0) {\n    if (statbuf)\n      POST_WRITE(statbuf, struct_kernel_stat_sz);\n  }\n}\n\nPRE_SYSCALL(newfstat)(long fd, void *statbuf) {}\n\nPOST_SYSCALL(newfstat)(long res, long fd, void *statbuf) {\n  if (res >= 0) {\n    if (statbuf)\n      POST_WRITE(statbuf, struct_kernel_stat_sz);\n  }\n}\n\n#  if !SANITIZER_ANDROID\nPRE_SYSCALL(ustat)(long dev, void *ubuf) {}\n\nPOST_SYSCALL(ustat)(long res, long dev, void *ubuf) {\n  if (res >= 0) {\n    if (ubuf)\n      POST_WRITE(ubuf, struct_ustat_sz);\n  }\n}\n#  endif  // !SANITIZER_ANDROID\n\nPRE_SYSCALL(stat64)(const void *filename, void *statbuf) {\n  if (filename)\n    PRE_READ(filename,\n             __sanitizer::internal_strlen((const char *)filename) + 1);\n}\n\nPOST_SYSCALL(stat64)(long res, const void *filename, void *statbuf) {\n  if (res >= 0) {\n    if (statbuf)\n      POST_WRITE(statbuf, struct_kernel_stat64_sz);\n  }\n}\n\nPRE_SYSCALL(fstat64)(long fd, void *statbuf) {}\n\nPOST_SYSCALL(fstat64)(long res, 
long fd, void *statbuf) {\n  if (res >= 0) {\n    if (statbuf)\n      POST_WRITE(statbuf, struct_kernel_stat64_sz);\n  }\n}\n\nPRE_SYSCALL(lstat64)(const void *filename, void *statbuf) {\n  if (filename)\n    PRE_READ(filename,\n             __sanitizer::internal_strlen((const char *)filename) + 1);\n}\n\nPOST_SYSCALL(lstat64)(long res, const void *filename, void *statbuf) {\n  if (res >= 0) {\n    if (statbuf)\n      POST_WRITE(statbuf, struct_kernel_stat64_sz);\n  }\n}\n\nPRE_SYSCALL(setxattr)\n(const void *path, const void *name, const void *value, long size, long flags) {\n  if (path)\n    PRE_READ(path, __sanitizer::internal_strlen((const char *)path) + 1);\n  if (name)\n    PRE_READ(name, __sanitizer::internal_strlen((const char *)name) + 1);\n  if (value)\n    PRE_READ(value, size);\n}\n\nPOST_SYSCALL(setxattr)\n(long res, const void *path, const void *name, const void *value, long size,\n long flags) {}\n\nPRE_SYSCALL(lsetxattr)\n(const void *path, const void *name, const void *value, long size, long flags) {\n  if (path)\n    PRE_READ(path, __sanitizer::internal_strlen((const char *)path) + 1);\n  if (name)\n    PRE_READ(name, __sanitizer::internal_strlen((const char *)name) + 1);\n  if (value)\n    PRE_READ(value, size);\n}\n\nPOST_SYSCALL(lsetxattr)\n(long res, const void *path, const void *name, const void *value, long size,\n long flags) {}\n\nPRE_SYSCALL(fsetxattr)\n(long fd, const void *name, const void *value, long size, long flags) {\n  if (name)\n    PRE_READ(name, __sanitizer::internal_strlen((const char *)name) + 1);\n  if (value)\n    PRE_READ(value, size);\n}\n\nPOST_SYSCALL(fsetxattr)\n(long res, long fd, const void *name, const void *value, long size,\n long flags) {}\n\nPRE_SYSCALL(getxattr)\n(const void *path, const void *name, void *value, long size) {\n  if (path)\n    PRE_READ(path, __sanitizer::internal_strlen((const char *)path) + 1);\n  if (name)\n    PRE_READ(name, __sanitizer::internal_strlen((const char *)name) + 
1);\n}\n\nPOST_SYSCALL(getxattr)\n(long res, const void *path, const void *name, void *value, long size) {\n  if (size && res > 0) {\n    if (value)\n      POST_WRITE(value, res);\n  }\n}\n\nPRE_SYSCALL(lgetxattr)\n(const void *path, const void *name, void *value, long size) {\n  if (path)\n    PRE_READ(path, __sanitizer::internal_strlen((const char *)path) + 1);\n  if (name)\n    PRE_READ(name, __sanitizer::internal_strlen((const char *)name) + 1);\n}\n\nPOST_SYSCALL(lgetxattr)\n(long res, const void *path, const void *name, void *value, long size) {\n  if (size && res > 0) {\n    if (value)\n      POST_WRITE(value, res);\n  }\n}\n\nPRE_SYSCALL(fgetxattr)(long fd, const void *name, void *value, long size) {\n  if (name)\n    PRE_READ(name, __sanitizer::internal_strlen((const char *)name) + 1);\n}\n\nPOST_SYSCALL(fgetxattr)\n(long res, long fd, const void *name, void *value, long size) {\n  if (size && res > 0) {\n    if (value)\n      POST_WRITE(value, res);\n  }\n}\n\nPRE_SYSCALL(listxattr)(const void *path, void *list, long size) {\n  if (path)\n    PRE_READ(path, __sanitizer::internal_strlen((const char *)path) + 1);\n}\n\nPOST_SYSCALL(listxattr)(long res, const void *path, void *list, long size) {\n  if (size && res > 0) {\n    if (list)\n      POST_WRITE(list, res);\n  }\n}\n\nPRE_SYSCALL(llistxattr)(const void *path, void *list, long size) {\n  if (path)\n    PRE_READ(path, __sanitizer::internal_strlen((const char *)path) + 1);\n}\n\nPOST_SYSCALL(llistxattr)(long res, const void *path, void *list, long size) {\n  if (size && res > 0) {\n    if (list)\n      POST_WRITE(list, res);\n  }\n}\n\nPRE_SYSCALL(flistxattr)(long fd, void *list, long size) {}\n\nPOST_SYSCALL(flistxattr)(long res, long fd, void *list, long size) {\n  if (size && res > 0) {\n    if (list)\n      POST_WRITE(list, res);\n  }\n}\n\nPRE_SYSCALL(removexattr)(const void *path, const void *name) {\n  if (path)\n    PRE_READ(path, __sanitizer::internal_strlen((const char *)path) + 1);\n  if 
(name)\n    PRE_READ(name, __sanitizer::internal_strlen((const char *)name) + 1);\n}\n\nPOST_SYSCALL(removexattr)(long res, const void *path, const void *name) {}\n\nPRE_SYSCALL(lremovexattr)(const void *path, const void *name) {\n  if (path)\n    PRE_READ(path, __sanitizer::internal_strlen((const char *)path) + 1);\n  if (name)\n    PRE_READ(name, __sanitizer::internal_strlen((const char *)name) + 1);\n}\n\nPOST_SYSCALL(lremovexattr)(long res, const void *path, const void *name) {}\n\nPRE_SYSCALL(fremovexattr)(long fd, const void *name) {\n  if (name)\n    PRE_READ(name, __sanitizer::internal_strlen((const char *)name) + 1);\n}\n\nPOST_SYSCALL(fremovexattr)(long res, long fd, const void *name) {}\n\nPRE_SYSCALL(brk)(long brk) {}\n\nPOST_SYSCALL(brk)(long res, long brk) {}\n\nPRE_SYSCALL(mprotect)(long start, long len, long prot) {}\n\nPOST_SYSCALL(mprotect)(long res, long start, long len, long prot) {}\n\nPRE_SYSCALL(mremap)\n(long addr, long old_len, long new_len, long flags, long new_addr) {}\n\nPOST_SYSCALL(mremap)\n(long res, long addr, long old_len, long new_len, long flags, long new_addr) {}\n\nPRE_SYSCALL(remap_file_pages)\n(long start, long size, long prot, long pgoff, long flags) {}\n\nPOST_SYSCALL(remap_file_pages)\n(long res, long start, long size, long prot, long pgoff, long flags) {}\n\nPRE_SYSCALL(msync)(long start, long len, long flags) {}\n\nPOST_SYSCALL(msync)(long res, long start, long len, long flags) {}\n\nPRE_SYSCALL(munmap)(long addr, long len) {}\n\nPOST_SYSCALL(munmap)(long res, long addr, long len) {}\n\nPRE_SYSCALL(mlock)(long start, long len) {}\n\nPOST_SYSCALL(mlock)(long res, long start, long len) {}\n\nPRE_SYSCALL(munlock)(long start, long len) {}\n\nPOST_SYSCALL(munlock)(long res, long start, long len) {}\n\nPRE_SYSCALL(mlockall)(long flags) {}\n\nPOST_SYSCALL(mlockall)(long res, long flags) {}\n\nPRE_SYSCALL(munlockall)() {}\n\nPOST_SYSCALL(munlockall)(long res) {}\n\nPRE_SYSCALL(madvise)(long start, long len, long behavior) 
{}\n\nPOST_SYSCALL(madvise)(long res, long start, long len, long behavior) {}\n\nPRE_SYSCALL(mincore)(long start, long len, void *vec) {}\n\nPOST_SYSCALL(mincore)(long res, long start, long len, void *vec) {\n  if (res >= 0) {\n    if (vec) {\n      POST_WRITE(vec, (len + GetPageSizeCached() - 1) / GetPageSizeCached());\n    }\n  }\n}\n\nPRE_SYSCALL(pivot_root)(const void *new_root, const void *put_old) {\n  if (new_root)\n    PRE_READ(new_root,\n             __sanitizer::internal_strlen((const char *)new_root) + 1);\n  if (put_old)\n    PRE_READ(put_old, __sanitizer::internal_strlen((const char *)put_old) + 1);\n}\n\nPOST_SYSCALL(pivot_root)(long res, const void *new_root, const void *put_old) {}\n\nPRE_SYSCALL(chroot)(const void *filename) {\n  if (filename)\n    PRE_READ(filename,\n             __sanitizer::internal_strlen((const char *)filename) + 1);\n}\n\nPOST_SYSCALL(chroot)(long res, const void *filename) {}\n\nPRE_SYSCALL(mknod)(const void *filename, long mode, long dev) {\n  if (filename)\n    PRE_READ(filename,\n             __sanitizer::internal_strlen((const char *)filename) + 1);\n}\n\nPOST_SYSCALL(mknod)(long res, const void *filename, long mode, long dev) {}\n\nPRE_SYSCALL(link)(const void *oldname, const void *newname) {\n  if (oldname)\n    PRE_READ(oldname, __sanitizer::internal_strlen((const char *)oldname) + 1);\n  if (newname)\n    PRE_READ(newname, __sanitizer::internal_strlen((const char *)newname) + 1);\n}\n\nPOST_SYSCALL(link)(long res, const void *oldname, const void *newname) {}\n\nPRE_SYSCALL(symlink)(const void *old, const void *new_) {\n  if (old)\n    PRE_READ(old, __sanitizer::internal_strlen((const char *)old) + 1);\n  if (new_)\n    PRE_READ(new_, __sanitizer::internal_strlen((const char *)new_) + 1);\n}\n\nPOST_SYSCALL(symlink)(long res, const void *old, const void *new_) {}\n\nPRE_SYSCALL(unlink)(const void *pathname) {\n  if (pathname)\n    PRE_READ(pathname,\n             __sanitizer::internal_strlen((const char *)pathname) + 
1);\n}\n\nPOST_SYSCALL(unlink)(long res, const void *pathname) {}\n\nPRE_SYSCALL(rename)(const void *oldname, const void *newname) {\n  if (oldname)\n    PRE_READ(oldname, __sanitizer::internal_strlen((const char *)oldname) + 1);\n  if (newname)\n    PRE_READ(newname, __sanitizer::internal_strlen((const char *)newname) + 1);\n}\n\nPOST_SYSCALL(rename)(long res, const void *oldname, const void *newname) {}\n\nPRE_SYSCALL(chmod)(const void *filename, long mode) {\n  if (filename)\n    PRE_READ(filename,\n             __sanitizer::internal_strlen((const char *)filename) + 1);\n}\n\nPOST_SYSCALL(chmod)(long res, const void *filename, long mode) {}\n\nPRE_SYSCALL(fchmod)(long fd, long mode) {}\n\nPOST_SYSCALL(fchmod)(long res, long fd, long mode) {}\n\nPRE_SYSCALL(fcntl)(long fd, long cmd, long arg) {}\n\nPOST_SYSCALL(fcntl)(long res, long fd, long cmd, long arg) {}\n\nPRE_SYSCALL(fcntl64)(long fd, long cmd, long arg) {}\n\nPOST_SYSCALL(fcntl64)(long res, long fd, long cmd, long arg) {}\n\nPRE_SYSCALL(pipe)(void *fildes) {}\n\nPOST_SYSCALL(pipe)(long res, void *fildes) {\n  if (res >= 0)\n    if (fildes)\n      POST_WRITE(fildes, sizeof(int) * 2);\n}\n\nPRE_SYSCALL(pipe2)(void *fildes, long flags) {}\n\nPOST_SYSCALL(pipe2)(long res, void *fildes, long flags) {\n  if (res >= 0)\n    if (fildes)\n      POST_WRITE(fildes, sizeof(int) * 2);\n}\n\nPRE_SYSCALL(dup)(long fildes) {}\n\nPOST_SYSCALL(dup)(long res, long fildes) {}\n\nPRE_SYSCALL(dup2)(long oldfd, long newfd) {}\n\nPOST_SYSCALL(dup2)(long res, long oldfd, long newfd) {}\n\nPRE_SYSCALL(dup3)(long oldfd, long newfd, long flags) {}\n\nPOST_SYSCALL(dup3)(long res, long oldfd, long newfd, long flags) {}\n\nPRE_SYSCALL(ioperm)(long from, long num, long on) {}\n\nPOST_SYSCALL(ioperm)(long res, long from, long num, long on) {}\n\nPRE_SYSCALL(ioctl)(long fd, long cmd, long arg) {}\n\nPOST_SYSCALL(ioctl)(long res, long fd, long cmd, long arg) {}\n\nPRE_SYSCALL(flock)(long fd, long cmd) {}\n\nPOST_SYSCALL(flock)(long res, 
long fd, long cmd) {}\n\nPRE_SYSCALL(io_setup)(long nr_reqs, void **ctx) {\n  if (ctx)\n    PRE_WRITE(ctx, sizeof(*ctx));\n}\n\nPOST_SYSCALL(io_setup)(long res, long nr_reqs, void **ctx) {\n  if (res >= 0) {\n    if (ctx)\n      POST_WRITE(ctx, sizeof(*ctx));\n    // (*ctx) is actually a pointer to a kernel mapped page, and there are\n    // people out there who are crazy enough to peek into that page's 32-byte\n    // header.\n    if (*ctx)\n      POST_WRITE(*ctx, 32);\n  }\n}\n\nPRE_SYSCALL(io_destroy)(long ctx) {}\n\nPOST_SYSCALL(io_destroy)(long res, long ctx) {}\n\nPRE_SYSCALL(io_getevents)\n(long ctx_id, long min_nr, long nr, __sanitizer_io_event *ioevpp,\n void *timeout) {\n  if (timeout)\n    PRE_READ(timeout, struct_timespec_sz);\n}\n\nPOST_SYSCALL(io_getevents)\n(long res, long ctx_id, long min_nr, long nr, __sanitizer_io_event *ioevpp,\n void *timeout) {\n  if (res >= 0) {\n    if (ioevpp)\n      POST_WRITE(ioevpp, res * sizeof(*ioevpp));\n    if (timeout)\n      POST_WRITE(timeout, struct_timespec_sz);\n  }\n  for (long i = 0; i < res; i++) {\n    // We synchronize io_submit -> io_getevents/io_cancel using the\n    // user-provided data context. Data is not necessary a pointer, it can be\n    // an int, 0 or whatever; acquire/release will correctly handle this.\n    // This scheme can lead to false negatives, e.g. when all operations\n    // synchronize on 0. 
But there does not seem to be a better solution\n    // (except wrapping all operations in own context, which is unreliable).\n    // We can not reliably extract fildes in io_getevents.\n    COMMON_SYSCALL_ACQUIRE((void *)ioevpp[i].data);\n  }\n}\n\nPRE_SYSCALL(io_submit)(long ctx_id, long nr, __sanitizer_iocb **iocbpp) {\n  for (long i = 0; i < nr; ++i) {\n    uptr op = iocbpp[i]->aio_lio_opcode;\n    void *data = (void *)iocbpp[i]->aio_data;\n    void *buf = (void *)iocbpp[i]->aio_buf;\n    uptr len = (uptr)iocbpp[i]->aio_nbytes;\n    if (op == iocb_cmd_pwrite && buf && len) {\n      PRE_READ(buf, len);\n    } else if (op == iocb_cmd_pread && buf && len) {\n      POST_WRITE(buf, len);\n    } else if (op == iocb_cmd_pwritev) {\n      __sanitizer_iovec *iovec = (__sanitizer_iovec *)buf;\n      for (uptr v = 0; v < len; v++)\n        PRE_READ(iovec[v].iov_base, iovec[v].iov_len);\n    } else if (op == iocb_cmd_preadv) {\n      __sanitizer_iovec *iovec = (__sanitizer_iovec *)buf;\n      for (uptr v = 0; v < len; v++)\n        POST_WRITE(iovec[v].iov_base, iovec[v].iov_len);\n    }\n    // See comment in io_getevents.\n    COMMON_SYSCALL_RELEASE(data);\n  }\n}\n\nPOST_SYSCALL(io_submit)\n(long res, long ctx_id, long nr, __sanitizer_iocb **iocbpp) {}\n\nPRE_SYSCALL(io_cancel)\n(long ctx_id, __sanitizer_iocb *iocb, __sanitizer_io_event *result) {}\n\nPOST_SYSCALL(io_cancel)\n(long res, long ctx_id, __sanitizer_iocb *iocb, __sanitizer_io_event *result) {\n  if (res == 0) {\n    if (result) {\n      // See comment in io_getevents.\n      COMMON_SYSCALL_ACQUIRE((void *)result->data);\n      POST_WRITE(result, sizeof(*result));\n    }\n    if (iocb)\n      POST_WRITE(iocb, sizeof(*iocb));\n  }\n}\n\nPRE_SYSCALL(sendfile)(long out_fd, long in_fd, void *offset, long count) {}\n\nPOST_SYSCALL(sendfile)\n(long res, long out_fd, long in_fd, __sanitizer___kernel_off_t *offset,\n long count) {\n  if (res >= 0) {\n    if (offset)\n      POST_WRITE(offset, sizeof(*offset));\n  
}\n}\n\nPRE_SYSCALL(sendfile64)(long out_fd, long in_fd, void *offset, long count) {}\n\nPOST_SYSCALL(sendfile64)\n(long res, long out_fd, long in_fd, __sanitizer___kernel_loff_t *offset,\n long count) {\n  if (res >= 0) {\n    if (offset)\n      POST_WRITE(offset, sizeof(*offset));\n  }\n}\n\nPRE_SYSCALL(readlink)(const void *path, void *buf, long bufsiz) {\n  if (path)\n    PRE_READ(path, __sanitizer::internal_strlen((const char *)path) + 1);\n}\n\nPOST_SYSCALL(readlink)(long res, const void *path, void *buf, long bufsiz) {\n  if (res >= 0) {\n    if (buf)\n      POST_WRITE(buf, __sanitizer::internal_strlen((const char *)buf) + 1);\n  }\n}\n\nPRE_SYSCALL(creat)(const void *pathname, long mode) {\n  if (pathname)\n    PRE_READ(pathname,\n             __sanitizer::internal_strlen((const char *)pathname) + 1);\n}\n\nPOST_SYSCALL(creat)(long res, const void *pathname, long mode) {}\n\nPRE_SYSCALL(open)(const void *filename, long flags, long mode) {\n  if (filename)\n    PRE_READ(filename,\n             __sanitizer::internal_strlen((const char *)filename) + 1);\n}\n\nPOST_SYSCALL(open)(long res, const void *filename, long flags, long mode) {}\n\nPRE_SYSCALL(close)(long fd) { COMMON_SYSCALL_FD_CLOSE((int)fd); }\n\nPOST_SYSCALL(close)(long res, long fd) {}\n\nPRE_SYSCALL(access)(const void *filename, long mode) {\n  if (filename)\n    PRE_READ(filename,\n             __sanitizer::internal_strlen((const char *)filename) + 1);\n}\n\nPOST_SYSCALL(access)(long res, const void *filename, long mode) {}\n\nPRE_SYSCALL(vhangup)() {}\n\nPOST_SYSCALL(vhangup)(long res) {}\n\nPRE_SYSCALL(chown)(const void *filename, long user, long group) {\n  if (filename)\n    PRE_READ(filename,\n             __sanitizer::internal_strlen((const char *)filename) + 1);\n}\n\nPOST_SYSCALL(chown)(long res, const void *filename, long user, long group) {}\n\nPRE_SYSCALL(lchown)(const void *filename, long user, long group) {\n  if (filename)\n    PRE_READ(filename,\n             
__sanitizer::internal_strlen((const char *)filename) + 1);\n}\n\nPOST_SYSCALL(lchown)(long res, const void *filename, long user, long group) {}\n\nPRE_SYSCALL(fchown)(long fd, long user, long group) {}\n\nPOST_SYSCALL(fchown)(long res, long fd, long user, long group) {}\n\n#  if SANITIZER_USES_UID16_SYSCALLS\nPRE_SYSCALL(chown16)(const void *filename, long user, long group) {\n  if (filename)\n    PRE_READ(filename,\n             __sanitizer::internal_strlen((const char *)filename) + 1);\n}\n\nPOST_SYSCALL(chown16)(long res, const void *filename, long user, long group) {}\n\nPRE_SYSCALL(lchown16)(const void *filename, long user, long group) {\n  if (filename)\n    PRE_READ(filename,\n             __sanitizer::internal_strlen((const char *)filename) + 1);\n}\n\nPOST_SYSCALL(lchown16)(long res, const void *filename, long user, long group) {}\n\nPRE_SYSCALL(fchown16)(long fd, long user, long group) {}\n\nPOST_SYSCALL(fchown16)(long res, long fd, long user, long group) {}\n\nPRE_SYSCALL(setregid16)(long rgid, long egid) {}\n\nPOST_SYSCALL(setregid16)(long res, long rgid, long egid) {}\n\nPRE_SYSCALL(setgid16)(long gid) {}\n\nPOST_SYSCALL(setgid16)(long res, long gid) {}\n\nPRE_SYSCALL(setreuid16)(long ruid, long euid) {}\n\nPOST_SYSCALL(setreuid16)(long res, long ruid, long euid) {}\n\nPRE_SYSCALL(setuid16)(long uid) {}\n\nPOST_SYSCALL(setuid16)(long res, long uid) {}\n\nPRE_SYSCALL(setresuid16)(long ruid, long euid, long suid) {}\n\nPOST_SYSCALL(setresuid16)(long res, long ruid, long euid, long suid) {}\n\nPRE_SYSCALL(getresuid16)(void *ruid, void *euid, void *suid) {}\n\nPOST_SYSCALL(getresuid16)\n(long res, __sanitizer___kernel_old_uid_t *ruid,\n __sanitizer___kernel_old_uid_t *euid, __sanitizer___kernel_old_uid_t *suid) {\n  if (res >= 0) {\n    if (ruid)\n      POST_WRITE(ruid, sizeof(*ruid));\n    if (euid)\n      POST_WRITE(euid, sizeof(*euid));\n    if (suid)\n      POST_WRITE(suid, sizeof(*suid));\n  }\n}\n\nPRE_SYSCALL(setresgid16)(long rgid, long egid, long 
sgid) {}\n\nPOST_SYSCALL(setresgid16)(long res, long rgid, long egid, long sgid) {}\n\nPRE_SYSCALL(getresgid16)(void *rgid, void *egid, void *sgid) {}\n\nPOST_SYSCALL(getresgid16)\n(long res, __sanitizer___kernel_old_gid_t *rgid,\n __sanitizer___kernel_old_gid_t *egid, __sanitizer___kernel_old_gid_t *sgid) {\n  if (res >= 0) {\n    if (rgid)\n      POST_WRITE(rgid, sizeof(*rgid));\n    if (egid)\n      POST_WRITE(egid, sizeof(*egid));\n    if (sgid)\n      POST_WRITE(sgid, sizeof(*sgid));\n  }\n}\n\nPRE_SYSCALL(setfsuid16)(long uid) {}\n\nPOST_SYSCALL(setfsuid16)(long res, long uid) {}\n\nPRE_SYSCALL(setfsgid16)(long gid) {}\n\nPOST_SYSCALL(setfsgid16)(long res, long gid) {}\n\nPRE_SYSCALL(getgroups16)\n(long gidsetsize, __sanitizer___kernel_old_gid_t *grouplist) {}\n\nPOST_SYSCALL(getgroups16)\n(long res, long gidsetsize, __sanitizer___kernel_old_gid_t *grouplist) {\n  if (res >= 0) {\n    if (grouplist)\n      POST_WRITE(grouplist, res * sizeof(*grouplist));\n  }\n}\n\nPRE_SYSCALL(setgroups16)\n(long gidsetsize, __sanitizer___kernel_old_gid_t *grouplist) {\n  if (grouplist)\n    POST_WRITE(grouplist, gidsetsize * sizeof(*grouplist));\n}\n\nPOST_SYSCALL(setgroups16)\n(long res, long gidsetsize, __sanitizer___kernel_old_gid_t *grouplist) {}\n\nPRE_SYSCALL(getuid16)() {}\n\nPOST_SYSCALL(getuid16)(long res) {}\n\nPRE_SYSCALL(geteuid16)() {}\n\nPOST_SYSCALL(geteuid16)(long res) {}\n\nPRE_SYSCALL(getgid16)() {}\n\nPOST_SYSCALL(getgid16)(long res) {}\n\nPRE_SYSCALL(getegid16)() {}\n\nPOST_SYSCALL(getegid16)(long res) {}\n#  endif  // SANITIZER_USES_UID16_SYSCALLS\n\nPRE_SYSCALL(utime)(void *filename, void *times) {}\n\nPOST_SYSCALL(utime)(long res, void *filename, void *times) {\n  if (res >= 0) {\n    if (filename)\n      POST_WRITE(filename,\n                 __sanitizer::internal_strlen((const char *)filename) + 1);\n    if (times)\n      POST_WRITE(times, struct_utimbuf_sz);\n  }\n}\n\nPRE_SYSCALL(utimes)(void *filename, void *utimes) {}\n\nPOST_SYSCALL(utimes)(long 
res, void *filename, void *utimes) {\n  if (res >= 0) {\n    if (filename)\n      POST_WRITE(filename,\n                 __sanitizer::internal_strlen((const char *)filename) + 1);\n    if (utimes)\n      POST_WRITE(utimes, timeval_sz);\n  }\n}\n\nPRE_SYSCALL(lseek)(long fd, long offset, long origin) {}\n\nPOST_SYSCALL(lseek)(long res, long fd, long offset, long origin) {}\n\nPRE_SYSCALL(llseek)\n(long fd, long offset_high, long offset_low, void *result, long origin) {}\n\nPOST_SYSCALL(llseek)\n(long res, long fd, long offset_high, long offset_low, void *result,\n long origin) {\n  if (res >= 0) {\n    if (result)\n      POST_WRITE(result, sizeof(long long));\n  }\n}\n\nPRE_SYSCALL(readv)(long fd, const __sanitizer_iovec *vec, long vlen) {}\n\nPOST_SYSCALL(readv)\n(long res, long fd, const __sanitizer_iovec *vec, long vlen) {\n  if (res >= 0) {\n    if (vec)\n      kernel_write_iovec(vec, vlen, res);\n  }\n}\n\nPRE_SYSCALL(write)(long fd, const void *buf, long count) {\n  if (buf)\n    PRE_READ(buf, count);\n}\n\nPOST_SYSCALL(write)(long res, long fd, const void *buf, long count) {}\n\nPRE_SYSCALL(writev)(long fd, const __sanitizer_iovec *vec, long vlen) {}\n\nPOST_SYSCALL(writev)\n(long res, long fd, const __sanitizer_iovec *vec, long vlen) {\n  if (res >= 0) {\n    if (vec)\n      kernel_read_iovec(vec, vlen, res);\n  }\n}\n\n#  ifdef _LP64\nPRE_SYSCALL(pread64)(long fd, void *buf, long count, long pos) {}\n\nPOST_SYSCALL(pread64)(long res, long fd, void *buf, long count, long pos) {\n  if (res >= 0) {\n    if (buf)\n      POST_WRITE(buf, res);\n  }\n}\n\nPRE_SYSCALL(pwrite64)(long fd, const void *buf, long count, long pos) {\n  if (buf)\n    PRE_READ(buf, count);\n}\n\nPOST_SYSCALL(pwrite64)\n(long res, long fd, const void *buf, long count, long pos) {}\n#  else\nPRE_SYSCALL(pread64)(long fd, void *buf, long count, long pos0, long pos1) {}\n\nPOST_SYSCALL(pread64)\n(long res, long fd, void *buf, long count, long pos0, long pos1) {\n  if (res >= 0) {\n    if 
(buf)\n      POST_WRITE(buf, res);\n  }\n}\n\nPRE_SYSCALL(pwrite64)\n(long fd, const void *buf, long count, long pos0, long pos1) {\n  if (buf)\n    PRE_READ(buf, count);\n}\n\nPOST_SYSCALL(pwrite64)\n(long res, long fd, const void *buf, long count, long pos0, long pos1) {}\n#  endif\n\nPRE_SYSCALL(preadv)\n(long fd, const __sanitizer_iovec *vec, long vlen, long pos_l, long pos_h) {}\n\nPOST_SYSCALL(preadv)\n(long res, long fd, const __sanitizer_iovec *vec, long vlen, long pos_l,\n long pos_h) {\n  if (res >= 0) {\n    if (vec)\n      kernel_write_iovec(vec, vlen, res);\n  }\n}\n\nPRE_SYSCALL(pwritev)\n(long fd, const __sanitizer_iovec *vec, long vlen, long pos_l, long pos_h) {}\n\nPOST_SYSCALL(pwritev)\n(long res, long fd, const __sanitizer_iovec *vec, long vlen, long pos_l,\n long pos_h) {\n  if (res >= 0) {\n    if (vec)\n      kernel_read_iovec(vec, vlen, res);\n  }\n}\n\nPRE_SYSCALL(getcwd)(void *buf, long size) {}\n\nPOST_SYSCALL(getcwd)(long res, void *buf, long size) {\n  if (res >= 0) {\n    if (buf)\n      POST_WRITE(buf, __sanitizer::internal_strlen((const char *)buf) + 1);\n  }\n}\n\nPRE_SYSCALL(mkdir)(const void *pathname, long mode) {\n  if (pathname)\n    PRE_READ(pathname,\n             __sanitizer::internal_strlen((const char *)pathname) + 1);\n}\n\nPOST_SYSCALL(mkdir)(long res, const void *pathname, long mode) {}\n\nPRE_SYSCALL(chdir)(const void *filename) {\n  if (filename)\n    PRE_READ(filename,\n             __sanitizer::internal_strlen((const char *)filename) + 1);\n}\n\nPOST_SYSCALL(chdir)(long res, const void *filename) {}\n\nPRE_SYSCALL(fchdir)(long fd) {}\n\nPOST_SYSCALL(fchdir)(long res, long fd) {}\n\nPRE_SYSCALL(rmdir)(const void *pathname) {\n  if (pathname)\n    PRE_READ(pathname,\n             __sanitizer::internal_strlen((const char *)pathname) + 1);\n}\n\nPOST_SYSCALL(rmdir)(long res, const void *pathname) {}\n\nPRE_SYSCALL(lookup_dcookie)(u64 cookie64, void *buf, long len) {}\n\nPOST_SYSCALL(lookup_dcookie)(long res, u64 
cookie64, void *buf, long len) {\n  if (res >= 0) {\n    if (buf)\n      POST_WRITE(buf, __sanitizer::internal_strlen((const char *)buf) + 1);\n  }\n}\n\nPRE_SYSCALL(quotactl)(long cmd, const void *special, long id, void *addr) {\n  if (special)\n    PRE_READ(special, __sanitizer::internal_strlen((const char *)special) + 1);\n}\n\nPOST_SYSCALL(quotactl)\n(long res, long cmd, const void *special, long id, void *addr) {}\n\nPRE_SYSCALL(getdents)(long fd, void *dirent, long count) {}\n\nPOST_SYSCALL(getdents)(long res, long fd, void *dirent, long count) {\n  if (res >= 0) {\n    if (dirent)\n      POST_WRITE(dirent, res);\n  }\n}\n\nPRE_SYSCALL(getdents64)(long fd, void *dirent, long count) {}\n\nPOST_SYSCALL(getdents64)(long res, long fd, void *dirent, long count) {\n  if (res >= 0) {\n    if (dirent)\n      POST_WRITE(dirent, res);\n  }\n}\n\nPRE_SYSCALL(setsockopt)\n(long fd, long level, long optname, void *optval, long optlen) {}\n\nPOST_SYSCALL(setsockopt)\n(long res, long fd, long level, long optname, void *optval, long optlen) {\n  if (res >= 0) {\n    if (optval)\n      POST_WRITE(optval,\n                 __sanitizer::internal_strlen((const char *)optval) + 1);\n  }\n}\n\nPRE_SYSCALL(getsockopt)\n(long fd, long level, long optname, void *optval, void *optlen) {}\n\nPOST_SYSCALL(getsockopt)\n(long res, long fd, long level, long optname, void *optval, void *optlen) {\n  if (res >= 0) {\n    if (optval)\n      POST_WRITE(optval,\n                 __sanitizer::internal_strlen((const char *)optval) + 1);\n    if (optlen)\n      POST_WRITE(optlen, sizeof(int));\n  }\n}\n\nPRE_SYSCALL(bind)(long arg0, sanitizer_kernel_sockaddr *arg1, long arg2) {}\n\nPOST_SYSCALL(bind)\n(long res, long arg0, sanitizer_kernel_sockaddr *arg1, long arg2) {\n  if (res >= 0) {\n    if (arg1)\n      POST_WRITE(arg1, sizeof(*arg1));\n  }\n}\n\nPRE_SYSCALL(connect)(long arg0, sanitizer_kernel_sockaddr *arg1, long arg2) {}\n\nPOST_SYSCALL(connect)\n(long res, long arg0, 
sanitizer_kernel_sockaddr *arg1, long arg2) {\n  if (res >= 0) {\n    if (arg1)\n      POST_WRITE(arg1, sizeof(*arg1));\n  }\n}\n\nPRE_SYSCALL(accept)(long arg0, sanitizer_kernel_sockaddr *arg1, void *arg2) {}\n\nPOST_SYSCALL(accept)\n(long res, long arg0, sanitizer_kernel_sockaddr *arg1, void *arg2) {\n  if (res >= 0) {\n    if (arg1)\n      POST_WRITE(arg1, sizeof(*arg1));\n    if (arg2)\n      POST_WRITE(arg2, sizeof(unsigned));\n  }\n}\n\nPRE_SYSCALL(accept4)\n(long arg0, sanitizer_kernel_sockaddr *arg1, void *arg2, long arg3) {}\n\nPOST_SYSCALL(accept4)\n(long res, long arg0, sanitizer_kernel_sockaddr *arg1, void *arg2, long arg3) {\n  if (res >= 0) {\n    if (arg1)\n      POST_WRITE(arg1, sizeof(*arg1));\n    if (arg2)\n      POST_WRITE(arg2, sizeof(unsigned));\n  }\n}\n\nPRE_SYSCALL(getsockname)\n(long arg0, sanitizer_kernel_sockaddr *arg1, void *arg2) {}\n\nPOST_SYSCALL(getsockname)\n(long res, long arg0, sanitizer_kernel_sockaddr *arg1, void *arg2) {\n  if (res >= 0) {\n    if (arg1)\n      POST_WRITE(arg1, sizeof(*arg1));\n    if (arg2)\n      POST_WRITE(arg2, sizeof(unsigned));\n  }\n}\n\nPRE_SYSCALL(getpeername)\n(long arg0, sanitizer_kernel_sockaddr *arg1, void *arg2) {}\n\nPOST_SYSCALL(getpeername)\n(long res, long arg0, sanitizer_kernel_sockaddr *arg1, void *arg2) {\n  if (res >= 0) {\n    if (arg1)\n      POST_WRITE(arg1, sizeof(*arg1));\n    if (arg2)\n      POST_WRITE(arg2, sizeof(unsigned));\n  }\n}\n\nPRE_SYSCALL(send)(long arg0, void *arg1, long arg2, long arg3) {}\n\nPOST_SYSCALL(send)(long res, long arg0, void *arg1, long arg2, long arg3) {\n  if (res) {\n    if (arg1)\n      POST_READ(arg1, res);\n  }\n}\n\nPRE_SYSCALL(sendto)\n(long arg0, void *arg1, long arg2, long arg3, sanitizer_kernel_sockaddr *arg4,\n long arg5) {}\n\nPOST_SYSCALL(sendto)\n(long res, long arg0, void *arg1, long arg2, long arg3,\n sanitizer_kernel_sockaddr *arg4, long arg5) {\n  if (res >= 0) {\n    if (arg1)\n      POST_READ(arg1, res);\n    if (arg4)\n      
POST_WRITE(arg4, sizeof(*arg4));\n  }\n}\n\nPRE_SYSCALL(sendmsg)(long fd, void *msg, long flags) {}\n\nPOST_SYSCALL(sendmsg)(long res, long fd, void *msg, long flags) {\n  // FIXME: POST_READ\n}\n\nPRE_SYSCALL(sendmmsg)(long fd, void *msg, long vlen, long flags) {}\n\nPOST_SYSCALL(sendmmsg)(long res, long fd, void *msg, long vlen, long flags) {\n  // FIXME: POST_READ\n}\n\nPRE_SYSCALL(recv)(long arg0, void *buf, long len, long flags) {}\n\nPOST_SYSCALL(recv)(long res, void *buf, long len, long flags) {\n  if (res >= 0) {\n    if (buf)\n      POST_WRITE(buf, res);\n  }\n}\n\nPRE_SYSCALL(recvfrom)\n(long arg0, void *buf, long len, long flags, sanitizer_kernel_sockaddr *arg4,\n void *arg5) {}\n\nPOST_SYSCALL(recvfrom)\n(long res, long arg0, void *buf, long len, long flags,\n sanitizer_kernel_sockaddr *arg4, void *arg5) {\n  if (res >= 0) {\n    if (buf)\n      POST_WRITE(buf, res);\n    if (arg4)\n      POST_WRITE(arg4, sizeof(*arg4));\n    if (arg5)\n      POST_WRITE(arg5, sizeof(int));\n  }\n}\n\nPRE_SYSCALL(socket)(long arg0, long arg1, long arg2) {}\n\nPOST_SYSCALL(socket)(long res, long arg0, long arg1, long arg2) {}\n\nPRE_SYSCALL(socketpair)(long arg0, long arg1, long arg2, int *sv) {}\n\nPOST_SYSCALL(socketpair)(long res, long arg0, long arg1, long arg2, int *sv) {\n  if (res >= 0)\n    if (sv)\n      POST_WRITE(sv, sizeof(int) * 2);\n}\n\nPRE_SYSCALL(socketcall)(long call, void *args) {}\n\nPOST_SYSCALL(socketcall)(long res, long call, void *args) {\n  if (res >= 0) {\n    if (args)\n      POST_WRITE(args, sizeof(long));\n  }\n}\n\nPRE_SYSCALL(listen)(long arg0, long arg1) {}\n\nPOST_SYSCALL(listen)(long res, long arg0, long arg1) {}\n\nPRE_SYSCALL(poll)(void *ufds, long nfds, long timeout) {}\n\nPOST_SYSCALL(poll)\n(long res, __sanitizer_pollfd *ufds, long nfds, long timeout) {\n  if (res >= 0) {\n    if (ufds)\n      POST_WRITE(ufds, nfds * sizeof(*ufds));\n  }\n}\n\nPRE_SYSCALL(select)\n(long n, __sanitizer___kernel_fd_set *inp, __sanitizer___kernel_fd_set 
*outp,\n __sanitizer___kernel_fd_set *exp, void *tvp) {}\n\nPOST_SYSCALL(select)\n(long res, long n, __sanitizer___kernel_fd_set *inp,\n __sanitizer___kernel_fd_set *outp, __sanitizer___kernel_fd_set *exp,\n void *tvp) {\n  if (res >= 0) {\n    if (inp)\n      POST_WRITE(inp, sizeof(*inp));\n    if (outp)\n      POST_WRITE(outp, sizeof(*outp));\n    if (exp)\n      POST_WRITE(exp, sizeof(*exp));\n    if (tvp)\n      POST_WRITE(tvp, timeval_sz);\n  }\n}\n\nPRE_SYSCALL(old_select)(void *arg) {}\n\nPOST_SYSCALL(old_select)(long res, void *arg) {}\n\nPRE_SYSCALL(epoll_create)(long size) {}\n\nPOST_SYSCALL(epoll_create)(long res, long size) {}\n\nPRE_SYSCALL(epoll_create1)(long flags) {}\n\nPOST_SYSCALL(epoll_create1)(long res, long flags) {}\n\nPRE_SYSCALL(epoll_ctl)(long epfd, long op, long fd, void *event) {}\n\nPOST_SYSCALL(epoll_ctl)(long res, long epfd, long op, long fd, void *event) {\n  if (res >= 0) {\n    if (event)\n      POST_WRITE(event, struct_epoll_event_sz);\n  }\n}\n\nPRE_SYSCALL(epoll_wait)\n(long epfd, void *events, long maxevents, long timeout) {}\n\nPOST_SYSCALL(epoll_wait)\n(long res, long epfd, void *events, long maxevents, long timeout) {\n  if (res >= 0) {\n    if (events)\n      POST_WRITE(events, res * struct_epoll_event_sz);\n  }\n}\n\nPRE_SYSCALL(epoll_pwait)\n(long epfd, void *events, long maxevents, long timeout,\n const kernel_sigset_t *sigmask, long sigsetsize) {\n  if (sigmask)\n    PRE_READ(sigmask, sigsetsize);\n}\n\nPOST_SYSCALL(epoll_pwait)\n(long res, long epfd, void *events, long maxevents, long timeout,\n const void *sigmask, long sigsetsize) {\n  if (res >= 0) {\n    if (events)\n      POST_WRITE(events, res * struct_epoll_event_sz);\n  }\n}\n\nPRE_SYSCALL(epoll_pwait2)\n(long epfd, void *events, long maxevents,\n const sanitizer_kernel_timespec *timeout, const kernel_sigset_t *sigmask,\n long sigsetsize) {\n  if (timeout)\n    PRE_READ(timeout, sizeof(timeout));\n  if (sigmask)\n    PRE_READ(sigmask, 
sigsetsize);\n}\n\nPOST_SYSCALL(epoll_pwait2)\n(long res, long epfd, void *events, long maxevents,\n const sanitizer_kernel_timespec *timeout, const void *sigmask,\n long sigsetsize) {\n  if (res >= 0) {\n    if (events)\n      POST_WRITE(events, res * struct_epoll_event_sz);\n  }\n}\n\nPRE_SYSCALL(gethostname)(void *name, long len) {}\n\nPOST_SYSCALL(gethostname)(long res, void *name, long len) {\n  if (res >= 0) {\n    if (name)\n      POST_WRITE(name, __sanitizer::internal_strlen((const char *)name) + 1);\n  }\n}\n\nPRE_SYSCALL(sethostname)(void *name, long len) {}\n\nPOST_SYSCALL(sethostname)(long res, void *name, long len) {\n  if (res >= 0) {\n    if (name)\n      POST_WRITE(name, __sanitizer::internal_strlen((const char *)name) + 1);\n  }\n}\n\nPRE_SYSCALL(setdomainname)(void *name, long len) {}\n\nPOST_SYSCALL(setdomainname)(long res, void *name, long len) {\n  if (res >= 0) {\n    if (name)\n      POST_WRITE(name, __sanitizer::internal_strlen((const char *)name) + 1);\n  }\n}\n\nPRE_SYSCALL(newuname)(void *name) {}\n\nPOST_SYSCALL(newuname)(long res, void *name) {\n  if (res >= 0) {\n    if (name)\n      POST_WRITE(name, struct_new_utsname_sz);\n  }\n}\n\nPRE_SYSCALL(uname)(void *arg0) {}\n\nPOST_SYSCALL(uname)(long res, void *arg0) {\n  if (res >= 0) {\n    if (arg0)\n      POST_WRITE(arg0, struct_old_utsname_sz);\n  }\n}\n\nPRE_SYSCALL(olduname)(void *arg0) {}\n\nPOST_SYSCALL(olduname)(long res, void *arg0) {\n  if (res >= 0) {\n    if (arg0)\n      POST_WRITE(arg0, struct_oldold_utsname_sz);\n  }\n}\n\nPRE_SYSCALL(getrlimit)(long resource, void *rlim) {}\n\nPOST_SYSCALL(getrlimit)(long res, long resource, void *rlim) {\n  if (res >= 0) {\n    if (rlim)\n      POST_WRITE(rlim, struct_rlimit_sz);\n  }\n}\n\nPRE_SYSCALL(old_getrlimit)(long resource, void *rlim) {}\n\nPOST_SYSCALL(old_getrlimit)(long res, long resource, void *rlim) {\n  if (res >= 0) {\n    if (rlim)\n      POST_WRITE(rlim, struct_rlimit_sz);\n  }\n}\n\nPRE_SYSCALL(setrlimit)(long resource, 
void *rlim) {}\n\nPOST_SYSCALL(setrlimit)(long res, long resource, void *rlim) {\n  if (res >= 0) {\n    if (rlim)\n      POST_WRITE(rlim, struct_rlimit_sz);\n  }\n}\n\n#  if !SANITIZER_ANDROID\nPRE_SYSCALL(prlimit64)\n(long pid, long resource, const void *new_rlim, void *old_rlim) {\n  if (new_rlim)\n    PRE_READ(new_rlim, struct_rlimit64_sz);\n}\n\nPOST_SYSCALL(prlimit64)\n(long res, long pid, long resource, const void *new_rlim, void *old_rlim) {\n  if (res >= 0) {\n    if (old_rlim)\n      POST_WRITE(old_rlim, struct_rlimit64_sz);\n  }\n}\n#  endif\n\nPRE_SYSCALL(getrusage)(long who, void *ru) {}\n\nPOST_SYSCALL(getrusage)(long res, long who, void *ru) {\n  if (res >= 0) {\n    if (ru)\n      POST_WRITE(ru, struct_rusage_sz);\n  }\n}\n\nPRE_SYSCALL(umask)(long mask) {}\n\nPOST_SYSCALL(umask)(long res, long mask) {}\n\nPRE_SYSCALL(msgget)(long key, long msgflg) {}\n\nPOST_SYSCALL(msgget)(long res, long key, long msgflg) {}\n\nPRE_SYSCALL(msgsnd)(long msqid, void *msgp, long msgsz, long msgflg) {\n  if (msgp)\n    PRE_READ(msgp, msgsz);\n}\n\nPOST_SYSCALL(msgsnd)\n(long res, long msqid, void *msgp, long msgsz, long msgflg) {}\n\nPRE_SYSCALL(msgrcv)\n(long msqid, void *msgp, long msgsz, long msgtyp, long msgflg) {}\n\nPOST_SYSCALL(msgrcv)\n(long res, long msqid, void *msgp, long msgsz, long msgtyp, long msgflg) {\n  if (res >= 0) {\n    if (msgp)\n      POST_WRITE(msgp, res);\n  }\n}\n\n#  if !SANITIZER_ANDROID\nPRE_SYSCALL(msgctl)(long msqid, long cmd, void *buf) {}\n\nPOST_SYSCALL(msgctl)(long res, long msqid, long cmd, void *buf) {\n  if (res >= 0) {\n    if (buf)\n      POST_WRITE(buf, struct_msqid_ds_sz);\n  }\n}\n#  endif\n\nPRE_SYSCALL(semget)(long key, long nsems, long semflg) {}\n\nPOST_SYSCALL(semget)(long res, long key, long nsems, long semflg) {}\n\nPRE_SYSCALL(semop)(long semid, void *sops, long nsops) {}\n\nPOST_SYSCALL(semop)(long res, long semid, void *sops, long nsops) {}\n\nPRE_SYSCALL(semctl)(long semid, long semnum, long cmd, void *arg) 
{}\n\nPOST_SYSCALL(semctl)(long res, long semid, long semnum, long cmd, void *arg) {}\n\nPRE_SYSCALL(semtimedop)\n(long semid, void *sops, long nsops, const void *timeout) {\n  if (timeout)\n    PRE_READ(timeout, struct_timespec_sz);\n}\n\nPOST_SYSCALL(semtimedop)\n(long res, long semid, void *sops, long nsops, const void *timeout) {}\n\nPRE_SYSCALL(shmat)(long shmid, void *shmaddr, long shmflg) {}\n\nPOST_SYSCALL(shmat)(long res, long shmid, void *shmaddr, long shmflg) {\n  if (res >= 0) {\n    if (shmaddr)\n      POST_WRITE(shmaddr,\n                 __sanitizer::internal_strlen((const char *)shmaddr) + 1);\n  }\n}\n\nPRE_SYSCALL(shmget)(long key, long size, long flag) {}\n\nPOST_SYSCALL(shmget)(long res, long key, long size, long flag) {}\n\nPRE_SYSCALL(shmdt)(void *shmaddr) {}\n\nPOST_SYSCALL(shmdt)(long res, void *shmaddr) {\n  if (res >= 0) {\n    if (shmaddr)\n      POST_WRITE(shmaddr,\n                 __sanitizer::internal_strlen((const char *)shmaddr) + 1);\n  }\n}\n\nPRE_SYSCALL(ipc)\n(long call, long first, long second, long third, void *ptr, long fifth) {}\n\nPOST_SYSCALL(ipc)\n(long res, long call, long first, long second, long third, void *ptr,\n long fifth) {}\n\n#  if !SANITIZER_ANDROID\nPRE_SYSCALL(shmctl)(long shmid, long cmd, void *buf) {}\n\nPOST_SYSCALL(shmctl)(long res, long shmid, long cmd, void *buf) {\n  if (res >= 0) {\n    if (buf)\n      POST_WRITE(buf, sizeof(__sanitizer_shmid_ds));\n  }\n}\n\nPRE_SYSCALL(mq_open)(const void *name, long oflag, long mode, void *attr) {\n  if (name)\n    PRE_READ(name, __sanitizer::internal_strlen((const char *)name) + 1);\n}\n\nPOST_SYSCALL(mq_open)\n(long res, const void *name, long oflag, long mode, void *attr) {\n  if (res >= 0) {\n    if (attr)\n      POST_WRITE(attr, struct_mq_attr_sz);\n  }\n}\n\nPRE_SYSCALL(mq_unlink)(const void *name) {\n  if (name)\n    PRE_READ(name, __sanitizer::internal_strlen((const char *)name) + 1);\n}\n\nPOST_SYSCALL(mq_unlink)(long res, const void *name) 
{}\n\nPRE_SYSCALL(mq_timedsend)\n(long mqdes, const void *msg_ptr, long msg_len, long msg_prio,\n const void *abs_timeout) {\n  if (msg_ptr)\n    PRE_READ(msg_ptr, msg_len);\n  if (abs_timeout)\n    PRE_READ(abs_timeout, struct_timespec_sz);\n}\n\nPOST_SYSCALL(mq_timedsend)\n(long res, long mqdes, const void *msg_ptr, long msg_len, long msg_prio,\n const void *abs_timeout) {}\n\nPRE_SYSCALL(mq_timedreceive)\n(long mqdes, void *msg_ptr, long msg_len, void *msg_prio,\n const void *abs_timeout) {\n  if (abs_timeout)\n    PRE_READ(abs_timeout, struct_timespec_sz);\n}\n\nPOST_SYSCALL(mq_timedreceive)\n(long res, long mqdes, void *msg_ptr, long msg_len, int *msg_prio,\n const void *abs_timeout) {\n  if (res >= 0) {\n    if (msg_ptr)\n      POST_WRITE(msg_ptr, res);\n    if (msg_prio)\n      POST_WRITE(msg_prio, sizeof(*msg_prio));\n  }\n}\n\nPRE_SYSCALL(mq_notify)(long mqdes, const void *notification) {\n  if (notification)\n    PRE_READ(notification, struct_sigevent_sz);\n}\n\nPOST_SYSCALL(mq_notify)(long res, long mqdes, const void *notification) {}\n\nPRE_SYSCALL(mq_getsetattr)(long mqdes, const void *mqstat, void *omqstat) {\n  if (mqstat)\n    PRE_READ(mqstat, struct_mq_attr_sz);\n}\n\nPOST_SYSCALL(mq_getsetattr)\n(long res, long mqdes, const void *mqstat, void *omqstat) {\n  if (res >= 0) {\n    if (omqstat)\n      POST_WRITE(omqstat, struct_mq_attr_sz);\n  }\n}\n#  endif  // SANITIZER_ANDROID\n\nPRE_SYSCALL(pciconfig_iobase)(long which, long bus, long devfn) {}\n\nPOST_SYSCALL(pciconfig_iobase)(long res, long which, long bus, long devfn) {}\n\nPRE_SYSCALL(pciconfig_read)\n(long bus, long dfn, long off, long len, void *buf) {}\n\nPOST_SYSCALL(pciconfig_read)\n(long res, long bus, long dfn, long off, long len, void *buf) {}\n\nPRE_SYSCALL(pciconfig_write)\n(long bus, long dfn, long off, long len, void *buf) {}\n\nPOST_SYSCALL(pciconfig_write)\n(long res, long bus, long dfn, long off, long len, void *buf) {}\n\nPRE_SYSCALL(swapon)(const void *specialfile, long 
swap_flags) {\n  if (specialfile)\n    PRE_READ(specialfile,\n             __sanitizer::internal_strlen((const char *)specialfile) + 1);\n}\n\nPOST_SYSCALL(swapon)(long res, const void *specialfile, long swap_flags) {}\n\nPRE_SYSCALL(swapoff)(const void *specialfile) {\n  if (specialfile)\n    PRE_READ(specialfile,\n             __sanitizer::internal_strlen((const char *)specialfile) + 1);\n}\n\nPOST_SYSCALL(swapoff)(long res, const void *specialfile) {}\n\nPRE_SYSCALL(sysctl)(__sanitizer___sysctl_args *args) {\n  if (args) {\n    if (args->name)\n      PRE_READ(args->name, args->nlen * sizeof(*args->name));\n    if (args->newval)\n      PRE_READ(args->name, args->newlen);\n  }\n}\n\nPOST_SYSCALL(sysctl)(long res, __sanitizer___sysctl_args *args) {\n  if (res >= 0) {\n    if (args && args->oldval && args->oldlenp) {\n      POST_WRITE(args->oldlenp, sizeof(*args->oldlenp));\n      POST_WRITE(args->oldval, *args->oldlenp);\n    }\n  }\n}\n\nPRE_SYSCALL(sysinfo)(void *info) {}\n\nPOST_SYSCALL(sysinfo)(long res, void *info) {\n  if (res >= 0) {\n    if (info)\n      POST_WRITE(info, struct_sysinfo_sz);\n  }\n}\n\nPRE_SYSCALL(sysfs)(long option, long arg1, long arg2) {}\n\nPOST_SYSCALL(sysfs)(long res, long option, long arg1, long arg2) {}\n\nPRE_SYSCALL(syslog)(long type, void *buf, long len) {}\n\nPOST_SYSCALL(syslog)(long res, long type, void *buf, long len) {\n  if (res >= 0) {\n    if (buf)\n      POST_WRITE(buf, __sanitizer::internal_strlen((const char *)buf) + 1);\n  }\n}\n\nPRE_SYSCALL(uselib)(const void *library) {\n  if (library)\n    PRE_READ(library, __sanitizer::internal_strlen((const char *)library) + 1);\n}\n\nPOST_SYSCALL(uselib)(long res, const void *library) {}\n\nPRE_SYSCALL(ni_syscall)() {}\n\nPOST_SYSCALL(ni_syscall)(long res) {}\n\nPRE_SYSCALL(ptrace)(long request, long pid, long addr, long data) {\n#  if !SANITIZER_ANDROID &&                                                   \\\n      (defined(__i386) || defined(__x86_64) || defined(__mips64) ||   
        \\\n       defined(__powerpc64__) || defined(__aarch64__) || defined(__s390__) || \\\n       SANITIZER_RISCV64)\n  if (data) {\n    if (request == ptrace_setregs) {\n      PRE_READ((void *)data, struct_user_regs_struct_sz);\n    } else if (request == ptrace_setfpregs) {\n      PRE_READ((void *)data, struct_user_fpregs_struct_sz);\n    } else if (request == ptrace_setfpxregs) {\n      PRE_READ((void *)data, struct_user_fpxregs_struct_sz);\n    } else if (request == ptrace_setsiginfo) {\n      PRE_READ((void *)data, siginfo_t_sz);\n    } else if (request == ptrace_setregset) {\n      __sanitizer_iovec *iov = (__sanitizer_iovec *)data;\n      PRE_READ(iov->iov_base, iov->iov_len);\n    }\n  }\n#  endif\n}\n\nPOST_SYSCALL(ptrace)(long res, long request, long pid, long addr, long data) {\n#  if !SANITIZER_ANDROID &&                                                   \\\n      (defined(__i386) || defined(__x86_64) || defined(__mips64) ||           \\\n       defined(__powerpc64__) || defined(__aarch64__) || defined(__s390__) || \\\n       SANITIZER_RISCV64)\n  if (res >= 0 && data) {\n    // Note that this is different from the interceptor in\n    // sanitizer_common_interceptors.inc.\n    // PEEK* requests return resulting values through data pointer.\n    if (request == ptrace_getregs) {\n      POST_WRITE((void *)data, struct_user_regs_struct_sz);\n    } else if (request == ptrace_getfpregs) {\n      POST_WRITE((void *)data, struct_user_fpregs_struct_sz);\n    } else if (request == ptrace_getfpxregs) {\n      POST_WRITE((void *)data, struct_user_fpxregs_struct_sz);\n    } else if (request == ptrace_getsiginfo) {\n      POST_WRITE((void *)data, siginfo_t_sz);\n    } else if (request == ptrace_getregset) {\n      __sanitizer_iovec *iov = (__sanitizer_iovec *)data;\n      POST_WRITE(iov->iov_base, iov->iov_len);\n    } else if (request == ptrace_peekdata || request == ptrace_peektext ||\n               request == ptrace_peekuser) {\n      POST_WRITE((void *)data, 
sizeof(void *));\n    }\n  }\n#  endif\n}\n\nPRE_SYSCALL(add_key)\n(const void *_type, const void *_description, const void *_payload, long plen,\n long destringid) {\n  if (_type)\n    PRE_READ(_type, __sanitizer::internal_strlen((const char *)_type) + 1);\n  if (_description)\n    PRE_READ(_description,\n             __sanitizer::internal_strlen((const char *)_description) + 1);\n}\n\nPOST_SYSCALL(add_key)\n(long res, const void *_type, const void *_description, const void *_payload,\n long plen, long destringid) {}\n\nPRE_SYSCALL(request_key)\n(const void *_type, const void *_description, const void *_callout_info,\n long destringid) {\n  if (_type)\n    PRE_READ(_type, __sanitizer::internal_strlen((const char *)_type) + 1);\n  if (_description)\n    PRE_READ(_description,\n             __sanitizer::internal_strlen((const char *)_description) + 1);\n  if (_callout_info)\n    PRE_READ(_callout_info,\n             __sanitizer::internal_strlen((const char *)_callout_info) + 1);\n}\n\nPOST_SYSCALL(request_key)\n(long res, const void *_type, const void *_description,\n const void *_callout_info, long destringid) {}\n\nPRE_SYSCALL(keyctl)(long cmd, long arg2, long arg3, long arg4, long arg5) {}\n\nPOST_SYSCALL(keyctl)\n(long res, long cmd, long arg2, long arg3, long arg4, long arg5) {}\n\nPRE_SYSCALL(ioprio_set)(long which, long who, long ioprio) {}\n\nPOST_SYSCALL(ioprio_set)(long res, long which, long who, long ioprio) {}\n\nPRE_SYSCALL(ioprio_get)(long which, long who) {}\n\nPOST_SYSCALL(ioprio_get)(long res, long which, long who) {}\n\nPRE_SYSCALL(set_mempolicy)(long mode, void *nmask, long maxnode) {}\n\nPOST_SYSCALL(set_mempolicy)(long res, long mode, void *nmask, long maxnode) {\n  if (res >= 0) {\n    if (nmask)\n      POST_WRITE(nmask, sizeof(long));\n  }\n}\n\nPRE_SYSCALL(migrate_pages)\n(long pid, long maxnode, const void *from, const void *to) {\n  if (from)\n    PRE_READ(from, sizeof(long));\n  if (to)\n    PRE_READ(to, 
sizeof(long));\n}\n\nPOST_SYSCALL(migrate_pages)\n(long res, long pid, long maxnode, const void *from, const void *to) {}\n\nPRE_SYSCALL(move_pages)\n(long pid, long nr_pages, const void **pages, const int *nodes, int *status,\n long flags) {\n  if (pages)\n    PRE_READ(pages, nr_pages * sizeof(*pages));\n  if (nodes)\n    PRE_READ(nodes, nr_pages * sizeof(*nodes));\n}\n\nPOST_SYSCALL(move_pages)\n(long res, long pid, long nr_pages, const void **pages, const int *nodes,\n int *status, long flags) {\n  if (res >= 0) {\n    if (status)\n      POST_WRITE(status, nr_pages * sizeof(*status));\n  }\n}\n\nPRE_SYSCALL(mbind)\n(long start, long len, long mode, void *nmask, long maxnode, long flags) {}\n\nPOST_SYSCALL(mbind)\n(long res, long start, long len, long mode, void *nmask, long maxnode,\n long flags) {\n  if (res >= 0) {\n    if (nmask)\n      POST_WRITE(nmask, sizeof(long));\n  }\n}\n\nPRE_SYSCALL(get_mempolicy)\n(void *policy, void *nmask, long maxnode, long addr, long flags) {}\n\nPOST_SYSCALL(get_mempolicy)\n(long res, void *policy, void *nmask, long maxnode, long addr, long flags) {\n  if (res >= 0) {\n    if (policy)\n      POST_WRITE(policy, sizeof(int));\n    if (nmask)\n      POST_WRITE(nmask, sizeof(long));\n  }\n}\n\nPRE_SYSCALL(inotify_init)() {}\n\nPOST_SYSCALL(inotify_init)(long res) {}\n\nPRE_SYSCALL(inotify_init1)(long flags) {}\n\nPOST_SYSCALL(inotify_init1)(long res, long flags) {}\n\nPRE_SYSCALL(inotify_add_watch)(long fd, const void *path, long mask) {\n  if (path)\n    PRE_READ(path, __sanitizer::internal_strlen((const char *)path) + 1);\n}\n\nPOST_SYSCALL(inotify_add_watch)\n(long res, long fd, const void *path, long mask) {}\n\nPRE_SYSCALL(inotify_rm_watch)(long fd, long wd) {}\n\nPOST_SYSCALL(inotify_rm_watch)(long res, long fd, long wd) {}\n\nPRE_SYSCALL(spu_run)(long fd, void *unpc, void *ustatus) {}\n\nPOST_SYSCALL(spu_run)(long res, long fd, unsigned *unpc, unsigned *ustatus) {\n  if (res >= 0) {\n    if (unpc)\n      POST_WRITE(unpc, 
sizeof(*unpc));\n    if (ustatus)\n      POST_WRITE(ustatus, sizeof(*ustatus));\n  }\n}\n\nPRE_SYSCALL(spu_create)(const void *name, long flags, long mode, long fd) {\n  if (name)\n    PRE_READ(name, __sanitizer::internal_strlen((const char *)name) + 1);\n}\n\nPOST_SYSCALL(spu_create)\n(long res, const void *name, long flags, long mode, long fd) {}\n\nPRE_SYSCALL(mknodat)(long dfd, const void *filename, long mode, long dev) {\n  if (filename)\n    PRE_READ(filename,\n             __sanitizer::internal_strlen((const char *)filename) + 1);\n}\n\nPOST_SYSCALL(mknodat)\n(long res, long dfd, const void *filename, long mode, long dev) {}\n\nPRE_SYSCALL(mkdirat)(long dfd, const void *pathname, long mode) {\n  if (pathname)\n    PRE_READ(pathname,\n             __sanitizer::internal_strlen((const char *)pathname) + 1);\n}\n\nPOST_SYSCALL(mkdirat)(long res, long dfd, const void *pathname, long mode) {}\n\nPRE_SYSCALL(unlinkat)(long dfd, const void *pathname, long flag) {\n  if (pathname)\n    PRE_READ(pathname,\n             __sanitizer::internal_strlen((const char *)pathname) + 1);\n}\n\nPOST_SYSCALL(unlinkat)(long res, long dfd, const void *pathname, long flag) {}\n\nPRE_SYSCALL(symlinkat)(const void *oldname, long newdfd, const void *newname) {\n  if (oldname)\n    PRE_READ(oldname, __sanitizer::internal_strlen((const char *)oldname) + 1);\n  if (newname)\n    PRE_READ(newname, __sanitizer::internal_strlen((const char *)newname) + 1);\n}\n\nPOST_SYSCALL(symlinkat)\n(long res, const void *oldname, long newdfd, const void *newname) {}\n\nPRE_SYSCALL(linkat)\n(long olddfd, const void *oldname, long newdfd, const void *newname,\n long flags) {\n  if (oldname)\n    PRE_READ(oldname, __sanitizer::internal_strlen((const char *)oldname) + 1);\n  if (newname)\n    PRE_READ(newname, __sanitizer::internal_strlen((const char *)newname) + 1);\n}\n\nPOST_SYSCALL(linkat)\n(long res, long olddfd, const void *oldname, long newdfd, const void *newname,\n long flags) 
{}\n\nPRE_SYSCALL(renameat)\n(long olddfd, const void *oldname, long newdfd, const void *newname) {\n  if (oldname)\n    PRE_READ(oldname, __sanitizer::internal_strlen((const char *)oldname) + 1);\n  if (newname)\n    PRE_READ(newname, __sanitizer::internal_strlen((const char *)newname) + 1);\n}\n\nPOST_SYSCALL(renameat)\n(long res, long olddfd, const void *oldname, long newdfd, const void *newname) {\n}\n\nPRE_SYSCALL(futimesat)(long dfd, const void *filename, void *utimes) {\n  if (filename)\n    PRE_READ(filename,\n             __sanitizer::internal_strlen((const char *)filename) + 1);\n}\n\nPOST_SYSCALL(futimesat)\n(long res, long dfd, const void *filename, void *utimes) {\n  if (res >= 0) {\n    if (utimes)\n      POST_WRITE(utimes, timeval_sz);\n  }\n}\n\nPRE_SYSCALL(faccessat)(long dfd, const void *filename, long mode) {\n  if (filename)\n    PRE_READ(filename,\n             __sanitizer::internal_strlen((const char *)filename) + 1);\n}\n\nPOST_SYSCALL(faccessat)(long res, long dfd, const void *filename, long mode) {}\n\nPRE_SYSCALL(fchmodat)(long dfd, const void *filename, long mode) {\n  if (filename)\n    PRE_READ(filename,\n             __sanitizer::internal_strlen((const char *)filename) + 1);\n}\n\nPOST_SYSCALL(fchmodat)(long res, long dfd, const void *filename, long mode) {}\n\nPRE_SYSCALL(fchownat)\n(long dfd, const void *filename, long user, long group, long flag) {\n  if (filename)\n    PRE_READ(filename,\n             __sanitizer::internal_strlen((const char *)filename) + 1);\n}\n\nPOST_SYSCALL(fchownat)\n(long res, long dfd, const void *filename, long user, long group, long flag) {}\n\nPRE_SYSCALL(openat)(long dfd, const void *filename, long flags, long mode) {\n  if (filename)\n    PRE_READ(filename,\n             __sanitizer::internal_strlen((const char *)filename) + 1);\n}\n\nPOST_SYSCALL(openat)\n(long res, long dfd, const void *filename, long flags, long mode) {}\n\nPRE_SYSCALL(newfstatat)\n(long dfd, const void *filename, void *statbuf, long 
flag) {\n  if (filename)\n    PRE_READ(filename,\n             __sanitizer::internal_strlen((const char *)filename) + 1);\n}\n\nPOST_SYSCALL(newfstatat)\n(long res, long dfd, const void *filename, void *statbuf, long flag) {\n  if (res >= 0) {\n    if (statbuf)\n      POST_WRITE(statbuf, struct_kernel_stat_sz);\n  }\n}\n\nPRE_SYSCALL(fstatat64)\n(long dfd, const void *filename, void *statbuf, long flag) {\n  if (filename)\n    PRE_READ(filename,\n             __sanitizer::internal_strlen((const char *)filename) + 1);\n}\n\nPOST_SYSCALL(fstatat64)\n(long res, long dfd, const void *filename, void *statbuf, long flag) {\n  if (res >= 0) {\n    if (statbuf)\n      POST_WRITE(statbuf, struct_kernel_stat64_sz);\n  }\n}\n\nPRE_SYSCALL(readlinkat)(long dfd, const void *path, void *buf, long bufsiz) {\n  if (path)\n    PRE_READ(path, __sanitizer::internal_strlen((const char *)path) + 1);\n}\n\nPOST_SYSCALL(readlinkat)\n(long res, long dfd, const void *path, void *buf, long bufsiz) {\n  if (res >= 0) {\n    if (buf)\n      POST_WRITE(buf, __sanitizer::internal_strlen((const char *)buf) + 1);\n  }\n}\n\nPRE_SYSCALL(utimensat)\n(long dfd, const void *filename, void *utimes, long flags) {\n  if (filename)\n    PRE_READ(filename,\n             __sanitizer::internal_strlen((const char *)filename) + 1);\n}\n\nPOST_SYSCALL(utimensat)\n(long res, long dfd, const void *filename, void *utimes, long flags) {\n  if (res >= 0) {\n    if (utimes)\n      POST_WRITE(utimes, struct_timespec_sz);\n  }\n}\n\nPRE_SYSCALL(unshare)(long unshare_flags) {}\n\nPOST_SYSCALL(unshare)(long res, long unshare_flags) {}\n\nPRE_SYSCALL(splice)\n(long fd_in, void *off_in, long fd_out, void *off_out, long len, long flags) {}\n\nPOST_SYSCALL(splice)\n(long res, long fd_in, void *off_in, long fd_out, void *off_out, long len,\n long flags) {\n  if (res >= 0) {\n    if (off_in)\n      POST_WRITE(off_in, sizeof(long long));\n    if (off_out)\n      POST_WRITE(off_out, sizeof(long long));\n  
}\n}\n\nPRE_SYSCALL(vmsplice)\n(long fd, const __sanitizer_iovec *iov, long nr_segs, long flags) {}\n\nPOST_SYSCALL(vmsplice)\n(long res, long fd, const __sanitizer_iovec *iov, long nr_segs, long flags) {\n  if (res >= 0) {\n    if (iov)\n      kernel_read_iovec(iov, nr_segs, res);\n  }\n}\n\nPRE_SYSCALL(tee)(long fdin, long fdout, long len, long flags) {}\n\nPOST_SYSCALL(tee)(long res, long fdin, long fdout, long len, long flags) {}\n\nPRE_SYSCALL(get_robust_list)(long pid, void *head_ptr, void *len_ptr) {}\n\nPOST_SYSCALL(get_robust_list)\n(long res, long pid, void *head_ptr, void *len_ptr) {}\n\nPRE_SYSCALL(set_robust_list)(void *head, long len) {}\n\nPOST_SYSCALL(set_robust_list)(long res, void *head, long len) {}\n\nPRE_SYSCALL(getcpu)(void *cpu, void *node, void *cache) {}\n\nPOST_SYSCALL(getcpu)(long res, void *cpu, void *node, void *cache) {\n  if (res >= 0) {\n    if (cpu)\n      POST_WRITE(cpu, sizeof(unsigned));\n    if (node)\n      POST_WRITE(node, sizeof(unsigned));\n    // The third argument to this system call is nowadays unused.\n  }\n}\n\nPRE_SYSCALL(signalfd)(long ufd, void *user_mask, long sizemask) {}\n\nPOST_SYSCALL(signalfd)\n(long res, long ufd, kernel_sigset_t *user_mask, long sizemask) {\n  if (res >= 0) {\n    if (user_mask)\n      POST_WRITE(user_mask, sizemask);\n  }\n}\n\nPRE_SYSCALL(signalfd4)(long ufd, void *user_mask, long sizemask, long flags) {}\n\nPOST_SYSCALL(signalfd4)\n(long res, long ufd, kernel_sigset_t *user_mask, long sizemask, long flags) {\n  if (res >= 0) {\n    if (user_mask)\n      POST_WRITE(user_mask, sizemask);\n  }\n}\n\nPRE_SYSCALL(timerfd_create)(long clockid, long flags) {}\n\nPOST_SYSCALL(timerfd_create)(long res, long clockid, long flags) {}\n\nPRE_SYSCALL(timerfd_settime)\n(long ufd, long flags, const void *utmr, void *otmr) {\n  if (utmr)\n    PRE_READ(utmr, struct_itimerspec_sz);\n}\n\nPOST_SYSCALL(timerfd_settime)\n(long res, long ufd, long flags, const void *utmr, void *otmr) {\n  if (res >= 0) {\n    if 
(otmr)\n      POST_WRITE(otmr, struct_itimerspec_sz);\n  }\n}\n\nPRE_SYSCALL(timerfd_gettime)(long ufd, void *otmr) {}\n\nPOST_SYSCALL(timerfd_gettime)(long res, long ufd, void *otmr) {\n  if (res >= 0) {\n    if (otmr)\n      POST_WRITE(otmr, struct_itimerspec_sz);\n  }\n}\n\nPRE_SYSCALL(eventfd)(long count) {}\n\nPOST_SYSCALL(eventfd)(long res, long count) {}\n\nPRE_SYSCALL(eventfd2)(long count, long flags) {}\n\nPOST_SYSCALL(eventfd2)(long res, long count, long flags) {}\n\nPRE_SYSCALL(old_readdir)(long arg0, void *arg1, long arg2) {}\n\nPOST_SYSCALL(old_readdir)(long res, long arg0, void *arg1, long arg2) {\n  // Missing definition of 'struct old_linux_dirent'.\n}\n\nPRE_SYSCALL(pselect6)\n(long arg0, __sanitizer___kernel_fd_set *arg1,\n __sanitizer___kernel_fd_set *arg2, __sanitizer___kernel_fd_set *arg3,\n void *arg4, void *arg5) {}\n\nPOST_SYSCALL(pselect6)\n(long res, long arg0, __sanitizer___kernel_fd_set *arg1,\n __sanitizer___kernel_fd_set *arg2, __sanitizer___kernel_fd_set *arg3,\n void *arg4, void *arg5) {\n  if (res >= 0) {\n    if (arg1)\n      POST_WRITE(arg1, sizeof(*arg1));\n    if (arg2)\n      POST_WRITE(arg2, sizeof(*arg2));\n    if (arg3)\n      POST_WRITE(arg3, sizeof(*arg3));\n    if (arg4)\n      POST_WRITE(arg4, struct_timespec_sz);\n  }\n}\n\nPRE_SYSCALL(ppoll)\n(__sanitizer_pollfd *arg0, long arg1, void *arg2, const kernel_sigset_t *arg3,\n long arg4) {\n  if (arg3)\n    PRE_READ(arg3, arg4);\n}\n\nPOST_SYSCALL(ppoll)\n(long res, __sanitizer_pollfd *arg0, long arg1, void *arg2, const void *arg3,\n long arg4) {\n  if (res >= 0) {\n    if (arg0)\n      POST_WRITE(arg0, sizeof(*arg0));\n    if (arg2)\n      POST_WRITE(arg2, struct_timespec_sz);\n  }\n}\n\nPRE_SYSCALL(syncfs)(long fd) {}\n\nPOST_SYSCALL(syncfs)(long res, long fd) {}\n\nPRE_SYSCALL(perf_event_open)\n(__sanitizer_perf_event_attr *attr_uptr, long pid, long cpu, long group_fd,\n long flags) {\n  if (attr_uptr)\n    PRE_READ(attr_uptr, 
attr_uptr->size);\n}\n\nPOST_SYSCALL(perf_event_open)\n(long res, __sanitizer_perf_event_attr *attr_uptr, long pid, long cpu,\n long group_fd, long flags) {}\n\nPRE_SYSCALL(mmap_pgoff)\n(long addr, long len, long prot, long flags, long fd, long pgoff) {}\n\nPOST_SYSCALL(mmap_pgoff)\n(long res, long addr, long len, long prot, long flags, long fd, long pgoff) {}\n\nPRE_SYSCALL(old_mmap)(void *arg) {}\n\nPOST_SYSCALL(old_mmap)(long res, void *arg) {}\n\nPRE_SYSCALL(name_to_handle_at)\n(long dfd, const void *name, void *handle, void *mnt_id, long flag) {}\n\nPOST_SYSCALL(name_to_handle_at)\n(long res, long dfd, const void *name, void *handle, void *mnt_id, long flag) {}\n\nPRE_SYSCALL(open_by_handle_at)(long mountdirfd, void *handle, long flags) {}\n\nPOST_SYSCALL(open_by_handle_at)\n(long res, long mountdirfd, void *handle, long flags) {}\n\nPRE_SYSCALL(setns)(long fd, long nstype) {}\n\nPOST_SYSCALL(setns)(long res, long fd, long nstype) {}\n\nPRE_SYSCALL(process_vm_readv)\n(long pid, const __sanitizer_iovec *lvec, long liovcnt, const void *rvec,\n long riovcnt, long flags) {}\n\nPOST_SYSCALL(process_vm_readv)\n(long res, long pid, const __sanitizer_iovec *lvec, long liovcnt,\n const void *rvec, long riovcnt, long flags) {\n  if (res >= 0) {\n    if (lvec)\n      kernel_write_iovec(lvec, liovcnt, res);\n  }\n}\n\nPRE_SYSCALL(process_vm_writev)\n(long pid, const __sanitizer_iovec *lvec, long liovcnt, const void *rvec,\n long riovcnt, long flags) {}\n\nPOST_SYSCALL(process_vm_writev)\n(long res, long pid, const __sanitizer_iovec *lvec, long liovcnt,\n const void *rvec, long riovcnt, long flags) {\n  if (res >= 0) {\n    if (lvec)\n      kernel_read_iovec(lvec, liovcnt, res);\n  }\n}\n\nPRE_SYSCALL(fork)() { COMMON_SYSCALL_PRE_FORK(); }\n\nPOST_SYSCALL(fork)(long res) { COMMON_SYSCALL_POST_FORK(res); }\n\nPRE_SYSCALL(vfork)() { COMMON_SYSCALL_PRE_FORK(); }\n\nPOST_SYSCALL(vfork)(long res) { COMMON_SYSCALL_POST_FORK(res); }\n\nPRE_SYSCALL(sigaction)\n(long signum, const 
__sanitizer_kernel_sigaction_t *act,\n __sanitizer_kernel_sigaction_t *oldact) {\n  if (act) {\n    PRE_READ(&act->sigaction, sizeof(act->sigaction));\n    PRE_READ(&act->sa_flags, sizeof(act->sa_flags));\n    PRE_READ(&act->sa_mask, sizeof(act->sa_mask));\n  }\n}\n\nPOST_SYSCALL(sigaction)\n(long res, long signum, const __sanitizer_kernel_sigaction_t *act,\n __sanitizer_kernel_sigaction_t *oldact) {\n  if (res >= 0 && oldact)\n    POST_WRITE(oldact, sizeof(*oldact));\n}\n\nPRE_SYSCALL(rt_sigaction)\n(long signum, const __sanitizer_kernel_sigaction_t *act,\n __sanitizer_kernel_sigaction_t *oldact, SIZE_T sz) {\n  if (act) {\n    PRE_READ(&act->sigaction, sizeof(act->sigaction));\n    PRE_READ(&act->sa_flags, sizeof(act->sa_flags));\n    PRE_READ(&act->sa_mask, sz);\n  }\n}\n\nPOST_SYSCALL(rt_sigaction)\n(long res, long signum, const __sanitizer_kernel_sigaction_t *act,\n __sanitizer_kernel_sigaction_t *oldact, SIZE_T sz) {\n  if (res >= 0 && oldact) {\n    SIZE_T oldact_sz = ((char *)&oldact->sa_mask) - ((char *)oldact) + sz;\n    POST_WRITE(oldact, oldact_sz);\n  }\n}\n\nPRE_SYSCALL(getrandom)(void *buf, uptr count, long flags) {\n  if (buf) {\n    PRE_WRITE(buf, count);\n  }\n}\n\nPOST_SYSCALL(getrandom)(long res, void *buf, uptr count, long flags) {\n  if (res > 0 && buf) {\n    POST_WRITE(buf, res);\n  }\n}\n\nPRE_SYSCALL(sigaltstack)(const void *ss, void *oss) {\n  if (ss != nullptr) {\n    PRE_READ(ss, struct_stack_t_sz);\n  }\n  if (oss != nullptr) {\n    PRE_WRITE(oss, struct_stack_t_sz);\n  }\n}\n\nPOST_SYSCALL(sigaltstack)(long res, void *ss, void *oss) {\n  if (res == 0) {\n    if (oss != nullptr) {\n      POST_WRITE(oss, struct_stack_t_sz);\n    }\n  }\n}\n}  // extern \"C\"\n\n#  undef PRE_SYSCALL\n#  undef PRE_READ\n#  undef PRE_WRITE\n#  undef POST_SYSCALL\n#  undef POST_READ\n#  undef POST_WRITE\n\n#endif  // SANITIZER_LINUX\n"
  },
  {
    "path": "runtime/sanitizer_common/sanitizer_coverage_fuchsia.cpp",
    "content": "//===-- sanitizer_coverage_fuchsia.cpp ------------------------------------===//\n//\n// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n// See https://llvm.org/LICENSE.txt for license information.\n// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n//\n//===----------------------------------------------------------------------===//\n//\n// Sanitizer Coverage Controller for Trace PC Guard, Fuchsia-specific version.\n//\n// This Fuchsia-specific implementation uses the same basic scheme and the\n// same simple '.sancov' file format as the generic implementation.  The\n// difference is that we just produce a single blob of output for the whole\n// program, not a separate one per DSO.  We do not sort the PC table and do\n// not prune the zeros, so the resulting file is always as large as it\n// would be to report 100% coverage.  Implicit tracing information about\n// the address ranges of DSOs allows offline tools to split the one big\n// blob into separate files that the 'sancov' tool can understand.\n//\n// Unlike the traditional implementation that uses an atexit hook to write\n// out data files at the end, the results on Fuchsia do not go into a file\n// per se.  The 'coverage_dir' option is ignored.  Instead, they are stored\n// directly into a shared memory object (a Zircon VMO).  
At exit, that VMO\n// is handed over to a system service that's responsible for getting the\n// data out to somewhere that it can be fed into the sancov tool (where and\n// how is not our problem).\n\n#include \"sanitizer_platform.h\"\n#if SANITIZER_FUCHSIA\n#include <zircon/process.h>\n#include <zircon/sanitizer.h>\n#include <zircon/syscalls.h>\n\n#include \"sanitizer_atomic.h\"\n#include \"sanitizer_common.h\"\n#include \"sanitizer_internal_defs.h\"\n#include \"sanitizer_symbolizer_fuchsia.h\"\n\nusing namespace __sanitizer;\n\nnamespace __sancov {\nnamespace {\n\n// TODO(mcgrathr): Move the constant into a header shared with other impls.\nconstexpr u64 Magic64 = 0xC0BFFFFFFFFFFF64ULL;\nstatic_assert(SANITIZER_WORDSIZE == 64, \"Fuchsia is always LP64\");\n\nconstexpr const char kSancovSinkName[] = \"sancov\";\n\n// Collects trace-pc guard coverage.\n// This class relies on zero-initialization.\nclass TracePcGuardController final {\n public:\n  constexpr TracePcGuardController() {}\n\n  // For each PC location being tracked, there is a u32 reserved in global\n  // data called the \"guard\".  At startup, we assign each guard slot a\n  // unique index into the big results array.  Later during runtime, the\n  // first call to TracePcGuard (below) will store the corresponding PC at\n  // that index in the array.  (Each later call with the same guard slot is\n  // presumed to be from the same PC.)  Then it clears the guard slot back\n  // to zero, which tells the compiler not to bother calling in again.  At\n  // the end of the run, we have a big array where each element is either\n  // zero or is a tracked PC location that was hit in the trace.\n\n  // This is called from global constructors.  Each translation unit has a\n  // contiguous array of guard slots, and a constructor that calls here\n  // with the bounds of its array.  Those constructors are allowed to call\n  // here more than once for the same array.  
Usually all of these\n  // constructors run in the initial thread, but it's possible that a\n  // dlopen call on a secondary thread will run constructors that get here.\n  void InitTracePcGuard(u32 *start, u32 *end) {\n    if (end > start && *start == 0 && common_flags()->coverage) {\n      // Complete the setup before filling in any guards with indices.\n      // This avoids the possibility of code called from Setup reentering\n      // TracePcGuard.\n      u32 idx = Setup(end - start);\n      for (u32 *p = start; p < end; ++p) {\n        *p = idx++;\n      }\n    }\n  }\n\n  void TracePcGuard(u32 *guard, uptr pc) {\n    atomic_uint32_t *guard_ptr = reinterpret_cast<atomic_uint32_t *>(guard);\n    u32 idx = atomic_exchange(guard_ptr, 0, memory_order_relaxed);\n    if (idx > 0)\n      array_[idx] = pc;\n  }\n\n  void Dump() {\n    Lock locked(&setup_lock_);\n    if (array_) {\n      CHECK_NE(vmo_, ZX_HANDLE_INVALID);\n\n      // Publish the VMO to the system, where it can be collected and\n      // analyzed after this process exits.  This always consumes the VMO\n      // handle.  Any failure is just logged and not indicated to us.\n      __sanitizer_publish_data(kSancovSinkName, vmo_);\n      vmo_ = ZX_HANDLE_INVALID;\n\n      // This will route to __sanitizer_log_write, which will ensure that\n      // information about shared libraries is written out.  This message\n      // uses the `dumpfile` symbolizer markup element to highlight the\n      // dump.  See the explanation for this in:\n      // https://fuchsia.googlesource.com/zircon/+/master/docs/symbolizer_markup.md\n      Printf(\"SanitizerCoverage: \" FORMAT_DUMPFILE \" with up to %u PCs\\n\",\n             kSancovSinkName, vmo_name_, next_index_ - 1);\n    }\n  }\n\n private:\n  // We map in the largest possible view into the VMO: one word\n  // for every possible 32-bit index value.  
This avoids the need\n  // to change the mapping when increasing the size of the VMO.\n  // We can always spare the 32G of address space.\n  static constexpr size_t MappingSize = sizeof(uptr) << 32;\n\n  Mutex setup_lock_;\n  uptr *array_ = nullptr;\n  u32 next_index_ = 0;\n  zx_handle_t vmo_ = {};\n  char vmo_name_[ZX_MAX_NAME_LEN] = {};\n\n  size_t DataSize() const { return next_index_ * sizeof(uintptr_t); }\n\n  u32 Setup(u32 num_guards) {\n    Lock locked(&setup_lock_);\n    DCHECK(common_flags()->coverage);\n\n    if (next_index_ == 0) {\n      CHECK_EQ(vmo_, ZX_HANDLE_INVALID);\n      CHECK_EQ(array_, nullptr);\n\n      // The first sample goes at [1] to reserve [0] for the magic number.\n      next_index_ = 1 + num_guards;\n\n      zx_status_t status = _zx_vmo_create(DataSize(), ZX_VMO_RESIZABLE, &vmo_);\n      CHECK_EQ(status, ZX_OK);\n\n      // Give the VMO a name including our process KOID so it's easy to spot.\n      internal_snprintf(vmo_name_, sizeof(vmo_name_), \"%s.%zu\", kSancovSinkName,\n                        internal_getpid());\n      _zx_object_set_property(vmo_, ZX_PROP_NAME, vmo_name_,\n                              internal_strlen(vmo_name_));\n      uint64_t size = DataSize();\n      status = _zx_object_set_property(vmo_, ZX_PROP_VMO_CONTENT_SIZE, &size,\n                                       sizeof(size));\n      CHECK_EQ(status, ZX_OK);\n\n      // Map the largest possible view we might need into the VMO.  
Later\n      // we might need to increase the VMO's size before we can use larger\n      // indices, but we'll never move the mapping address so we don't have\n      // any multi-thread synchronization issues with that.\n      uintptr_t mapping;\n      status =\n          _zx_vmar_map(_zx_vmar_root_self(), ZX_VM_PERM_READ | ZX_VM_PERM_WRITE,\n                       0, vmo_, 0, MappingSize, &mapping);\n      CHECK_EQ(status, ZX_OK);\n\n      // Hereafter other threads are free to start storing into\n      // elements [1, next_index_) of the big array.\n      array_ = reinterpret_cast<uptr *>(mapping);\n\n      // Store the magic number.\n      // Hereafter, the VMO serves as the contents of the '.sancov' file.\n      array_[0] = Magic64;\n\n      return 1;\n    } else {\n      // The VMO is already mapped in, but it's not big enough to use the\n      // new indices.  So increase the size to cover the new maximum index.\n\n      CHECK_NE(vmo_, ZX_HANDLE_INVALID);\n      CHECK_NE(array_, nullptr);\n\n      uint32_t first_index = next_index_;\n      next_index_ += num_guards;\n\n      zx_status_t status = _zx_vmo_set_size(vmo_, DataSize());\n      CHECK_EQ(status, ZX_OK);\n      uint64_t size = DataSize();\n      status = _zx_object_set_property(vmo_, ZX_PROP_VMO_CONTENT_SIZE, &size,\n                                       sizeof(size));\n      CHECK_EQ(status, ZX_OK);\n\n      return first_index;\n    }\n  }\n};\n\nstatic TracePcGuardController pc_guard_controller;\n\n}  // namespace\n}  // namespace __sancov\n\nnamespace __sanitizer {\nvoid InitializeCoverage(bool enabled, const char *dir) {\n  CHECK_EQ(enabled, common_flags()->coverage);\n  CHECK_EQ(dir, common_flags()->coverage_dir);\n\n  static bool coverage_enabled = false;\n  if (!coverage_enabled) {\n    coverage_enabled = enabled;\n    Atexit(__sanitizer_cov_dump);\n    AddDieCallback(__sanitizer_cov_dump);\n  }\n}\n}  // namespace __sanitizer\n\nextern \"C\" {\nSANITIZER_INTERFACE_ATTRIBUTE void 
__sanitizer_dump_coverage(const uptr *pcs,\n                                                             uptr len) {\n  UNIMPLEMENTED();\n}\n\nSANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_cov_trace_pc_guard, u32 *guard) {\n  if (!*guard)\n    return;\n  __sancov::pc_guard_controller.TracePcGuard(guard, GET_CALLER_PC() - 1);\n}\n\nSANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_cov_trace_pc_guard_init,\n                             u32 *start, u32 *end) {\n  if (start == end || *start)\n    return;\n  __sancov::pc_guard_controller.InitTracePcGuard(start, end);\n}\n\nSANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_dump_trace_pc_guard_coverage() {\n  __sancov::pc_guard_controller.Dump();\n}\nSANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_cov_dump() {\n  __sanitizer_dump_trace_pc_guard_coverage();\n}\n// Default empty implementations (weak). Users should redefine them.\nSANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_cov_trace_cmp, void) {}\nSANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_cov_trace_cmp1, void) {}\nSANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_cov_trace_cmp2, void) {}\nSANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_cov_trace_cmp4, void) {}\nSANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_cov_trace_cmp8, void) {}\nSANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_cov_trace_const_cmp1, void) {}\nSANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_cov_trace_const_cmp2, void) {}\nSANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_cov_trace_const_cmp4, void) {}\nSANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_cov_trace_const_cmp8, void) {}\nSANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_cov_trace_switch, void) {}\nSANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_cov_trace_div4, void) {}\nSANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_cov_trace_div8, void) {}\nSANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_cov_trace_gep, void) {}\nSANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_cov_trace_pc_indir, void) {}\n}  // extern \"C\"\n\n#endif  // !SANITIZER_FUCHSIA\n"
  },
  {
    "path": "runtime/sanitizer_common/sanitizer_coverage_interface.inc",
    "content": "//===-- sanitizer_coverage_interface.inc ----------------------------------===//\n//\n// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n// See https://llvm.org/LICENSE.txt for license information.\n// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n//\n//===----------------------------------------------------------------------===//\n// Sanitizer Coverage interface list.\n//===----------------------------------------------------------------------===//\nINTERFACE_FUNCTION(__sanitizer_cov_dump)\nINTERFACE_FUNCTION(__sanitizer_cov_reset)\nINTERFACE_FUNCTION(__sanitizer_dump_coverage)\nINTERFACE_FUNCTION(__sanitizer_dump_trace_pc_guard_coverage)\nINTERFACE_WEAK_FUNCTION(__sancov_default_options)\nINTERFACE_WEAK_FUNCTION(__sanitizer_cov_trace_cmp)\nINTERFACE_WEAK_FUNCTION(__sanitizer_cov_trace_cmp1)\nINTERFACE_WEAK_FUNCTION(__sanitizer_cov_trace_cmp2)\nINTERFACE_WEAK_FUNCTION(__sanitizer_cov_trace_cmp4)\nINTERFACE_WEAK_FUNCTION(__sanitizer_cov_trace_cmp8)\nINTERFACE_WEAK_FUNCTION(__sanitizer_cov_trace_const_cmp1)\nINTERFACE_WEAK_FUNCTION(__sanitizer_cov_trace_const_cmp2)\nINTERFACE_WEAK_FUNCTION(__sanitizer_cov_trace_const_cmp4)\nINTERFACE_WEAK_FUNCTION(__sanitizer_cov_trace_const_cmp8)\nINTERFACE_WEAK_FUNCTION(__sanitizer_cov_trace_div4)\nINTERFACE_WEAK_FUNCTION(__sanitizer_cov_trace_div8)\nINTERFACE_WEAK_FUNCTION(__sanitizer_cov_trace_gep)\nINTERFACE_WEAK_FUNCTION(__sanitizer_cov_trace_pc_guard)\nINTERFACE_WEAK_FUNCTION(__sanitizer_cov_trace_pc_guard_init)\nINTERFACE_WEAK_FUNCTION(__sanitizer_cov_trace_pc_indir)\nINTERFACE_WEAK_FUNCTION(__sanitizer_cov_trace_switch)\nINTERFACE_WEAK_FUNCTION(__sanitizer_cov_8bit_counters_init)\nINTERFACE_WEAK_FUNCTION(__sanitizer_cov_bool_flag_init)\nINTERFACE_WEAK_FUNCTION(__sanitizer_cov_pcs_init)\n"
  },
  {
    "path": "runtime/sanitizer_common/sanitizer_coverage_libcdep_new.cpp",
    "content": "//===-- sanitizer_coverage_libcdep_new.cpp --------------------------------===//\n//\n// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n// See https://llvm.org/LICENSE.txt for license information.\n// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n//\n//===----------------------------------------------------------------------===//\n// Sanitizer Coverage Controller for Trace PC Guard.\n\n#include \"sanitizer_platform.h\"\n\n#if !SANITIZER_FUCHSIA\n#include \"sancov_flags.h\"\n#include \"sanitizer_allocator_internal.h\"\n#include \"sanitizer_atomic.h\"\n#include \"sanitizer_common.h\"\n#include \"sanitizer_file.h\"\n\nusing namespace __sanitizer;\n\nusing AddressRange = LoadedModule::AddressRange;\n\nnamespace __sancov {\nnamespace {\n\nstatic const u64 Magic64 = 0xC0BFFFFFFFFFFF64ULL;\nstatic const u64 Magic32 = 0xC0BFFFFFFFFFFF32ULL;\nstatic const u64 Magic = SANITIZER_WORDSIZE == 64 ? Magic64 : Magic32;\n\nstatic fd_t OpenFile(const char* path) {\n  error_t err;\n  fd_t fd = OpenFile(path, WrOnly, &err);\n  if (fd == kInvalidFd)\n    Report(\"SanitizerCoverage: failed to open %s for writing (reason: %d)\\n\",\n           path, err);\n  return fd;\n}\n\nstatic void GetCoverageFilename(char* path, const char* name,\n                                const char* extension) {\n  CHECK(name);\n  internal_snprintf(path, kMaxPathLength, \"%s/%s.%zd.%s\",\n                    common_flags()->coverage_dir, name, internal_getpid(),\n                    extension);\n}\n\nstatic void WriteModuleCoverage(char* file_path, const char* module_name,\n                                const uptr* pcs, uptr len) {\n  GetCoverageFilename(file_path, StripModuleName(module_name), \"sancov\");\n  fd_t fd = OpenFile(file_path);\n  WriteToFile(fd, &Magic, sizeof(Magic));\n  WriteToFile(fd, pcs, len * sizeof(*pcs));\n  CloseFile(fd);\n  Printf(\"SanitizerCoverage: %s: %zd PCs written\\n\", file_path, len);\n}\n\nstatic void 
SanitizerDumpCoverage(const uptr* unsorted_pcs, uptr len) {\n  if (!len) return;\n\n  char* file_path = static_cast<char*>(InternalAlloc(kMaxPathLength));\n  char* module_name = static_cast<char*>(InternalAlloc(kMaxPathLength));\n  uptr* pcs = static_cast<uptr*>(InternalAlloc(len * sizeof(uptr)));\n\n  internal_memcpy(pcs, unsorted_pcs, len * sizeof(uptr));\n  Sort(pcs, len);\n\n  bool module_found = false;\n  uptr last_base = 0;\n  uptr module_start_idx = 0;\n\n  for (uptr i = 0; i < len; ++i) {\n    const uptr pc = pcs[i];\n    if (!pc) continue;\n\n    if (!__sanitizer_get_module_and_offset_for_pc(pc, nullptr, 0, &pcs[i])) {\n      Printf(\"ERROR: unknown pc 0x%zx (may happen if dlclose is used)\\n\", pc);\n      continue;\n    }\n    uptr module_base = pc - pcs[i];\n\n    if (module_base != last_base || !module_found) {\n      if (module_found) {\n        WriteModuleCoverage(file_path, module_name, &pcs[module_start_idx],\n                            i - module_start_idx);\n      }\n\n      last_base = module_base;\n      module_start_idx = i;\n      module_found = true;\n      __sanitizer_get_module_and_offset_for_pc(pc, module_name, kMaxPathLength,\n                                               &pcs[i]);\n    }\n  }\n\n  if (module_found) {\n    WriteModuleCoverage(file_path, module_name, &pcs[module_start_idx],\n                        len - module_start_idx);\n  }\n\n  InternalFree(file_path);\n  InternalFree(module_name);\n  InternalFree(pcs);\n}\n\n// Collects trace-pc guard coverage.\n// This class relies on zero-initialization.\nclass TracePcGuardController {\n public:\n  void Initialize() {\n    CHECK(!initialized);\n\n    initialized = true;\n    InitializeSancovFlags();\n\n    pc_vector.Initialize(0);\n  }\n\n  void InitTracePcGuard(u32* start, u32* end) {\n    if (!initialized) Initialize();\n    CHECK(!*start);\n    CHECK_NE(start, end);\n\n    u32 i = pc_vector.size();\n    for (u32* p = start; p < end; p++) *p = ++i;\n    pc_vector.resize(i);\n  
}\n\n  void TracePcGuard(u32* guard, uptr pc) {\n    u32 idx = *guard;\n    if (!idx) return;\n    // we start indices from 1.\n    atomic_uintptr_t* pc_ptr =\n        reinterpret_cast<atomic_uintptr_t*>(&pc_vector[idx - 1]);\n    if (atomic_load(pc_ptr, memory_order_relaxed) == 0)\n      atomic_store(pc_ptr, pc, memory_order_relaxed);\n  }\n\n  void Reset() {\n    internal_memset(&pc_vector[0], 0, sizeof(pc_vector[0]) * pc_vector.size());\n  }\n\n  void Dump() {\n    if (!initialized || !common_flags()->coverage) return;\n    __sanitizer_dump_coverage(pc_vector.data(), pc_vector.size());\n  }\n\n private:\n  bool initialized;\n  InternalMmapVectorNoCtor<uptr> pc_vector;\n};\n\nstatic TracePcGuardController pc_guard_controller;\n\n// A basic default implementation of callbacks for\n// -fsanitize-coverage=inline-8bit-counters,pc-table.\n// Use TOOL_OPTIONS (UBSAN_OPTIONS, etc) to dump the coverage data:\n// * cov_8bit_counters_out=PATH to dump the 8bit counters.\n// * cov_pcs_out=PATH to dump the pc table.\n//\n// Most users will still need to define their own callbacks for greater\n// flexibility.\nnamespace SingletonCounterCoverage {\n\nstatic char *counters_beg, *counters_end;\nstatic const uptr *pcs_beg, *pcs_end;\n\nstatic void DumpCoverage() {\n  const char* file_path = common_flags()->cov_8bit_counters_out;\n  if (file_path && internal_strlen(file_path)) {\n    fd_t fd = OpenFile(file_path);\n    FileCloser file_closer(fd);\n    uptr size = counters_end - counters_beg;\n    WriteToFile(fd, counters_beg, size);\n    if (common_flags()->verbosity)\n      __sanitizer::Printf(\"cov_8bit_counters_out: written %zd bytes to %s\\n\",\n                          size, file_path);\n  }\n  file_path = common_flags()->cov_pcs_out;\n  if (file_path && internal_strlen(file_path)) {\n    fd_t fd = OpenFile(file_path);\n    FileCloser file_closer(fd);\n    uptr size = (pcs_end - pcs_beg) * sizeof(uptr);\n    WriteToFile(fd, pcs_beg, size);\n    if 
(common_flags()->verbosity)\n      __sanitizer::Printf(\"cov_pcs_out: written %zd bytes to %s\\n\", size,\n                          file_path);\n  }\n}\n\nstatic void Cov8bitCountersInit(char* beg, char* end) {\n  counters_beg = beg;\n  counters_end = end;\n  Atexit(DumpCoverage);\n}\n\nstatic void CovPcsInit(const uptr* beg, const uptr* end) {\n  pcs_beg = beg;\n  pcs_end = end;\n}\n\n}  // namespace SingletonCounterCoverage\n\n}  // namespace\n}  // namespace __sancov\n\nnamespace __sanitizer {\nvoid InitializeCoverage(bool enabled, const char *dir) {\n  static bool coverage_enabled = false;\n  if (coverage_enabled)\n    return;  // May happen if two sanitizer enable coverage in the same process.\n  coverage_enabled = enabled;\n  Atexit(__sanitizer_cov_dump);\n  AddDieCallback(__sanitizer_cov_dump);\n}\n} // namespace __sanitizer\n\nextern \"C\" {\nSANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_dump_coverage(const uptr* pcs,\n                                                             uptr len) {\n  return __sancov::SanitizerDumpCoverage(pcs, len);\n}\n\nSANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_cov_trace_pc_guard, u32* guard) {\n  if (!*guard) return;\n  __sancov::pc_guard_controller.TracePcGuard(guard, GET_CALLER_PC() - 1);\n}\n\nSANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_cov_trace_pc_guard_init,\n                             u32* start, u32* end) {\n  if (start == end || *start) return;\n  __sancov::pc_guard_controller.InitTracePcGuard(start, end);\n}\n\nSANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_dump_trace_pc_guard_coverage() {\n  __sancov::pc_guard_controller.Dump();\n}\nSANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_cov_dump() {\n  __sanitizer_dump_trace_pc_guard_coverage();\n}\nSANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_cov_reset() {\n  __sancov::pc_guard_controller.Reset();\n}\n// Default implementations (weak).\n// Either empty or very simple.\n// Most users should redefine them.\nSANITIZER_INTERFACE_WEAK_DEF(void, 
__sanitizer_cov_trace_cmp, void) {}\nSANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_cov_trace_cmp1, void) {}\nSANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_cov_trace_cmp2, void) {}\nSANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_cov_trace_cmp4, void) {}\nSANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_cov_trace_cmp8, void) {}\nSANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_cov_trace_const_cmp1, void) {}\nSANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_cov_trace_const_cmp2, void) {}\nSANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_cov_trace_const_cmp4, void) {}\nSANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_cov_trace_const_cmp8, void) {}\nSANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_cov_trace_switch, void) {}\nSANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_cov_trace_div4, void) {}\nSANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_cov_trace_div8, void) {}\nSANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_cov_trace_gep, void) {}\nSANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_cov_trace_pc_indir, void) {}\nSANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_cov_8bit_counters_init,\n                             char* start, char* end) {\n  __sancov::SingletonCounterCoverage::Cov8bitCountersInit(start, end);\n}\nSANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_cov_bool_flag_init, void) {}\nSANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_cov_pcs_init, const uptr* beg,\n                             const uptr* end) {\n  __sancov::SingletonCounterCoverage::CovPcsInit(beg, end);\n}\n}  // extern \"C\"\n// Weak definition for code instrumented with -fsanitize-coverage=stack-depth\n// and later linked with code containing a strong definition.\n// E.g., -fsanitize=fuzzer-no-link\nSANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE\nSANITIZER_TLS_INITIAL_EXEC_ATTRIBUTE uptr __sancov_lowest_stack;\n\n#endif  // !SANITIZER_FUCHSIA\n"
  },
  {
    "path": "runtime/sanitizer_common/sanitizer_coverage_win_dll_thunk.cpp",
    "content": "//===-- sanitizer_coverage_win_dll_thunk.cpp ------------------------------===//\n//\n// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n// See https://llvm.org/LICENSE.txt for license information.\n// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n//\n//===----------------------------------------------------------------------===//\n//\n// This file defines a family of thunks that should be statically linked into\n// the DLLs that have instrumentation in order to delegate the calls to the\n// shared runtime that lives in the main binary.\n// See https://github.com/google/sanitizers/issues/209 for the details.\n//===----------------------------------------------------------------------===//\n#ifdef SANITIZER_DLL_THUNK\n#include \"sanitizer_win_dll_thunk.h\"\n// Sanitizer Coverage interface functions.\n#define INTERFACE_FUNCTION(Name) INTERCEPT_SANITIZER_FUNCTION(Name)\n#define INTERFACE_WEAK_FUNCTION(Name) INTERCEPT_SANITIZER_WEAK_FUNCTION(Name)\n#include \"sanitizer_coverage_interface.inc\"\n#endif // SANITIZER_DLL_THUNK\n"
  },
  {
    "path": "runtime/sanitizer_common/sanitizer_coverage_win_dynamic_runtime_thunk.cpp",
    "content": "//===-- sanitizer_coverage_win_dynamic_runtime_thunk.cpp ------------------===//\n//\n// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n// See https://llvm.org/LICENSE.txt for license information.\n// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n//\n//===----------------------------------------------------------------------===//\n//\n// This file defines things that need to be present in the application modules\n// to interact with Sanitizer Coverage, when it is included in a dll.\n//\n//===----------------------------------------------------------------------===//\n#ifdef SANITIZER_DYNAMIC_RUNTIME_THUNK\n#define SANITIZER_IMPORT_INTERFACE 1\n#include \"sanitizer_win_defs.h\"\n// Define weak alias for all weak functions imported from sanitizer coverage.\n#define INTERFACE_FUNCTION(Name)\n#define INTERFACE_WEAK_FUNCTION(Name) WIN_WEAK_IMPORT_DEF(Name)\n#include \"sanitizer_coverage_interface.inc\"\n#endif // SANITIZER_DYNAMIC_RUNTIME_THUNK\n\nnamespace __sanitizer {\n// Add one, otherwise unused, external symbol to this object file so that the\n// Visual C++ linker includes it and reads the .drective section.\nvoid ForceWholeArchiveIncludeForSanCov() {}\n}\n"
  },
  {
    "path": "runtime/sanitizer_common/sanitizer_coverage_win_sections.cpp",
    "content": "//===-- sanitizer_coverage_win_sections.cpp -------------------------------===//\n//\n// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n// See https://llvm.org/LICENSE.txt for license information.\n// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n//\n//===----------------------------------------------------------------------===//\n//\n// This file defines delimiters for Sanitizer Coverage's section. It contains\n// Windows specific tricks to coax the linker into giving us the start and stop\n// addresses of a section, as ELF linkers can do, to get the size of certain\n// arrays. According to https://msdn.microsoft.com/en-us/library/7977wcck.aspx\n// sections with the same name before \"$\" are sorted alphabetically by the\n// string that comes after \"$\" and merged into one section. We take advantage\n// of this by putting data we want the size of into the middle (M) of a section,\n// by using the letter \"M\" after \"$\". We get the start of this data (ie:\n// __start_section_name) by making the start variable come at the start of the\n// section (using the letter A after \"$\"). We do the same to get the end of the\n// data by using the letter \"Z\" after \"$\" to make the end variable come after\n// the data. Note that because of our technique the address of the start\n// variable is actually the address of data that comes before our middle\n// section. We also need to prevent the linker from adding any padding. Each\n// technique we use for this is explained in the comments below.\n//===----------------------------------------------------------------------===//\n\n#include \"sanitizer_platform.h\"\n#if SANITIZER_WINDOWS\n#include <stdint.h>\n\nextern \"C\" {\n// Use uint64_t so the linker won't need to add any padding if it tries to word\n// align the start of the 8-bit counters array. 
The array will always start 8\n// bytes after __start_sancov_cntrs.\n#pragma section(\".SCOV$CA\", read, write)\n__declspec(allocate(\".SCOV$CA\")) uint64_t __start___sancov_cntrs = 0;\n\n// Even though we said not to align __stop__sancov_cntrs (using the \"align\"\n// declspec), MSVC's linker may try to align the section, .SCOV$CZ, containing\n// it. This can cause a mismatch between the number of PCs and counters since\n// each PCTable element is 8 bytes (unlike counters which are 1 byte) so no\n// padding would be added to align .SCOVP$Z, However, if .SCOV$CZ section is 1\n// byte, the linker won't try to align it on an 8-byte boundary, so use a\n// uint8_t for __stop_sancov_cntrs.\n#pragma section(\".SCOV$CZ\", read, write)\n__declspec(allocate(\".SCOV$CZ\")) __declspec(align(1)) uint8_t\n    __stop___sancov_cntrs = 0;\n\n#pragma section(\".SCOV$GA\", read, write)\n__declspec(allocate(\".SCOV$GA\")) uint64_t __start___sancov_guards = 0;\n#pragma section(\".SCOV$GZ\", read, write)\n__declspec(allocate(\".SCOV$GZ\")) __declspec(align(1)) uint8_t\n    __stop___sancov_guards = 0;\n\n// The guard array and counter array should both be merged into the .data\n// section to reduce the number of PE sections. However, because PCTable is\n// constant it should be merged with the .rdata section.\n#pragma comment(linker, \"/MERGE:.SCOV=.data\")\n\n#pragma section(\".SCOVP$A\", read)\n__declspec(allocate(\".SCOVP$A\")) uint64_t __start___sancov_pcs = 0;\n#pragma section(\".SCOVP$Z\", read)\n__declspec(allocate(\".SCOVP$Z\")) __declspec(align(1)) uint8_t\n    __stop___sancov_pcs = 0;\n\n#pragma comment(linker, \"/MERGE:.SCOVP=.rdata\")\n}\n#endif  // SANITIZER_WINDOWS\n"
  },
  {
    "path": "runtime/sanitizer_common/sanitizer_coverage_win_weak_interception.cpp",
    "content": "//===-- sanitizer_coverage_win_weak_interception.cpp ----------------------===//\n//\n// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n// See https://llvm.org/LICENSE.txt for license information.\n// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n//\n//===----------------------------------------------------------------------===//\n// This module should be included in Sanitizer Coverage when it implemented as a\n// shared library on Windows (dll), in order to delegate the calls of weak\n// functions to the implementation in the main executable when a strong\n// definition is provided.\n//===----------------------------------------------------------------------===//\n#ifdef SANITIZER_DYNAMIC\n#include \"sanitizer_win_weak_interception.h\"\n#include \"sanitizer_interface_internal.h\"\n#include \"sancov_flags.h\"\n// Check if strong definitions for weak functions are present in the main\n// executable. If that is the case, override dll functions to point to strong\n// implementations.\n#define INTERFACE_FUNCTION(Name)\n#define INTERFACE_WEAK_FUNCTION(Name) INTERCEPT_SANITIZER_WEAK_FUNCTION(Name)\n#include \"sanitizer_coverage_interface.inc\"\n#endif // SANITIZER_DYNAMIC\n"
  },
  {
    "path": "runtime/sanitizer_common/sanitizer_dbghelp.h",
    "content": "//===-- sanitizer_dbghelp.h ------------------------------*- C++ -*-===//\n//\n// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n// See https://llvm.org/LICENSE.txt for license information.\n// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n//\n//===----------------------------------------------------------------------===//\n//\n// Wrappers for lazy loaded dbghelp.dll. Provides function pointers and a\n// callback to initialize them.\n//\n//===----------------------------------------------------------------------===//\n\n#ifndef SANITIZER_SYMBOLIZER_WIN_H\n#define SANITIZER_SYMBOLIZER_WIN_H\n\n#if !SANITIZER_WINDOWS\n#error \"sanitizer_dbghelp.h is a Windows-only header\"\n#endif\n\n#define WIN32_LEAN_AND_MEAN\n#include <windows.h>\n#include <dbghelp.h>\n\nnamespace __sanitizer {\n\nextern decltype(::StackWalk64) *StackWalk64;\nextern decltype(::SymCleanup) *SymCleanup;\nextern decltype(::SymFromAddr) *SymFromAddr;\nextern decltype(::SymFunctionTableAccess64) *SymFunctionTableAccess64;\nextern decltype(::SymGetLineFromAddr64) *SymGetLineFromAddr64;\nextern decltype(::SymGetModuleBase64) *SymGetModuleBase64;\nextern decltype(::SymGetSearchPathW) *SymGetSearchPathW;\nextern decltype(::SymInitialize) *SymInitialize;\nextern decltype(::SymSetOptions) *SymSetOptions;\nextern decltype(::SymSetSearchPathW) *SymSetSearchPathW;\nextern decltype(::UnDecorateSymbolName) *UnDecorateSymbolName;\n\n}  // namespace __sanitizer\n\n#endif  // SANITIZER_SYMBOLIZER_WIN_H\n"
  },
  {
    "path": "runtime/sanitizer_common/sanitizer_deadlock_detector.h",
    "content": "//===-- sanitizer_deadlock_detector.h ---------------------------*- C++ -*-===//\n//\n// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n// See https://llvm.org/LICENSE.txt for license information.\n// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n//\n//===----------------------------------------------------------------------===//\n//\n// This file is a part of Sanitizer runtime.\n// The deadlock detector maintains a directed graph of lock acquisitions.\n// When a lock event happens, the detector checks if the locks already held by\n// the current thread are reachable from the newly acquired lock.\n//\n// The detector can handle only a fixed amount of simultaneously live locks\n// (a lock is alive if it has been locked at least once and has not been\n// destroyed). When the maximal number of locks is reached the entire graph\n// is flushed and the new lock epoch is started. The node ids from the old\n// epochs can not be used with any of the detector methods except for\n// nodeBelongsToCurrentEpoch().\n//\n// FIXME: this is work in progress, nothing really works yet.\n//\n//===----------------------------------------------------------------------===//\n\n#ifndef SANITIZER_DEADLOCK_DETECTOR_H\n#define SANITIZER_DEADLOCK_DETECTOR_H\n\n#include \"sanitizer_bvgraph.h\"\n#include \"sanitizer_common.h\"\n\nnamespace __sanitizer {\n\n// Thread-local state for DeadlockDetector.\n// It contains the locks currently held by the owning thread.\ntemplate <class BV>\nclass DeadlockDetectorTLS {\n public:\n  // No CTOR.\n  void clear() {\n    bv_.clear();\n    epoch_ = 0;\n    n_recursive_locks = 0;\n    n_all_locks_ = 0;\n  }\n\n  bool empty() const { return bv_.empty(); }\n\n  void ensureCurrentEpoch(uptr current_epoch) {\n    if (epoch_ == current_epoch) return;\n    bv_.clear();\n    epoch_ = current_epoch;\n    n_recursive_locks = 0;\n    n_all_locks_ = 0;\n  }\n\n  uptr getEpoch() const { return epoch_; }\n\n  // 
Returns true if this is the first (non-recursive) acquisition of this lock.\n  bool addLock(uptr lock_id, uptr current_epoch, u32 stk) {\n    CHECK_EQ(epoch_, current_epoch);\n    if (!bv_.setBit(lock_id)) {\n      // The lock is already held by this thread, it must be recursive.\n      CHECK_LT(n_recursive_locks, ARRAY_SIZE(recursive_locks));\n      recursive_locks[n_recursive_locks++] = lock_id;\n      return false;\n    }\n    CHECK_LT(n_all_locks_, ARRAY_SIZE(all_locks_with_contexts_));\n    // lock_id < BV::kSize, can cast to a smaller int.\n    u32 lock_id_short = static_cast<u32>(lock_id);\n    LockWithContext l = {lock_id_short, stk};\n    all_locks_with_contexts_[n_all_locks_++] = l;\n    return true;\n  }\n\n  void removeLock(uptr lock_id) {\n    if (n_recursive_locks) {\n      for (sptr i = n_recursive_locks - 1; i >= 0; i--) {\n        if (recursive_locks[i] == lock_id) {\n          n_recursive_locks--;\n          Swap(recursive_locks[i], recursive_locks[n_recursive_locks]);\n          return;\n        }\n      }\n    }\n    if (!bv_.clearBit(lock_id))\n      return;  // probably addLock happened before flush\n    if (n_all_locks_) {\n      for (sptr i = n_all_locks_ - 1; i >= 0; i--) {\n        if (all_locks_with_contexts_[i].lock == static_cast<u32>(lock_id)) {\n          Swap(all_locks_with_contexts_[i],\n               all_locks_with_contexts_[n_all_locks_ - 1]);\n          n_all_locks_--;\n          break;\n        }\n      }\n    }\n  }\n\n  u32 findLockContext(uptr lock_id) {\n    for (uptr i = 0; i < n_all_locks_; i++)\n      if (all_locks_with_contexts_[i].lock == static_cast<u32>(lock_id))\n        return all_locks_with_contexts_[i].stk;\n    return 0;\n  }\n\n  const BV &getLocks(uptr current_epoch) const {\n    CHECK_EQ(epoch_, current_epoch);\n    return bv_;\n  }\n\n  uptr getNumLocks() const { return n_all_locks_; }\n  uptr getLock(uptr idx) const { return all_locks_with_contexts_[idx].lock; }\n\n private:\n  BV bv_;\n  uptr epoch_;\n  
uptr recursive_locks[64];\n  uptr n_recursive_locks;\n  struct LockWithContext {\n    u32 lock;\n    u32 stk;\n  };\n  LockWithContext all_locks_with_contexts_[64];\n  uptr n_all_locks_;\n};\n\n// DeadlockDetector.\n// For deadlock detection to work we need one global DeadlockDetector object\n// and one DeadlockDetectorTLS object per evey thread.\n// This class is not thread safe, all concurrent accesses should be guarded\n// by an external lock.\n// Most of the methods of this class are not thread-safe (i.e. should\n// be protected by an external lock) unless explicitly told otherwise.\ntemplate <class BV>\nclass DeadlockDetector {\n public:\n  typedef BV BitVector;\n\n  uptr size() const { return g_.size(); }\n\n  // No CTOR.\n  void clear() {\n    current_epoch_ = 0;\n    available_nodes_.clear();\n    recycled_nodes_.clear();\n    g_.clear();\n    n_edges_ = 0;\n  }\n\n  // Allocate new deadlock detector node.\n  // If we are out of available nodes first try to recycle some.\n  // If there is nothing to recycle, flush the graph and increment the epoch.\n  // Associate 'data' (opaque user's object) with the new node.\n  uptr newNode(uptr data) {\n    if (!available_nodes_.empty())\n      return getAvailableNode(data);\n    if (!recycled_nodes_.empty()) {\n      for (sptr i = n_edges_ - 1; i >= 0; i--) {\n        if (recycled_nodes_.getBit(edges_[i].from) ||\n            recycled_nodes_.getBit(edges_[i].to)) {\n          Swap(edges_[i], edges_[n_edges_ - 1]);\n          n_edges_--;\n        }\n      }\n      CHECK(available_nodes_.empty());\n      // removeEdgesFrom was called in removeNode.\n      g_.removeEdgesTo(recycled_nodes_);\n      available_nodes_.setUnion(recycled_nodes_);\n      recycled_nodes_.clear();\n      return getAvailableNode(data);\n    }\n    // We are out of vacant nodes. 
Flush and increment the current_epoch_.\n    current_epoch_ += size();\n    recycled_nodes_.clear();\n    available_nodes_.setAll();\n    g_.clear();\n    n_edges_ = 0;\n    return getAvailableNode(data);\n  }\n\n  // Get data associated with the node created by newNode().\n  uptr getData(uptr node) const { return data_[nodeToIndex(node)]; }\n\n  bool nodeBelongsToCurrentEpoch(uptr node) {\n    return node && (node / size() * size()) == current_epoch_;\n  }\n\n  void removeNode(uptr node) {\n    uptr idx = nodeToIndex(node);\n    CHECK(!available_nodes_.getBit(idx));\n    CHECK(recycled_nodes_.setBit(idx));\n    g_.removeEdgesFrom(idx);\n  }\n\n  void ensureCurrentEpoch(DeadlockDetectorTLS<BV> *dtls) {\n    dtls->ensureCurrentEpoch(current_epoch_);\n  }\n\n  // Returns true if there is a cycle in the graph after this lock event.\n  // Ideally should be called before the lock is acquired so that we can\n  // report a deadlock before a real deadlock happens.\n  bool onLockBefore(DeadlockDetectorTLS<BV> *dtls, uptr cur_node) {\n    ensureCurrentEpoch(dtls);\n    uptr cur_idx = nodeToIndex(cur_node);\n    return g_.isReachable(cur_idx, dtls->getLocks(current_epoch_));\n  }\n\n  u32 findLockContext(DeadlockDetectorTLS<BV> *dtls, uptr node) {\n    return dtls->findLockContext(nodeToIndex(node));\n  }\n\n  // Add cur_node to the set of locks held currently by dtls.\n  void onLockAfter(DeadlockDetectorTLS<BV> *dtls, uptr cur_node, u32 stk = 0) {\n    ensureCurrentEpoch(dtls);\n    uptr cur_idx = nodeToIndex(cur_node);\n    dtls->addLock(cur_idx, current_epoch_, stk);\n  }\n\n  // Experimental *racy* fast path function.\n  // Returns true if all edges from the currently held locks to cur_node exist.\n  bool hasAllEdges(DeadlockDetectorTLS<BV> *dtls, uptr cur_node) {\n    uptr local_epoch = dtls->getEpoch();\n    // Read from current_epoch_ is racy.\n    if (cur_node && local_epoch == current_epoch_ &&\n        local_epoch == nodeToEpoch(cur_node)) {\n      uptr cur_idx = 
nodeToIndexUnchecked(cur_node);\n      for (uptr i = 0, n = dtls->getNumLocks(); i < n; i++) {\n        if (!g_.hasEdge(dtls->getLock(i), cur_idx))\n          return false;\n      }\n      return true;\n    }\n    return false;\n  }\n\n  // Adds edges from currently held locks to cur_node,\n  // returns the number of added edges, and puts the sources of added edges\n  // into added_edges[].\n  // Should be called before onLockAfter.\n  uptr addEdges(DeadlockDetectorTLS<BV> *dtls, uptr cur_node, u32 stk,\n                int unique_tid) {\n    ensureCurrentEpoch(dtls);\n    uptr cur_idx = nodeToIndex(cur_node);\n    uptr added_edges[40];\n    uptr n_added_edges = g_.addEdges(dtls->getLocks(current_epoch_), cur_idx,\n                                     added_edges, ARRAY_SIZE(added_edges));\n    for (uptr i = 0; i < n_added_edges; i++) {\n      if (n_edges_ < ARRAY_SIZE(edges_)) {\n        Edge e = {(u16)added_edges[i], (u16)cur_idx,\n                  dtls->findLockContext(added_edges[i]), stk,\n                  unique_tid};\n        edges_[n_edges_++] = e;\n      }\n    }\n    return n_added_edges;\n  }\n\n  bool findEdge(uptr from_node, uptr to_node, u32 *stk_from, u32 *stk_to,\n                int *unique_tid) {\n    uptr from_idx = nodeToIndex(from_node);\n    uptr to_idx = nodeToIndex(to_node);\n    for (uptr i = 0; i < n_edges_; i++) {\n      if (edges_[i].from == from_idx && edges_[i].to == to_idx) {\n        *stk_from = edges_[i].stk_from;\n        *stk_to = edges_[i].stk_to;\n        *unique_tid = edges_[i].unique_tid;\n        return true;\n      }\n    }\n    return false;\n  }\n\n  // Test-only function. 
Handles the before/after lock events,\n  // returns true if there is a cycle.\n  bool onLock(DeadlockDetectorTLS<BV> *dtls, uptr cur_node, u32 stk = 0) {\n    ensureCurrentEpoch(dtls);\n    bool is_reachable = !isHeld(dtls, cur_node) && onLockBefore(dtls, cur_node);\n    addEdges(dtls, cur_node, stk, 0);\n    onLockAfter(dtls, cur_node, stk);\n    return is_reachable;\n  }\n\n  // Handles the try_lock event, returns false.\n  // When a try_lock event happens (i.e. a try_lock call succeeds) we need\n  // to add this lock to the currently held locks, but we should not try to\n  // change the lock graph or to detect a cycle.  We may want to investigate\n  // whether a more aggressive strategy is possible for try_lock.\n  bool onTryLock(DeadlockDetectorTLS<BV> *dtls, uptr cur_node, u32 stk = 0) {\n    ensureCurrentEpoch(dtls);\n    uptr cur_idx = nodeToIndex(cur_node);\n    dtls->addLock(cur_idx, current_epoch_, stk);\n    return false;\n  }\n\n  // Returns true iff dtls is empty (no locks are currently held) and we can\n  // add the node to the currently held locks w/o changing the global state.\n  // This operation is thread-safe as it only touches the dtls.\n  bool onFirstLock(DeadlockDetectorTLS<BV> *dtls, uptr node, u32 stk = 0) {\n    if (!dtls->empty()) return false;\n    if (dtls->getEpoch() && dtls->getEpoch() == nodeToEpoch(node)) {\n      dtls->addLock(nodeToIndexUnchecked(node), nodeToEpoch(node), stk);\n      return true;\n    }\n    return false;\n  }\n\n  // Finds a path between the lock 'cur_node' (currently not held in dtls)\n  // and some currently held lock, returns the length of the path\n  // or 0 on failure.\n  uptr findPathToLock(DeadlockDetectorTLS<BV> *dtls, uptr cur_node, uptr *path,\n                      uptr path_size) {\n    tmp_bv_.copyFrom(dtls->getLocks(current_epoch_));\n    uptr idx = nodeToIndex(cur_node);\n    CHECK(!tmp_bv_.getBit(idx));\n    uptr res = g_.findShortestPath(idx, tmp_bv_, path, path_size);\n    for (uptr i = 0; i < 
res; i++)\n      path[i] = indexToNode(path[i]);\n    if (res)\n      CHECK_EQ(path[0], cur_node);\n    return res;\n  }\n\n  // Handle the unlock event.\n  // This operation is thread-safe as it only touches the dtls.\n  void onUnlock(DeadlockDetectorTLS<BV> *dtls, uptr node) {\n    if (dtls->getEpoch() == nodeToEpoch(node))\n      dtls->removeLock(nodeToIndexUnchecked(node));\n  }\n\n  // Tries to handle the lock event w/o writing to global state.\n  // Returns true on success.\n  // This operation is thread-safe as it only touches the dtls\n  // (modulo racy nature of hasAllEdges).\n  bool onLockFast(DeadlockDetectorTLS<BV> *dtls, uptr node, u32 stk = 0) {\n    if (hasAllEdges(dtls, node)) {\n      dtls->addLock(nodeToIndexUnchecked(node), nodeToEpoch(node), stk);\n      return true;\n    }\n    return false;\n  }\n\n  bool isHeld(DeadlockDetectorTLS<BV> *dtls, uptr node) const {\n    return dtls->getLocks(current_epoch_).getBit(nodeToIndex(node));\n  }\n\n  uptr testOnlyGetEpoch() const { return current_epoch_; }\n  bool testOnlyHasEdge(uptr l1, uptr l2) {\n    return g_.hasEdge(nodeToIndex(l1), nodeToIndex(l2));\n  }\n  // idx1 and idx2 are raw indices to g_, not lock IDs.\n  bool testOnlyHasEdgeRaw(uptr idx1, uptr idx2) {\n    return g_.hasEdge(idx1, idx2);\n  }\n\n  void Print() {\n    for (uptr from = 0; from < size(); from++)\n      for (uptr to = 0; to < size(); to++)\n        if (g_.hasEdge(from, to))\n          Printf(\"  %zx => %zx\\n\", from, to);\n  }\n\n private:\n  void check_idx(uptr idx) const { CHECK_LT(idx, size()); }\n\n  void check_node(uptr node) const {\n    CHECK_GE(node, size());\n    CHECK_EQ(current_epoch_, nodeToEpoch(node));\n  }\n\n  uptr indexToNode(uptr idx) const {\n    check_idx(idx);\n    return idx + current_epoch_;\n  }\n\n  uptr nodeToIndexUnchecked(uptr node) const { return node % size(); }\n\n  uptr nodeToIndex(uptr node) const {\n    check_node(node);\n    return nodeToIndexUnchecked(node);\n  }\n\n  uptr nodeToEpoch(uptr 
node) const { return node / size() * size(); }\n\n  uptr getAvailableNode(uptr data) {\n    uptr idx = available_nodes_.getAndClearFirstOne();\n    data_[idx] = data;\n    return indexToNode(idx);\n  }\n\n  struct Edge {\n    u16 from;\n    u16 to;\n    u32 stk_from;\n    u32 stk_to;\n    int unique_tid;\n  };\n\n  uptr current_epoch_;\n  BV available_nodes_;\n  BV recycled_nodes_;\n  BV tmp_bv_;\n  BVGraph<BV> g_;\n  uptr data_[BV::kSize];\n  Edge edges_[BV::kSize * 32];\n  uptr n_edges_;\n};\n\n} // namespace __sanitizer\n\n#endif // SANITIZER_DEADLOCK_DETECTOR_H\n"
  },
  {
    "path": "runtime/sanitizer_common/sanitizer_deadlock_detector1.cpp",
    "content": "//===-- sanitizer_deadlock_detector1.cpp ----------------------------------===//\n//\n// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n// See https://llvm.org/LICENSE.txt for license information.\n// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n//\n//===----------------------------------------------------------------------===//\n//\n// Deadlock detector implementation based on NxN adjacency bit matrix.\n//\n//===----------------------------------------------------------------------===//\n\n#include \"sanitizer_deadlock_detector_interface.h\"\n#include \"sanitizer_deadlock_detector.h\"\n#include \"sanitizer_allocator_internal.h\"\n#include \"sanitizer_placement_new.h\"\n#include \"sanitizer_mutex.h\"\n\n#if SANITIZER_DEADLOCK_DETECTOR_VERSION == 1\n\nnamespace __sanitizer {\n\ntypedef TwoLevelBitVector<> DDBV;  // DeadlockDetector's bit vector.\n\nstruct DDPhysicalThread {\n};\n\nstruct DDLogicalThread {\n  u64 ctx;\n  DeadlockDetectorTLS<DDBV> dd;\n  DDReport rep;\n  bool report_pending;\n};\n\nstruct DD final : public DDetector {\n  SpinMutex mtx;\n  DeadlockDetector<DDBV> dd;\n  DDFlags flags;\n\n  explicit DD(const DDFlags *flags);\n\n  DDPhysicalThread *CreatePhysicalThread() override;\n  void DestroyPhysicalThread(DDPhysicalThread *pt) override;\n\n  DDLogicalThread *CreateLogicalThread(u64 ctx) override;\n  void DestroyLogicalThread(DDLogicalThread *lt) override;\n\n  void MutexInit(DDCallback *cb, DDMutex *m) override;\n  void MutexBeforeLock(DDCallback *cb, DDMutex *m, bool wlock) override;\n  void MutexAfterLock(DDCallback *cb, DDMutex *m, bool wlock,\n                      bool trylock) override;\n  void MutexBeforeUnlock(DDCallback *cb, DDMutex *m, bool wlock) override;\n  void MutexDestroy(DDCallback *cb, DDMutex *m) override;\n\n  DDReport *GetReport(DDCallback *cb) override;\n\n  void MutexEnsureID(DDLogicalThread *lt, DDMutex *m);\n  void ReportDeadlock(DDCallback *cb, DDMutex 
*m);\n};\n\nDDetector *DDetector::Create(const DDFlags *flags) {\n  (void)flags;\n  void *mem = MmapOrDie(sizeof(DD), \"deadlock detector\");\n  return new(mem) DD(flags);\n}\n\nDD::DD(const DDFlags *flags)\n    : flags(*flags) {\n  dd.clear();\n}\n\nDDPhysicalThread* DD::CreatePhysicalThread() {\n  return nullptr;\n}\n\nvoid DD::DestroyPhysicalThread(DDPhysicalThread *pt) {\n}\n\nDDLogicalThread* DD::CreateLogicalThread(u64 ctx) {\n  DDLogicalThread *lt = (DDLogicalThread*)InternalAlloc(sizeof(*lt));\n  lt->ctx = ctx;\n  lt->dd.clear();\n  lt->report_pending = false;\n  return lt;\n}\n\nvoid DD::DestroyLogicalThread(DDLogicalThread *lt) {\n  lt->~DDLogicalThread();\n  InternalFree(lt);\n}\n\nvoid DD::MutexInit(DDCallback *cb, DDMutex *m) {\n  m->id = 0;\n  m->stk = cb->Unwind();\n}\n\nvoid DD::MutexEnsureID(DDLogicalThread *lt, DDMutex *m) {\n  if (!dd.nodeBelongsToCurrentEpoch(m->id))\n    m->id = dd.newNode(reinterpret_cast<uptr>(m));\n  dd.ensureCurrentEpoch(&lt->dd);\n}\n\nvoid DD::MutexBeforeLock(DDCallback *cb,\n    DDMutex *m, bool wlock) {\n  DDLogicalThread *lt = cb->lt;\n  if (lt->dd.empty()) return;  // This will be the first lock held by lt.\n  if (dd.hasAllEdges(&lt->dd, m->id)) return;  // We already have all edges.\n  SpinMutexLock lk(&mtx);\n  MutexEnsureID(lt, m);\n  if (dd.isHeld(&lt->dd, m->id))\n    return;  // FIXME: allow this only for recursive locks.\n  if (dd.onLockBefore(&lt->dd, m->id)) {\n    // Actually add this edge now so that we have all the stack traces.\n    dd.addEdges(&lt->dd, m->id, cb->Unwind(), cb->UniqueTid());\n    ReportDeadlock(cb, m);\n  }\n}\n\nvoid DD::ReportDeadlock(DDCallback *cb, DDMutex *m) {\n  DDLogicalThread *lt = cb->lt;\n  uptr path[20];\n  uptr len = dd.findPathToLock(&lt->dd, m->id, path, ARRAY_SIZE(path));\n  if (len == 0U) {\n    // A cycle of 20+ locks? 
Well, that's a bit odd...\n    Printf(\"WARNING: too long mutex cycle found\\n\");\n    return;\n  }\n  CHECK_EQ(m->id, path[0]);\n  lt->report_pending = true;\n  len = Min<uptr>(len, DDReport::kMaxLoopSize);\n  DDReport *rep = &lt->rep;\n  rep->n = len;\n  for (uptr i = 0; i < len; i++) {\n    uptr from = path[i];\n    uptr to = path[(i + 1) % len];\n    DDMutex *m0 = (DDMutex*)dd.getData(from);\n    DDMutex *m1 = (DDMutex*)dd.getData(to);\n\n    u32 stk_from = 0, stk_to = 0;\n    int unique_tid = 0;\n    dd.findEdge(from, to, &stk_from, &stk_to, &unique_tid);\n    // Printf(\"Edge: %zd=>%zd: %u/%u T%d\\n\", from, to, stk_from, stk_to,\n    //    unique_tid);\n    rep->loop[i].thr_ctx = unique_tid;\n    rep->loop[i].mtx_ctx0 = m0->ctx;\n    rep->loop[i].mtx_ctx1 = m1->ctx;\n    rep->loop[i].stk[0] = stk_to;\n    rep->loop[i].stk[1] = stk_from;\n  }\n}\n\nvoid DD::MutexAfterLock(DDCallback *cb, DDMutex *m, bool wlock, bool trylock) {\n  DDLogicalThread *lt = cb->lt;\n  u32 stk = 0;\n  if (flags.second_deadlock_stack)\n    stk = cb->Unwind();\n  // Printf(\"T%p MutexLock:   %zx stk %u\\n\", lt, m->id, stk);\n  if (dd.onFirstLock(&lt->dd, m->id, stk))\n    return;\n  if (dd.onLockFast(&lt->dd, m->id, stk))\n    return;\n\n  SpinMutexLock lk(&mtx);\n  MutexEnsureID(lt, m);\n  if (wlock)  // Only a recursive rlock may be held.\n    CHECK(!dd.isHeld(&lt->dd, m->id));\n  if (!trylock)\n    dd.addEdges(&lt->dd, m->id, stk ? 
stk : cb->Unwind(), cb->UniqueTid());\n  dd.onLockAfter(&lt->dd, m->id, stk);\n}\n\nvoid DD::MutexBeforeUnlock(DDCallback *cb, DDMutex *m, bool wlock) {\n  // Printf(\"T%p MutexUnLock: %zx\\n\", cb->lt, m->id);\n  dd.onUnlock(&cb->lt->dd, m->id);\n}\n\nvoid DD::MutexDestroy(DDCallback *cb,\n    DDMutex *m) {\n  if (!m->id) return;\n  SpinMutexLock lk(&mtx);\n  if (dd.nodeBelongsToCurrentEpoch(m->id))\n    dd.removeNode(m->id);\n  m->id = 0;\n}\n\nDDReport *DD::GetReport(DDCallback *cb) {\n  if (!cb->lt->report_pending)\n    return nullptr;\n  cb->lt->report_pending = false;\n  return &cb->lt->rep;\n}\n\n} // namespace __sanitizer\n#endif // #if SANITIZER_DEADLOCK_DETECTOR_VERSION == 1\n"
  },
  {
    "path": "runtime/sanitizer_common/sanitizer_deadlock_detector2.cpp",
    "content": "//===-- sanitizer_deadlock_detector2.cpp ----------------------------------===//\n//\n// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n// See https://llvm.org/LICENSE.txt for license information.\n// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n//\n//===----------------------------------------------------------------------===//\n//\n// Deadlock detector implementation based on adjacency lists.\n//\n//===----------------------------------------------------------------------===//\n\n#include \"sanitizer_deadlock_detector_interface.h\"\n#include \"sanitizer_common.h\"\n#include \"sanitizer_allocator_internal.h\"\n#include \"sanitizer_placement_new.h\"\n#include \"sanitizer_mutex.h\"\n\n#if SANITIZER_DEADLOCK_DETECTOR_VERSION == 2\n\nnamespace __sanitizer {\n\nconst int kMaxNesting = 64;\nconst u32 kNoId = -1;\nconst u32 kEndId = -2;\nconst int kMaxLink = 8;\nconst int kL1Size = 1024;\nconst int kL2Size = 1024;\nconst int kMaxMutex = kL1Size * kL2Size;\n\nstruct Id {\n  u32 id;\n  u32 seq;\n\n  explicit Id(u32 id = 0, u32 seq = 0)\n      : id(id)\n      , seq(seq) {\n  }\n};\n\nstruct Link {\n  u32 id;\n  u32 seq;\n  u32 tid;\n  u32 stk0;\n  u32 stk1;\n\n  explicit Link(u32 id = 0, u32 seq = 0, u32 tid = 0, u32 s0 = 0, u32 s1 = 0)\n      : id(id)\n      , seq(seq)\n      , tid(tid)\n      , stk0(s0)\n      , stk1(s1) {\n  }\n};\n\nstruct DDPhysicalThread {\n  DDReport rep;\n  bool report_pending;\n  bool visited[kMaxMutex];\n  Link pending[kMaxMutex];\n  Link path[kMaxMutex];\n};\n\nstruct ThreadMutex {\n  u32 id;\n  u32 stk;\n};\n\nstruct DDLogicalThread {\n  u64         ctx;\n  ThreadMutex locked[kMaxNesting];\n  int         nlocked;\n};\n\nstruct MutexState {\n  StaticSpinMutex mtx;\n  u32 seq;\n  int nlink;\n  Link link[kMaxLink];\n};\n\nstruct DD final : public DDetector {\n  explicit DD(const DDFlags *flags);\n\n  DDPhysicalThread* CreatePhysicalThread();\n  void DestroyPhysicalThread(DDPhysicalThread 
*pt);\n\n  DDLogicalThread* CreateLogicalThread(u64 ctx);\n  void DestroyLogicalThread(DDLogicalThread *lt);\n\n  void MutexInit(DDCallback *cb, DDMutex *m);\n  void MutexBeforeLock(DDCallback *cb, DDMutex *m, bool wlock);\n  void MutexAfterLock(DDCallback *cb, DDMutex *m, bool wlock,\n      bool trylock);\n  void MutexBeforeUnlock(DDCallback *cb, DDMutex *m, bool wlock);\n  void MutexDestroy(DDCallback *cb, DDMutex *m);\n\n  DDReport *GetReport(DDCallback *cb);\n\n  void CycleCheck(DDPhysicalThread *pt, DDLogicalThread *lt, DDMutex *mtx);\n  void Report(DDPhysicalThread *pt, DDLogicalThread *lt, int npath);\n  u32 allocateId(DDCallback *cb);\n  MutexState *getMutex(u32 id);\n  u32 getMutexId(MutexState *m);\n\n  DDFlags flags;\n\n  MutexState *mutex[kL1Size];\n\n  SpinMutex mtx;\n  InternalMmapVector<u32> free_id;\n  int id_gen = 0;\n};\n\nDDetector *DDetector::Create(const DDFlags *flags) {\n  (void)flags;\n  void *mem = MmapOrDie(sizeof(DD), \"deadlock detector\");\n  return new(mem) DD(flags);\n}\n\nDD::DD(const DDFlags *flags) : flags(*flags) { free_id.reserve(1024); }\n\nDDPhysicalThread* DD::CreatePhysicalThread() {\n  DDPhysicalThread *pt = (DDPhysicalThread*)MmapOrDie(sizeof(DDPhysicalThread),\n      \"deadlock detector (physical thread)\");\n  return pt;\n}\n\nvoid DD::DestroyPhysicalThread(DDPhysicalThread *pt) {\n  pt->~DDPhysicalThread();\n  UnmapOrDie(pt, sizeof(DDPhysicalThread));\n}\n\nDDLogicalThread* DD::CreateLogicalThread(u64 ctx) {\n  DDLogicalThread *lt = (DDLogicalThread*)InternalAlloc(\n      sizeof(DDLogicalThread));\n  lt->ctx = ctx;\n  lt->nlocked = 0;\n  return lt;\n}\n\nvoid DD::DestroyLogicalThread(DDLogicalThread *lt) {\n  lt->~DDLogicalThread();\n  InternalFree(lt);\n}\n\nvoid DD::MutexInit(DDCallback *cb, DDMutex *m) {\n  VPrintf(2, \"#%llu: DD::MutexInit(%p)\\n\", cb->lt->ctx, m);\n  m->id = kNoId;\n  m->recursion = 0;\n  atomic_store(&m->owner, 0, memory_order_relaxed);\n}\n\nMutexState *DD::getMutex(u32 id) { return &mutex[id / 
kL2Size][id % kL2Size]; }\n\nu32 DD::getMutexId(MutexState *m) {\n  for (int i = 0; i < kL1Size; i++) {\n    MutexState *tab = mutex[i];\n    if (tab == 0)\n      break;\n    if (m >= tab && m < tab + kL2Size)\n      return i * kL2Size + (m - tab);\n  }\n  return -1;\n}\n\nu32 DD::allocateId(DDCallback *cb) {\n  u32 id = -1;\n  SpinMutexLock l(&mtx);\n  if (free_id.size() > 0) {\n    id = free_id.back();\n    free_id.pop_back();\n  } else {\n    CHECK_LT(id_gen, kMaxMutex);\n    if ((id_gen % kL2Size) == 0) {\n      mutex[id_gen / kL2Size] = (MutexState *)MmapOrDie(\n          kL2Size * sizeof(MutexState), \"deadlock detector (mutex table)\");\n    }\n    id = id_gen++;\n  }\n  CHECK_LE(id, kMaxMutex);\n  VPrintf(3, \"#%llu: DD::allocateId assign id %d\\n\", cb->lt->ctx, id);\n  return id;\n}\n\nvoid DD::MutexBeforeLock(DDCallback *cb, DDMutex *m, bool wlock) {\n  VPrintf(2, \"#%llu: DD::MutexBeforeLock(%p, wlock=%d) nlocked=%d\\n\",\n      cb->lt->ctx, m, wlock, cb->lt->nlocked);\n  DDPhysicalThread *pt = cb->pt;\n  DDLogicalThread *lt = cb->lt;\n\n  uptr owner = atomic_load(&m->owner, memory_order_relaxed);\n  if (owner == (uptr)cb->lt) {\n    VPrintf(3, \"#%llu: DD::MutexBeforeLock recursive\\n\",\n        cb->lt->ctx);\n    return;\n  }\n\n  CHECK_LE(lt->nlocked, kMaxNesting);\n\n  // FIXME(dvyukov): don't allocate id if lt->nlocked == 0?\n  if (m->id == kNoId)\n    m->id = allocateId(cb);\n\n  ThreadMutex *tm = &lt->locked[lt->nlocked++];\n  tm->id = m->id;\n  if (flags.second_deadlock_stack)\n    tm->stk = cb->Unwind();\n  if (lt->nlocked == 1) {\n    VPrintf(3, \"#%llu: DD::MutexBeforeLock first mutex\\n\",\n        cb->lt->ctx);\n    return;\n  }\n\n  bool added = false;\n  MutexState *mtx = getMutex(m->id);\n  for (int i = 0; i < lt->nlocked - 1; i++) {\n    u32 id1 = lt->locked[i].id;\n    u32 stk1 = lt->locked[i].stk;\n    MutexState *mtx1 = getMutex(id1);\n    SpinMutexLock l(&mtx1->mtx);\n    if (mtx1->nlink == kMaxLink) {\n      // FIXME(dvyukov): 
check stale links\n      continue;\n    }\n    int li = 0;\n    for (; li < mtx1->nlink; li++) {\n      Link *link = &mtx1->link[li];\n      if (link->id == m->id) {\n        if (link->seq != mtx->seq) {\n          link->seq = mtx->seq;\n          link->tid = lt->ctx;\n          link->stk0 = stk1;\n          link->stk1 = cb->Unwind();\n          added = true;\n          VPrintf(3, \"#%llu: DD::MutexBeforeLock added %d->%d link\\n\",\n              cb->lt->ctx, getMutexId(mtx1), m->id);\n        }\n        break;\n      }\n    }\n    if (li == mtx1->nlink) {\n      // FIXME(dvyukov): check stale links\n      Link *link = &mtx1->link[mtx1->nlink++];\n      link->id = m->id;\n      link->seq = mtx->seq;\n      link->tid = lt->ctx;\n      link->stk0 = stk1;\n      link->stk1 = cb->Unwind();\n      added = true;\n      VPrintf(3, \"#%llu: DD::MutexBeforeLock added %d->%d link\\n\",\n          cb->lt->ctx, getMutexId(mtx1), m->id);\n    }\n  }\n\n  if (!added || mtx->nlink == 0) {\n    VPrintf(3, \"#%llu: DD::MutexBeforeLock don't check\\n\",\n        cb->lt->ctx);\n    return;\n  }\n\n  CycleCheck(pt, lt, m);\n}\n\nvoid DD::MutexAfterLock(DDCallback *cb, DDMutex *m, bool wlock,\n    bool trylock) {\n  VPrintf(2, \"#%llu: DD::MutexAfterLock(%p, wlock=%d, try=%d) nlocked=%d\\n\",\n      cb->lt->ctx, m, wlock, trylock, cb->lt->nlocked);\n  DDLogicalThread *lt = cb->lt;\n\n  uptr owner = atomic_load(&m->owner, memory_order_relaxed);\n  if (owner == (uptr)cb->lt) {\n    VPrintf(3, \"#%llu: DD::MutexAfterLock recursive\\n\", cb->lt->ctx);\n    CHECK(wlock);\n    m->recursion++;\n    return;\n  }\n  CHECK_EQ(owner, 0);\n  if (wlock) {\n    VPrintf(3, \"#%llu: DD::MutexAfterLock set owner\\n\", cb->lt->ctx);\n    CHECK_EQ(m->recursion, 0);\n    m->recursion = 1;\n    atomic_store(&m->owner, (uptr)cb->lt, memory_order_relaxed);\n  }\n\n  if (!trylock)\n    return;\n\n  CHECK_LE(lt->nlocked, kMaxNesting);\n  if (m->id == kNoId)\n    m->id = allocateId(cb);\n  ThreadMutex *tm = 
&lt->locked[lt->nlocked++];\n  tm->id = m->id;\n  if (flags.second_deadlock_stack)\n    tm->stk = cb->Unwind();\n}\n\nvoid DD::MutexBeforeUnlock(DDCallback *cb, DDMutex *m, bool wlock) {\n  VPrintf(2, \"#%llu: DD::MutexBeforeUnlock(%p, wlock=%d) nlocked=%d\\n\",\n      cb->lt->ctx, m, wlock, cb->lt->nlocked);\n  DDLogicalThread *lt = cb->lt;\n\n  uptr owner = atomic_load(&m->owner, memory_order_relaxed);\n  if (owner == (uptr)cb->lt) {\n    VPrintf(3, \"#%llu: DD::MutexBeforeUnlock recursive\\n\", cb->lt->ctx);\n    if (--m->recursion > 0)\n      return;\n    VPrintf(3, \"#%llu: DD::MutexBeforeUnlock reset owner\\n\", cb->lt->ctx);\n    atomic_store(&m->owner, 0, memory_order_relaxed);\n  }\n  CHECK_NE(m->id, kNoId);\n  int last = lt->nlocked - 1;\n  for (int i = last; i >= 0; i--) {\n    if (cb->lt->locked[i].id == m->id) {\n      lt->locked[i] = lt->locked[last];\n      lt->nlocked--;\n      break;\n    }\n  }\n}\n\nvoid DD::MutexDestroy(DDCallback *cb, DDMutex *m) {\n  VPrintf(2, \"#%llu: DD::MutexDestroy(%p)\\n\",\n      cb->lt->ctx, m);\n  DDLogicalThread *lt = cb->lt;\n\n  if (m->id == kNoId)\n    return;\n\n  // Remove the mutex from lt->locked if there.\n  int last = lt->nlocked - 1;\n  for (int i = last; i >= 0; i--) {\n    if (lt->locked[i].id == m->id) {\n      lt->locked[i] = lt->locked[last];\n      lt->nlocked--;\n      break;\n    }\n  }\n\n  // Clear and invalidate the mutex descriptor.\n  {\n    MutexState *mtx = getMutex(m->id);\n    SpinMutexLock l(&mtx->mtx);\n    mtx->seq++;\n    mtx->nlink = 0;\n  }\n\n  // Return id to cache.\n  {\n    SpinMutexLock l(&mtx);\n    free_id.push_back(m->id);\n  }\n}\n\nvoid DD::CycleCheck(DDPhysicalThread *pt, DDLogicalThread *lt,\n    DDMutex *m) {\n  internal_memset(pt->visited, 0, sizeof(pt->visited));\n  int npath = 0;\n  int npending = 0;\n  {\n    MutexState *mtx = getMutex(m->id);\n    SpinMutexLock l(&mtx->mtx);\n    for (int li = 0; li < mtx->nlink; li++)\n      pt->pending[npending++] = 
mtx->link[li];\n  }\n  while (npending > 0) {\n    Link link = pt->pending[--npending];\n    if (link.id == kEndId) {\n      npath--;\n      continue;\n    }\n    if (pt->visited[link.id])\n      continue;\n    MutexState *mtx1 = getMutex(link.id);\n    SpinMutexLock l(&mtx1->mtx);\n    if (mtx1->seq != link.seq)\n      continue;\n    pt->visited[link.id] = true;\n    if (mtx1->nlink == 0)\n      continue;\n    pt->path[npath++] = link;\n    pt->pending[npending++] = Link(kEndId);\n    if (link.id == m->id)\n      return Report(pt, lt, npath);  // Bingo!\n    for (int li = 0; li < mtx1->nlink; li++) {\n      Link *link1 = &mtx1->link[li];\n      // MutexState *mtx2 = getMutex(link->id);\n      // FIXME(dvyukov): fast seq check\n      // FIXME(dvyukov): fast nlink != 0 check\n      // FIXME(dvyukov): fast pending check?\n      // FIXME(dvyukov): npending can be larger than kMaxMutex\n      pt->pending[npending++] = *link1;\n    }\n  }\n}\n\nvoid DD::Report(DDPhysicalThread *pt, DDLogicalThread *lt, int npath) {\n  DDReport *rep = &pt->rep;\n  rep->n = npath;\n  for (int i = 0; i < npath; i++) {\n    Link *link = &pt->path[i];\n    Link *link0 = &pt->path[i ? i - 1 : npath - 1];\n    rep->loop[i].thr_ctx = link->tid;\n    rep->loop[i].mtx_ctx0 = link0->id;\n    rep->loop[i].mtx_ctx1 = link->id;\n    rep->loop[i].stk[0] = flags.second_deadlock_stack ? link->stk0 : 0;\n    rep->loop[i].stk[1] = link->stk1;\n  }\n  pt->report_pending = true;\n}\n\nDDReport *DD::GetReport(DDCallback *cb) {\n  if (!cb->pt->report_pending)\n    return 0;\n  cb->pt->report_pending = false;\n  return &cb->pt->rep;\n}\n\n}  // namespace __sanitizer\n#endif  // #if SANITIZER_DEADLOCK_DETECTOR_VERSION == 2\n"
  },
  {
    "path": "runtime/sanitizer_common/sanitizer_deadlock_detector_interface.h",
    "content": "//===-- sanitizer_deadlock_detector_interface.h -----------------*- C++ -*-===//\n//\n// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n// See https://llvm.org/LICENSE.txt for license information.\n// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n//\n//===----------------------------------------------------------------------===//\n//\n// This file is a part of Sanitizer runtime.\n// Abstract deadlock detector interface.\n// FIXME: this is work in progress, nothing really works yet.\n//\n//===----------------------------------------------------------------------===//\n\n#ifndef SANITIZER_DEADLOCK_DETECTOR_INTERFACE_H\n#define SANITIZER_DEADLOCK_DETECTOR_INTERFACE_H\n\n#ifndef SANITIZER_DEADLOCK_DETECTOR_VERSION\n# define SANITIZER_DEADLOCK_DETECTOR_VERSION 1\n#endif\n\n#include \"sanitizer_internal_defs.h\"\n#include \"sanitizer_atomic.h\"\n\nnamespace __sanitizer {\n\n// dd - deadlock detector.\n// lt - logical (user) thread.\n// pt - physical (OS) thread.\n\nstruct DDPhysicalThread;\nstruct DDLogicalThread;\n\nstruct DDMutex {\n#if SANITIZER_DEADLOCK_DETECTOR_VERSION == 1\n  uptr id;\n  u32  stk;  // creation stack\n#elif SANITIZER_DEADLOCK_DETECTOR_VERSION == 2\n  u32              id;\n  u32              recursion;\n  atomic_uintptr_t owner;\n#else\n# error \"BAD SANITIZER_DEADLOCK_DETECTOR_VERSION\"\n#endif\n  u64  ctx;\n};\n\nstruct DDFlags {\n  bool second_deadlock_stack;\n};\n\nstruct DDReport {\n  enum { kMaxLoopSize = 20 };\n  int n;  // number of entries in loop\n  struct {\n    u64 thr_ctx;   // user thread context\n    u64 mtx_ctx0;  // user mutex context, start of the edge\n    u64 mtx_ctx1;  // user mutex context, end of the edge\n    u32 stk[2];  // stack ids for the edge\n  } loop[kMaxLoopSize];\n};\n\nstruct DDCallback {\n  DDPhysicalThread *pt;\n  DDLogicalThread  *lt;\n\n  virtual u32 Unwind() { return 0; }\n  virtual int UniqueTid() { return 0; }\n\n protected:\n  ~DDCallback() 
{}\n};\n\nstruct DDetector {\n  static DDetector *Create(const DDFlags *flags);\n\n  virtual DDPhysicalThread* CreatePhysicalThread() { return nullptr; }\n  virtual void DestroyPhysicalThread(DDPhysicalThread *pt) {}\n\n  virtual DDLogicalThread* CreateLogicalThread(u64 ctx) { return nullptr; }\n  virtual void DestroyLogicalThread(DDLogicalThread *lt) {}\n\n  virtual void MutexInit(DDCallback *cb, DDMutex *m) {}\n  virtual void MutexBeforeLock(DDCallback *cb, DDMutex *m, bool wlock) {}\n  virtual void MutexAfterLock(DDCallback *cb, DDMutex *m, bool wlock,\n      bool trylock) {}\n  virtual void MutexBeforeUnlock(DDCallback *cb, DDMutex *m, bool wlock) {}\n  virtual void MutexDestroy(DDCallback *cb, DDMutex *m) {}\n\n  virtual DDReport *GetReport(DDCallback *cb) { return nullptr; }\n\n protected:\n  ~DDetector() {}\n};\n\n} // namespace __sanitizer\n\n#endif // SANITIZER_DEADLOCK_DETECTOR_INTERFACE_H\n"
  },
  {
    "path": "runtime/sanitizer_common/sanitizer_dense_map.h",
    "content": "//===- sanitizer_dense_map.h - Dense probed hash table ----------*- C++ -*-===//\n//\n// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n// See https://llvm.org/LICENSE.txt for license information.\n// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n//\n//===----------------------------------------------------------------------===//\n//\n// This is fork of llvm/ADT/DenseMap.h class with the following changes:\n//  * Use mmap to allocate.\n//  * No iterators.\n//  * Does not shrink.\n//\n//===----------------------------------------------------------------------===//\n\n#ifndef SANITIZER_DENSE_MAP_H\n#define SANITIZER_DENSE_MAP_H\n\n#include \"sanitizer_common.h\"\n#include \"sanitizer_dense_map_info.h\"\n#include \"sanitizer_internal_defs.h\"\n#include \"sanitizer_type_traits.h\"\n\nnamespace __sanitizer {\n\ntemplate <typename DerivedT, typename KeyT, typename ValueT, typename KeyInfoT,\n          typename BucketT>\nclass DenseMapBase {\n public:\n  using size_type = unsigned;\n  using key_type = KeyT;\n  using mapped_type = ValueT;\n  using value_type = BucketT;\n\n  WARN_UNUSED_RESULT bool empty() const { return getNumEntries() == 0; }\n  unsigned size() const { return getNumEntries(); }\n\n  /// Grow the densemap so that it can contain at least \\p NumEntries items\n  /// before resizing again.\n  void reserve(size_type NumEntries) {\n    auto NumBuckets = getMinBucketToReserveForEntries(NumEntries);\n    if (NumBuckets > getNumBuckets())\n      grow(NumBuckets);\n  }\n\n  void clear() {\n    if (getNumEntries() == 0 && getNumTombstones() == 0)\n      return;\n\n    const KeyT EmptyKey = getEmptyKey(), TombstoneKey = getTombstoneKey();\n    if (__sanitizer::is_trivially_destructible<ValueT>::value) {\n      // Use a simpler loop when values don't need destruction.\n      for (BucketT *P = getBuckets(), *E = getBucketsEnd(); P != E; ++P)\n        P->getFirst() = EmptyKey;\n    } else {\n      unsigned 
NumEntries = getNumEntries();\n      for (BucketT *P = getBuckets(), *E = getBucketsEnd(); P != E; ++P) {\n        if (!KeyInfoT::isEqual(P->getFirst(), EmptyKey)) {\n          if (!KeyInfoT::isEqual(P->getFirst(), TombstoneKey)) {\n            P->getSecond().~ValueT();\n            --NumEntries;\n          }\n          P->getFirst() = EmptyKey;\n        }\n      }\n      CHECK_EQ(NumEntries, 0);\n    }\n    setNumEntries(0);\n    setNumTombstones(0);\n  }\n\n  /// Return 1 if the specified key is in the map, 0 otherwise.\n  size_type count(const KeyT &Key) const {\n    const BucketT *TheBucket;\n    return LookupBucketFor(Key, TheBucket) ? 1 : 0;\n  }\n\n  value_type *find(const KeyT &Key) {\n    BucketT *TheBucket;\n    if (LookupBucketFor(Key, TheBucket))\n      return TheBucket;\n    return nullptr;\n  }\n  const value_type *find(const KeyT &Key) const {\n    const BucketT *TheBucket;\n    if (LookupBucketFor(Key, TheBucket))\n      return TheBucket;\n    return nullptr;\n  }\n\n  /// Alternate version of find() which allows a different, and possibly\n  /// less expensive, key type.\n  /// The DenseMapInfo is responsible for supplying methods\n  /// getHashValue(LookupKeyT) and isEqual(LookupKeyT, KeyT) for each key\n  /// type used.\n  template <class LookupKeyT>\n  value_type *find_as(const LookupKeyT &Key) {\n    BucketT *TheBucket;\n    if (LookupBucketFor(Key, TheBucket))\n      return TheBucket;\n    return nullptr;\n  }\n  template <class LookupKeyT>\n  const value_type *find_as(const LookupKeyT &Key) const {\n    const BucketT *TheBucket;\n    if (LookupBucketFor(Key, TheBucket))\n      return TheBucket;\n    return nullptr;\n  }\n\n  /// lookup - Return the entry for the specified key, or a default\n  /// constructed value if no such entry exists.\n  ValueT lookup(const KeyT &Key) const {\n    const BucketT *TheBucket;\n    if (LookupBucketFor(Key, TheBucket))\n      return TheBucket->getSecond();\n    return ValueT();\n  }\n\n  // Inserts key,value 
pair into the map if the key isn't already in the map.\n  // If the key is already in the map, it returns false and doesn't update the\n  // value.\n  detail::DenseMapPair<value_type *, bool> insert(const value_type &KV) {\n    return try_emplace(KV.first, KV.second);\n  }\n\n  // Inserts key,value pair into the map if the key isn't already in the map.\n  // If the key is already in the map, it returns false and doesn't update the\n  // value.\n  detail::DenseMapPair<value_type *, bool> insert(value_type &&KV) {\n    return try_emplace(__sanitizer::move(KV.first),\n                       __sanitizer::move(KV.second));\n  }\n\n  // Inserts key,value pair into the map if the key isn't already in the map.\n  // The value is constructed in-place if the key is not in the map, otherwise\n  // it is not moved.\n  template <typename... Ts>\n  detail::DenseMapPair<value_type *, bool> try_emplace(KeyT &&Key,\n                                                       Ts &&...Args) {\n    BucketT *TheBucket;\n    if (LookupBucketFor(Key, TheBucket))\n      return {TheBucket, false};  // Already in map.\n\n    // Otherwise, insert the new element.\n    TheBucket = InsertIntoBucket(TheBucket, __sanitizer::move(Key),\n                                 __sanitizer::forward<Ts>(Args)...);\n    return {TheBucket, true};\n  }\n\n  // Inserts key,value pair into the map if the key isn't already in the map.\n  // The value is constructed in-place if the key is not in the map, otherwise\n  // it is not moved.\n  template <typename... 
Ts>\n  detail::DenseMapPair<value_type *, bool> try_emplace(const KeyT &Key,\n                                                       Ts &&...Args) {\n    BucketT *TheBucket;\n    if (LookupBucketFor(Key, TheBucket))\n      return {TheBucket, false};  // Already in map.\n\n    // Otherwise, insert the new element.\n    TheBucket =\n        InsertIntoBucket(TheBucket, Key, __sanitizer::forward<Ts>(Args)...);\n    return {TheBucket, true};\n  }\n\n  /// Alternate version of insert() which allows a different, and possibly\n  /// less expensive, key type.\n  /// The DenseMapInfo is responsible for supplying methods\n  /// getHashValue(LookupKeyT) and isEqual(LookupKeyT, KeyT) for each key\n  /// type used.\n  template <typename LookupKeyT>\n  detail::DenseMapPair<value_type *, bool> insert_as(value_type &&KV,\n                                                     const LookupKeyT &Val) {\n    BucketT *TheBucket;\n    if (LookupBucketFor(Val, TheBucket))\n      return {TheBucket, false};  // Already in map.\n\n    // Otherwise, insert the new element.\n    TheBucket =\n        InsertIntoBucketWithLookup(TheBucket, __sanitizer::move(KV.first),\n                                   __sanitizer::move(KV.second), Val);\n    return {TheBucket, true};\n  }\n\n  bool erase(const KeyT &Val) {\n    BucketT *TheBucket;\n    if (!LookupBucketFor(Val, TheBucket))\n      return false;  // not in map.\n\n    TheBucket->getSecond().~ValueT();\n    TheBucket->getFirst() = getTombstoneKey();\n    decrementNumEntries();\n    incrementNumTombstones();\n    return true;\n  }\n\n  void erase(value_type *I) {\n    CHECK_NE(I, nullptr);\n    BucketT *TheBucket = &*I;\n    TheBucket->getSecond().~ValueT();\n    TheBucket->getFirst() = getTombstoneKey();\n    decrementNumEntries();\n    incrementNumTombstones();\n  }\n\n  value_type &FindAndConstruct(const KeyT &Key) {\n    BucketT *TheBucket;\n    if (LookupBucketFor(Key, TheBucket))\n      return *TheBucket;\n\n    return 
*InsertIntoBucket(TheBucket, Key);\n  }\n\n  ValueT &operator[](const KeyT &Key) { return FindAndConstruct(Key).second; }\n\n  value_type &FindAndConstruct(KeyT &&Key) {\n    BucketT *TheBucket;\n    if (LookupBucketFor(Key, TheBucket))\n      return *TheBucket;\n\n    return *InsertIntoBucket(TheBucket, __sanitizer::move(Key));\n  }\n\n  ValueT &operator[](KeyT &&Key) {\n    return FindAndConstruct(__sanitizer::move(Key)).second;\n  }\n\n  /// Iterate over active entries of the container.\n  ///\n  /// Function can return fast to stop the process.\n  template <class Fn>\n  void forEach(Fn fn) {\n    const KeyT EmptyKey = getEmptyKey(), TombstoneKey = getTombstoneKey();\n    for (auto *P = getBuckets(), *E = getBucketsEnd(); P != E; ++P) {\n      const KeyT K = P->getFirst();\n      if (!KeyInfoT::isEqual(K, EmptyKey) &&\n          !KeyInfoT::isEqual(K, TombstoneKey)) {\n        if (!fn(*P))\n          return;\n      }\n    }\n  }\n\n  template <class Fn>\n  void forEach(Fn fn) const {\n    const_cast<DenseMapBase *>(this)->forEach(\n        [&](const value_type &KV) { return fn(KV); });\n  }\n\n protected:\n  DenseMapBase() = default;\n\n  void destroyAll() {\n    if (getNumBuckets() == 0)  // Nothing to do.\n      return;\n\n    const KeyT EmptyKey = getEmptyKey(), TombstoneKey = getTombstoneKey();\n    for (BucketT *P = getBuckets(), *E = getBucketsEnd(); P != E; ++P) {\n      if (!KeyInfoT::isEqual(P->getFirst(), EmptyKey) &&\n          !KeyInfoT::isEqual(P->getFirst(), TombstoneKey))\n        P->getSecond().~ValueT();\n      P->getFirst().~KeyT();\n    }\n  }\n\n  void initEmpty() {\n    setNumEntries(0);\n    setNumTombstones(0);\n\n    CHECK_EQ((getNumBuckets() & (getNumBuckets() - 1)), 0);\n    const KeyT EmptyKey = getEmptyKey();\n    for (BucketT *B = getBuckets(), *E = getBucketsEnd(); B != E; ++B)\n      ::new (&B->getFirst()) KeyT(EmptyKey);\n  }\n\n  /// Returns the number of buckets to allocate to ensure that the DenseMap can\n  /// accommodate \\p 
NumEntries without need to grow().\n  unsigned getMinBucketToReserveForEntries(unsigned NumEntries) {\n    // Ensure that \"NumEntries * 4 < NumBuckets * 3\"\n    if (NumEntries == 0)\n      return 0;\n    // +1 is required because of the strict equality.\n    // For example if NumEntries is 48, we need to return 401.\n    return RoundUpToPowerOfTwo((NumEntries * 4 / 3 + 1) + /* NextPowerOf2 */ 1);\n  }\n\n  void moveFromOldBuckets(BucketT *OldBucketsBegin, BucketT *OldBucketsEnd) {\n    initEmpty();\n\n    // Insert all the old elements.\n    const KeyT EmptyKey = getEmptyKey();\n    const KeyT TombstoneKey = getTombstoneKey();\n    for (BucketT *B = OldBucketsBegin, *E = OldBucketsEnd; B != E; ++B) {\n      if (!KeyInfoT::isEqual(B->getFirst(), EmptyKey) &&\n          !KeyInfoT::isEqual(B->getFirst(), TombstoneKey)) {\n        // Insert the key/value into the new table.\n        BucketT *DestBucket;\n        bool FoundVal = LookupBucketFor(B->getFirst(), DestBucket);\n        (void)FoundVal;  // silence warning.\n        CHECK(!FoundVal);\n        DestBucket->getFirst() = __sanitizer::move(B->getFirst());\n        ::new (&DestBucket->getSecond())\n            ValueT(__sanitizer::move(B->getSecond()));\n        incrementNumEntries();\n\n        // Free the value.\n        B->getSecond().~ValueT();\n      }\n      B->getFirst().~KeyT();\n    }\n  }\n\n  template <typename OtherBaseT>\n  void copyFrom(\n      const DenseMapBase<OtherBaseT, KeyT, ValueT, KeyInfoT, BucketT> &other) {\n    CHECK_NE(&other, this);\n    CHECK_EQ(getNumBuckets(), other.getNumBuckets());\n\n    setNumEntries(other.getNumEntries());\n    setNumTombstones(other.getNumTombstones());\n\n    if (__sanitizer::is_trivially_copyable<KeyT>::value &&\n        __sanitizer::is_trivially_copyable<ValueT>::value)\n      internal_memcpy(reinterpret_cast<void *>(getBuckets()),\n                      other.getBuckets(), getNumBuckets() * sizeof(BucketT));\n    else\n      for (uptr i = 0; i < 
getNumBuckets(); ++i) {\n        ::new (&getBuckets()[i].getFirst())\n            KeyT(other.getBuckets()[i].getFirst());\n        if (!KeyInfoT::isEqual(getBuckets()[i].getFirst(), getEmptyKey()) &&\n            !KeyInfoT::isEqual(getBuckets()[i].getFirst(), getTombstoneKey()))\n          ::new (&getBuckets()[i].getSecond())\n              ValueT(other.getBuckets()[i].getSecond());\n      }\n  }\n\n  static unsigned getHashValue(const KeyT &Val) {\n    return KeyInfoT::getHashValue(Val);\n  }\n\n  template <typename LookupKeyT>\n  static unsigned getHashValue(const LookupKeyT &Val) {\n    return KeyInfoT::getHashValue(Val);\n  }\n\n  static const KeyT getEmptyKey() { return KeyInfoT::getEmptyKey(); }\n\n  static const KeyT getTombstoneKey() { return KeyInfoT::getTombstoneKey(); }\n\n private:\n  unsigned getNumEntries() const {\n    return static_cast<const DerivedT *>(this)->getNumEntries();\n  }\n\n  void setNumEntries(unsigned Num) {\n    static_cast<DerivedT *>(this)->setNumEntries(Num);\n  }\n\n  void incrementNumEntries() { setNumEntries(getNumEntries() + 1); }\n\n  void decrementNumEntries() { setNumEntries(getNumEntries() - 1); }\n\n  unsigned getNumTombstones() const {\n    return static_cast<const DerivedT *>(this)->getNumTombstones();\n  }\n\n  void setNumTombstones(unsigned Num) {\n    static_cast<DerivedT *>(this)->setNumTombstones(Num);\n  }\n\n  void incrementNumTombstones() { setNumTombstones(getNumTombstones() + 1); }\n\n  void decrementNumTombstones() { setNumTombstones(getNumTombstones() - 1); }\n\n  const BucketT *getBuckets() const {\n    return static_cast<const DerivedT *>(this)->getBuckets();\n  }\n\n  BucketT *getBuckets() { return static_cast<DerivedT *>(this)->getBuckets(); }\n\n  unsigned getNumBuckets() const {\n    return static_cast<const DerivedT *>(this)->getNumBuckets();\n  }\n\n  BucketT *getBucketsEnd() { return getBuckets() + getNumBuckets(); }\n\n  const BucketT *getBucketsEnd() const {\n    return getBuckets() + 
getNumBuckets();\n  }\n\n  void grow(unsigned AtLeast) { static_cast<DerivedT *>(this)->grow(AtLeast); }\n\n  template <typename KeyArg, typename... ValueArgs>\n  BucketT *InsertIntoBucket(BucketT *TheBucket, KeyArg &&Key,\n                            ValueArgs &&...Values) {\n    TheBucket = InsertIntoBucketImpl(Key, Key, TheBucket);\n\n    TheBucket->getFirst() = __sanitizer::forward<KeyArg>(Key);\n    ::new (&TheBucket->getSecond())\n        ValueT(__sanitizer::forward<ValueArgs>(Values)...);\n    return TheBucket;\n  }\n\n  template <typename LookupKeyT>\n  BucketT *InsertIntoBucketWithLookup(BucketT *TheBucket, KeyT &&Key,\n                                      ValueT &&Value, LookupKeyT &Lookup) {\n    TheBucket = InsertIntoBucketImpl(Key, Lookup, TheBucket);\n\n    TheBucket->getFirst() = __sanitizer::move(Key);\n    ::new (&TheBucket->getSecond()) ValueT(__sanitizer::move(Value));\n    return TheBucket;\n  }\n\n  template <typename LookupKeyT>\n  BucketT *InsertIntoBucketImpl(const KeyT &Key, const LookupKeyT &Lookup,\n                                BucketT *TheBucket) {\n    // If the load of the hash table is more than 3/4, or if fewer than 1/8 of\n    // the buckets are empty (meaning that many are filled with tombstones),\n    // grow the table.\n    //\n    // The later case is tricky.  For example, if we had one empty bucket with\n    // tons of tombstones, failing lookups (e.g. for insertion) would have to\n    // probe almost the entire table until it found the empty bucket.  
If the\n    // table completely filled with tombstones, no lookup would ever succeed,\n    // causing infinite loops in lookup.\n    unsigned NewNumEntries = getNumEntries() + 1;\n    unsigned NumBuckets = getNumBuckets();\n    if (UNLIKELY(NewNumEntries * 4 >= NumBuckets * 3)) {\n      this->grow(NumBuckets * 2);\n      LookupBucketFor(Lookup, TheBucket);\n      NumBuckets = getNumBuckets();\n    } else if (UNLIKELY(NumBuckets - (NewNumEntries + getNumTombstones()) <=\n                        NumBuckets / 8)) {\n      this->grow(NumBuckets);\n      LookupBucketFor(Lookup, TheBucket);\n    }\n    CHECK(TheBucket);\n\n    // Only update the state after we've grown our bucket space appropriately\n    // so that when growing buckets we have self-consistent entry count.\n    incrementNumEntries();\n\n    // If we are writing over a tombstone, remember this.\n    const KeyT EmptyKey = getEmptyKey();\n    if (!KeyInfoT::isEqual(TheBucket->getFirst(), EmptyKey))\n      decrementNumTombstones();\n\n    return TheBucket;\n  }\n\n  /// LookupBucketFor - Lookup the appropriate bucket for Val, returning it in\n  /// FoundBucket.  
If the bucket contains the key and a value, this returns\n  /// true, otherwise it returns a bucket with an empty marker or tombstone and\n  /// returns false.\n  template <typename LookupKeyT>\n  bool LookupBucketFor(const LookupKeyT &Val,\n                       const BucketT *&FoundBucket) const {\n    const BucketT *BucketsPtr = getBuckets();\n    const unsigned NumBuckets = getNumBuckets();\n\n    if (NumBuckets == 0) {\n      FoundBucket = nullptr;\n      return false;\n    }\n\n    // FoundTombstone - Keep track of whether we find a tombstone while probing.\n    const BucketT *FoundTombstone = nullptr;\n    const KeyT EmptyKey = getEmptyKey();\n    const KeyT TombstoneKey = getTombstoneKey();\n    CHECK(!KeyInfoT::isEqual(Val, EmptyKey));\n    CHECK(!KeyInfoT::isEqual(Val, TombstoneKey));\n\n    unsigned BucketNo = getHashValue(Val) & (NumBuckets - 1);\n    unsigned ProbeAmt = 1;\n    while (true) {\n      const BucketT *ThisBucket = BucketsPtr + BucketNo;\n      // Found Val's bucket?  If so, return it.\n      if (LIKELY(KeyInfoT::isEqual(Val, ThisBucket->getFirst()))) {\n        FoundBucket = ThisBucket;\n        return true;\n      }\n\n      // If we found an empty bucket, the key doesn't exist in the set.\n      // Insert it and return the default value.\n      if (LIKELY(KeyInfoT::isEqual(ThisBucket->getFirst(), EmptyKey))) {\n        // If we've already seen a tombstone while probing, fill it in instead\n        // of the empty bucket we eventually probed to.\n        FoundBucket = FoundTombstone ? FoundTombstone : ThisBucket;\n        return false;\n      }\n\n      // If this is a tombstone, remember it.  
If Val ends up not in the map, we\n      // prefer to return it than something that would require more probing.\n      if (KeyInfoT::isEqual(ThisBucket->getFirst(), TombstoneKey) &&\n          !FoundTombstone)\n        FoundTombstone = ThisBucket;  // Remember the first tombstone found.\n\n      // Otherwise, it's a hash collision or a tombstone, continue quadratic\n      // probing.\n      BucketNo += ProbeAmt++;\n      BucketNo &= (NumBuckets - 1);\n    }\n  }\n\n  template <typename LookupKeyT>\n  bool LookupBucketFor(const LookupKeyT &Val, BucketT *&FoundBucket) {\n    const BucketT *ConstFoundBucket;\n    bool Result = const_cast<const DenseMapBase *>(this)->LookupBucketFor(\n        Val, ConstFoundBucket);\n    FoundBucket = const_cast<BucketT *>(ConstFoundBucket);\n    return Result;\n  }\n\n public:\n  /// Return the approximate size (in bytes) of the actual map.\n  /// This is just the raw memory used by DenseMap.\n  /// If entries are pointers to objects, the size of the referenced objects\n  /// are not included.\n  uptr getMemorySize() const {\n    return RoundUpTo(getNumBuckets() * sizeof(BucketT), GetPageSizeCached());\n  }\n};\n\n/// Equality comparison for DenseMap.\n///\n/// Iterates over elements of LHS confirming that each (key, value) pair in LHS\n/// is also in RHS, and that no additional pairs are in RHS.\n/// Equivalent to N calls to RHS.find and N value comparisons. 
Amortized\n/// complexity is linear, worst case is O(N^2) (if every hash collides).\ntemplate <typename DerivedT, typename KeyT, typename ValueT, typename KeyInfoT,\n          typename BucketT>\nbool operator==(\n    const DenseMapBase<DerivedT, KeyT, ValueT, KeyInfoT, BucketT> &LHS,\n    const DenseMapBase<DerivedT, KeyT, ValueT, KeyInfoT, BucketT> &RHS) {\n  if (LHS.size() != RHS.size())\n    return false;\n\n  bool R = true;\n  LHS.forEach(\n      [&](const typename DenseMapBase<DerivedT, KeyT, ValueT, KeyInfoT,\n                                      BucketT>::value_type &KV) -> bool {\n        const auto *I = RHS.find(KV.first);\n        if (!I || I->second != KV.second) {\n          R = false;\n          return false;\n        }\n        return true;\n      });\n\n  return R;\n}\n\n/// Inequality comparison for DenseMap.\n///\n/// Equivalent to !(LHS == RHS). See operator== for performance notes.\ntemplate <typename DerivedT, typename KeyT, typename ValueT, typename KeyInfoT,\n          typename BucketT>\nbool operator!=(\n    const DenseMapBase<DerivedT, KeyT, ValueT, KeyInfoT, BucketT> &LHS,\n    const DenseMapBase<DerivedT, KeyT, ValueT, KeyInfoT, BucketT> &RHS) {\n  return !(LHS == RHS);\n}\n\ntemplate <typename KeyT, typename ValueT,\n          typename KeyInfoT = DenseMapInfo<KeyT>,\n          typename BucketT = detail::DenseMapPair<KeyT, ValueT>>\nclass DenseMap : public DenseMapBase<DenseMap<KeyT, ValueT, KeyInfoT, BucketT>,\n                                     KeyT, ValueT, KeyInfoT, BucketT> {\n  friend class DenseMapBase<DenseMap, KeyT, ValueT, KeyInfoT, BucketT>;\n\n  // Lift some types from the dependent base class into this class for\n  // simplicity of referring to them.\n  using BaseT = DenseMapBase<DenseMap, KeyT, ValueT, KeyInfoT, BucketT>;\n\n  BucketT *Buckets = nullptr;\n  unsigned NumEntries = 0;\n  unsigned NumTombstones = 0;\n  unsigned NumBuckets = 0;\n\n public:\n  /// Create a DenseMap with an optional \\p InitialReserve that 
guarantee that\n  /// this number of elements can be inserted in the map without grow()\n  explicit DenseMap(unsigned InitialReserve) { init(InitialReserve); }\n  constexpr DenseMap() = default;\n\n  DenseMap(const DenseMap &other) : BaseT() {\n    init(0);\n    copyFrom(other);\n  }\n\n  DenseMap(DenseMap &&other) : BaseT() {\n    init(0);\n    swap(other);\n  }\n\n  ~DenseMap() {\n    this->destroyAll();\n    deallocate_buffer(Buckets, sizeof(BucketT) * NumBuckets);\n  }\n\n  void swap(DenseMap &RHS) {\n    Swap(Buckets, RHS.Buckets);\n    Swap(NumEntries, RHS.NumEntries);\n    Swap(NumTombstones, RHS.NumTombstones);\n    Swap(NumBuckets, RHS.NumBuckets);\n  }\n\n  DenseMap &operator=(const DenseMap &other) {\n    if (&other != this)\n      copyFrom(other);\n    return *this;\n  }\n\n  DenseMap &operator=(DenseMap &&other) {\n    this->destroyAll();\n    deallocate_buffer(Buckets, sizeof(BucketT) * NumBuckets);\n    init(0);\n    swap(other);\n    return *this;\n  }\n\n  void copyFrom(const DenseMap &other) {\n    this->destroyAll();\n    deallocate_buffer(Buckets, sizeof(BucketT) * NumBuckets);\n    if (allocateBuckets(other.NumBuckets)) {\n      this->BaseT::copyFrom(other);\n    } else {\n      NumEntries = 0;\n      NumTombstones = 0;\n    }\n  }\n\n  void init(unsigned InitNumEntries) {\n    auto InitBuckets = BaseT::getMinBucketToReserveForEntries(InitNumEntries);\n    if (allocateBuckets(InitBuckets)) {\n      this->BaseT::initEmpty();\n    } else {\n      NumEntries = 0;\n      NumTombstones = 0;\n    }\n  }\n\n  void grow(unsigned AtLeast) {\n    unsigned OldNumBuckets = NumBuckets;\n    BucketT *OldBuckets = Buckets;\n\n    allocateBuckets(RoundUpToPowerOfTwo(Max<unsigned>(64, AtLeast)));\n    CHECK(Buckets);\n    if (!OldBuckets) {\n      this->BaseT::initEmpty();\n      return;\n    }\n\n    this->moveFromOldBuckets(OldBuckets, OldBuckets + OldNumBuckets);\n\n    // Free the old table.\n    deallocate_buffer(OldBuckets, 
sizeof(BucketT) * OldNumBuckets);\n  }\n\n private:\n  unsigned getNumEntries() const { return NumEntries; }\n\n  void setNumEntries(unsigned Num) { NumEntries = Num; }\n\n  unsigned getNumTombstones() const { return NumTombstones; }\n\n  void setNumTombstones(unsigned Num) { NumTombstones = Num; }\n\n  BucketT *getBuckets() const { return Buckets; }\n\n  unsigned getNumBuckets() const { return NumBuckets; }\n\n  bool allocateBuckets(unsigned Num) {\n    NumBuckets = Num;\n    if (NumBuckets == 0) {\n      Buckets = nullptr;\n      return false;\n    }\n\n    uptr Size = sizeof(BucketT) * NumBuckets;\n    if (Size * 2 <= GetPageSizeCached()) {\n      // We always allocate at least a page, so use entire space.\n      unsigned Log2 = MostSignificantSetBitIndex(GetPageSizeCached() / Size);\n      Size <<= Log2;\n      NumBuckets <<= Log2;\n      CHECK_EQ(Size, sizeof(BucketT) * NumBuckets);\n      CHECK_GT(Size * 2, GetPageSizeCached());\n    }\n    Buckets = static_cast<BucketT *>(allocate_buffer(Size));\n    return true;\n  }\n\n  static void *allocate_buffer(uptr Size) {\n    return MmapOrDie(RoundUpTo(Size, GetPageSizeCached()), \"DenseMap\");\n  }\n\n  static void deallocate_buffer(void *Ptr, uptr Size) {\n    UnmapOrDie(Ptr, RoundUpTo(Size, GetPageSizeCached()));\n  }\n};\n\n}  // namespace __sanitizer\n\n#endif  // SANITIZER_DENSE_MAP_H\n"
  },
  {
    "path": "runtime/sanitizer_common/sanitizer_dense_map_info.h",
    "content": "//===- sanitizer_dense_map_info.h - Type traits for DenseMap ----*- C++ -*-===//\n//\n// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n// See https://llvm.org/LICENSE.txt for license information.\n// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n//\n//===----------------------------------------------------------------------===//\n\n#ifndef SANITIZER_DENSE_MAP_INFO_H\n#define SANITIZER_DENSE_MAP_INFO_H\n\n#include \"sanitizer_common.h\"\n#include \"sanitizer_internal_defs.h\"\n#include \"sanitizer_type_traits.h\"\n\nnamespace __sanitizer {\n\nnamespace detail {\n\n/// Simplistic combination of 32-bit hash values into 32-bit hash values.\nstatic constexpr unsigned combineHashValue(unsigned a, unsigned b) {\n  u64 key = (u64)a << 32 | (u64)b;\n  key += ~(key << 32);\n  key ^= (key >> 22);\n  key += ~(key << 13);\n  key ^= (key >> 8);\n  key += (key << 3);\n  key ^= (key >> 15);\n  key += ~(key << 27);\n  key ^= (key >> 31);\n  return (unsigned)key;\n}\n\n// We extend a pair to allow users to override the bucket type with their own\n// implementation without requiring two members.\ntemplate <typename KeyT, typename ValueT>\nstruct DenseMapPair {\n  KeyT first = {};\n  ValueT second = {};\n  constexpr DenseMapPair() = default;\n  constexpr DenseMapPair(const KeyT &f, const ValueT &s)\n      : first(f), second(s) {}\n\n  template <typename KeyT2, typename ValueT2>\n  constexpr DenseMapPair(KeyT2 &&f, ValueT2 &&s)\n      : first(__sanitizer::forward<KeyT2>(f)),\n        second(__sanitizer::forward<ValueT2>(s)) {}\n\n  constexpr DenseMapPair(const DenseMapPair &other) = default;\n  constexpr DenseMapPair &operator=(const DenseMapPair &other) = default;\n  constexpr DenseMapPair(DenseMapPair &&other) = default;\n  constexpr DenseMapPair &operator=(DenseMapPair &&other) = default;\n\n  KeyT &getFirst() { return first; }\n  const KeyT &getFirst() const { return first; }\n  ValueT &getSecond() { return second; }\n  
const ValueT &getSecond() const { return second; }\n};\n\n}  // end namespace detail\n\ntemplate <typename T>\nstruct DenseMapInfo {\n  // static T getEmptyKey();\n  // static T getTombstoneKey();\n  // static unsigned getHashValue(const T &Val);\n  // static bool isEqual(const T &LHS, const T &RHS);\n};\n\n// Provide DenseMapInfo for all pointers. Come up with sentinel pointer values\n// that are aligned to alignof(T) bytes, but try to avoid requiring T to be\n// complete. This allows clients to instantiate DenseMap<T*, ...> with forward\n// declared key types. Assume that no pointer key type requires more than 4096\n// bytes of alignment.\ntemplate <typename T>\nstruct DenseMapInfo<T *> {\n  // The following should hold, but it would require T to be complete:\n  // static_assert(alignof(T) <= (1 << Log2MaxAlign),\n  //               \"DenseMap does not support pointer keys requiring more than \"\n  //               \"Log2MaxAlign bits of alignment\");\n  static constexpr uptr Log2MaxAlign = 12;\n\n  static constexpr T *getEmptyKey() {\n    uptr Val = static_cast<uptr>(-1);\n    Val <<= Log2MaxAlign;\n    return reinterpret_cast<T *>(Val);\n  }\n\n  static constexpr T *getTombstoneKey() {\n    uptr Val = static_cast<uptr>(-2);\n    Val <<= Log2MaxAlign;\n    return reinterpret_cast<T *>(Val);\n  }\n\n  static constexpr unsigned getHashValue(const T *PtrVal) {\n    return (unsigned((uptr)PtrVal) >> 4) ^ (unsigned((uptr)PtrVal) >> 9);\n  }\n\n  static constexpr bool isEqual(const T *LHS, const T *RHS) {\n    return LHS == RHS;\n  }\n};\n\n// Provide DenseMapInfo for chars.\ntemplate <>\nstruct DenseMapInfo<char> {\n  static constexpr char getEmptyKey() { return ~0; }\n  static constexpr char getTombstoneKey() { return ~0 - 1; }\n  static constexpr unsigned getHashValue(const char &Val) { return Val * 37U; }\n\n  static constexpr bool isEqual(const char &LHS, const char &RHS) {\n    return LHS == RHS;\n  }\n};\n\n// Provide DenseMapInfo for unsigned chars.\ntemplate 
<>\nstruct DenseMapInfo<unsigned char> {\n  static constexpr unsigned char getEmptyKey() { return ~0; }\n  static constexpr unsigned char getTombstoneKey() { return ~0 - 1; }\n  static constexpr unsigned getHashValue(const unsigned char &Val) {\n    return Val * 37U;\n  }\n\n  static constexpr bool isEqual(const unsigned char &LHS,\n                                const unsigned char &RHS) {\n    return LHS == RHS;\n  }\n};\n\n// Provide DenseMapInfo for unsigned shorts.\ntemplate <>\nstruct DenseMapInfo<unsigned short> {\n  static constexpr unsigned short getEmptyKey() { return 0xFFFF; }\n  static constexpr unsigned short getTombstoneKey() { return 0xFFFF - 1; }\n  static constexpr unsigned getHashValue(const unsigned short &Val) {\n    return Val * 37U;\n  }\n\n  static constexpr bool isEqual(const unsigned short &LHS,\n                                const unsigned short &RHS) {\n    return LHS == RHS;\n  }\n};\n\n// Provide DenseMapInfo for unsigned ints.\ntemplate <>\nstruct DenseMapInfo<unsigned> {\n  static constexpr unsigned getEmptyKey() { return ~0U; }\n  static constexpr unsigned getTombstoneKey() { return ~0U - 1; }\n  static constexpr unsigned getHashValue(const unsigned &Val) {\n    return Val * 37U;\n  }\n\n  static constexpr bool isEqual(const unsigned &LHS, const unsigned &RHS) {\n    return LHS == RHS;\n  }\n};\n\n// Provide DenseMapInfo for unsigned longs.\ntemplate <>\nstruct DenseMapInfo<unsigned long> {\n  static constexpr unsigned long getEmptyKey() { return ~0UL; }\n  static constexpr unsigned long getTombstoneKey() { return ~0UL - 1L; }\n\n  static constexpr unsigned getHashValue(const unsigned long &Val) {\n    return (unsigned)(Val * 37UL);\n  }\n\n  static constexpr bool isEqual(const unsigned long &LHS,\n                                const unsigned long &RHS) {\n    return LHS == RHS;\n  }\n};\n\n// Provide DenseMapInfo for unsigned long longs.\ntemplate <>\nstruct DenseMapInfo<unsigned long long> {\n  static constexpr unsigned long 
long getEmptyKey() { return ~0ULL; }\n  static constexpr unsigned long long getTombstoneKey() { return ~0ULL - 1ULL; }\n\n  static constexpr unsigned getHashValue(const unsigned long long &Val) {\n    return (unsigned)(Val * 37ULL);\n  }\n\n  static constexpr bool isEqual(const unsigned long long &LHS,\n                                const unsigned long long &RHS) {\n    return LHS == RHS;\n  }\n};\n\n// Provide DenseMapInfo for shorts.\ntemplate <>\nstruct DenseMapInfo<short> {\n  static constexpr short getEmptyKey() { return 0x7FFF; }\n  static constexpr short getTombstoneKey() { return -0x7FFF - 1; }\n  static constexpr unsigned getHashValue(const short &Val) { return Val * 37U; }\n  static constexpr bool isEqual(const short &LHS, const short &RHS) {\n    return LHS == RHS;\n  }\n};\n\n// Provide DenseMapInfo for ints.\ntemplate <>\nstruct DenseMapInfo<int> {\n  static constexpr int getEmptyKey() { return 0x7fffffff; }\n  static constexpr int getTombstoneKey() { return -0x7fffffff - 1; }\n  static constexpr unsigned getHashValue(const int &Val) {\n    return (unsigned)(Val * 37U);\n  }\n\n  static constexpr bool isEqual(const int &LHS, const int &RHS) {\n    return LHS == RHS;\n  }\n};\n\n// Provide DenseMapInfo for longs.\ntemplate <>\nstruct DenseMapInfo<long> {\n  static constexpr long getEmptyKey() {\n    return (1UL << (sizeof(long) * 8 - 1)) - 1UL;\n  }\n\n  static constexpr long getTombstoneKey() { return getEmptyKey() - 1L; }\n\n  static constexpr unsigned getHashValue(const long &Val) {\n    return (unsigned)(Val * 37UL);\n  }\n\n  static constexpr bool isEqual(const long &LHS, const long &RHS) {\n    return LHS == RHS;\n  }\n};\n\n// Provide DenseMapInfo for long longs.\ntemplate <>\nstruct DenseMapInfo<long long> {\n  static constexpr long long getEmptyKey() { return 0x7fffffffffffffffLL; }\n  static constexpr long long getTombstoneKey() {\n    return -0x7fffffffffffffffLL - 1;\n  }\n\n  static constexpr unsigned getHashValue(const long long &Val) 
{\n    return (unsigned)(Val * 37ULL);\n  }\n\n  static constexpr bool isEqual(const long long &LHS, const long long &RHS) {\n    return LHS == RHS;\n  }\n};\n\n// Provide DenseMapInfo for all pairs whose members have info.\ntemplate <typename T, typename U>\nstruct DenseMapInfo<detail::DenseMapPair<T, U>> {\n  using Pair = detail::DenseMapPair<T, U>;\n  using FirstInfo = DenseMapInfo<T>;\n  using SecondInfo = DenseMapInfo<U>;\n\n  static constexpr Pair getEmptyKey() {\n    return detail::DenseMapPair<T, U>(FirstInfo::getEmptyKey(),\n                                      SecondInfo::getEmptyKey());\n  }\n\n  static constexpr Pair getTombstoneKey() {\n    return detail::DenseMapPair<T, U>(FirstInfo::getTombstoneKey(),\n                                      SecondInfo::getTombstoneKey());\n  }\n\n  static constexpr unsigned getHashValue(const Pair &PairVal) {\n    return detail::combineHashValue(FirstInfo::getHashValue(PairVal.first),\n                                    SecondInfo::getHashValue(PairVal.second));\n  }\n\n  static constexpr bool isEqual(const Pair &LHS, const Pair &RHS) {\n    return FirstInfo::isEqual(LHS.first, RHS.first) &&\n           SecondInfo::isEqual(LHS.second, RHS.second);\n  }\n};\n\n}  // namespace __sanitizer\n\n#endif  // SANITIZER_DENSE_MAP_INFO_H\n"
  },
  {
    "path": "runtime/sanitizer_common/sanitizer_errno.cpp",
    "content": "//===-- sanitizer_errno.cpp -------------------------------------*- C++ -*-===//\n//\n// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n// See https://llvm.org/LICENSE.txt for license information.\n// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n//\n//===----------------------------------------------------------------------===//\n//\n// This file is shared between sanitizers run-time libraries.\n//\n// Defines errno to avoid including errno.h and its dependencies into other\n// files (e.g. interceptors are not supposed to include any system headers).\n//\n//===----------------------------------------------------------------------===//\n\n#include \"sanitizer_errno_codes.h\"\n#include \"sanitizer_internal_defs.h\"\n\n#include <errno.h>\n\nnamespace __sanitizer {\n\nCOMPILER_CHECK(errno_ENOMEM == ENOMEM);\nCOMPILER_CHECK(errno_EBUSY == EBUSY);\nCOMPILER_CHECK(errno_EINVAL == EINVAL);\n\n// EOWNERDEAD is not present in some older platforms.\n#if defined(EOWNERDEAD)\nextern const int errno_EOWNERDEAD = EOWNERDEAD;\n#else\nextern const int errno_EOWNERDEAD = -1;\n#endif\n\n}  // namespace __sanitizer\n"
  },
  {
    "path": "runtime/sanitizer_common/sanitizer_errno.h",
    "content": "//===-- sanitizer_errno.h ---------------------------------------*- C++ -*-===//\n//\n// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n// See https://llvm.org/LICENSE.txt for license information.\n// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n//\n//===----------------------------------------------------------------------===//\n//\n// This file is shared between sanitizers run-time libraries.\n//\n// Defines errno to avoid including errno.h and its dependencies into sensitive\n// files (e.g. interceptors are not supposed to include any system headers).\n// It's ok to use errno.h directly when your file already depend on other system\n// includes though.\n//\n//===----------------------------------------------------------------------===//\n\n#ifndef SANITIZER_ERRNO_H\n#define SANITIZER_ERRNO_H\n\n#include \"sanitizer_errno_codes.h\"\n#include \"sanitizer_platform.h\"\n\n#if SANITIZER_FREEBSD || SANITIZER_MAC\n#  define __errno_location __error\n#elif SANITIZER_ANDROID || SANITIZER_NETBSD\n#  define __errno_location __errno\n#elif SANITIZER_SOLARIS\n#  define __errno_location ___errno\n#elif SANITIZER_WINDOWS\n#  define __errno_location _errno\n#endif\n\nextern \"C\" int *__errno_location();\n\n#define errno (*__errno_location())\n\n#endif  // SANITIZER_ERRNO_H\n"
  },
  {
    "path": "runtime/sanitizer_common/sanitizer_errno_codes.h",
    "content": "//===-- sanitizer_errno_codes.h ---------------------------------*- C++ -*-===//\n//\n// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n// See https://llvm.org/LICENSE.txt for license information.\n// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n//\n//===----------------------------------------------------------------------===//\n//\n// This file is shared between sanitizers run-time libraries.\n//\n// Defines errno codes to avoid including errno.h and its dependencies into\n// sensitive files (e.g. interceptors are not supposed to include any system\n// headers).\n// It's ok to use errno.h directly when your file already depend on other system\n// includes though.\n//\n//===----------------------------------------------------------------------===//\n\n#ifndef SANITIZER_ERRNO_CODES_H\n#define SANITIZER_ERRNO_CODES_H\n\nnamespace __sanitizer {\n\n#define errno_ENOMEM 12\n#define errno_EBUSY 16\n#define errno_EINVAL 22\n#define errno_ENAMETOOLONG 36\n\n// Those might not present or their value differ on different platforms.\nextern const int errno_EOWNERDEAD;\n\n}  // namespace __sanitizer\n\n#endif  // SANITIZER_ERRNO_CODES_H\n"
  },
  {
    "path": "runtime/sanitizer_common/sanitizer_file.cpp",
    "content": "//===-- sanitizer_file.cpp -----------------------------------------------===//\n//\n// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n// See https://llvm.org/LICENSE.txt for license information.\n// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n//\n//===---------------------------------------------------------------------===//\n//\n// This file is shared between AddressSanitizer and ThreadSanitizer\n// run-time libraries.  It defines filesystem-related interfaces.  This\n// is separate from sanitizer_common.cpp so that it's simpler to disable\n// all the filesystem support code for a port that doesn't use it.\n//\n//===---------------------------------------------------------------------===//\n\n#include \"sanitizer_platform.h\"\n\n#if !SANITIZER_FUCHSIA\n\n#include \"sanitizer_common.h\"\n#include \"sanitizer_file.h\"\n\nnamespace __sanitizer {\n\nvoid CatastrophicErrorWrite(const char *buffer, uptr length) {\n  WriteToFile(kStderrFd, buffer, length);\n}\n\nStaticSpinMutex report_file_mu;\nReportFile report_file = {&report_file_mu, kStderrFd, \"\", \"\", 0};\n\nvoid RawWrite(const char *buffer) {\n  report_file.Write(buffer, internal_strlen(buffer));\n}\n\nvoid ReportFile::ReopenIfNecessary() {\n  mu->CheckLocked();\n  if (fd == kStdoutFd || fd == kStderrFd) return;\n\n  uptr pid = internal_getpid();\n  // If in tracer, use the parent's file.\n  if (pid == stoptheworld_tracer_pid)\n    pid = stoptheworld_tracer_ppid;\n  if (fd != kInvalidFd) {\n    // If the report file is already opened by the current process,\n    // do nothing. 
Otherwise the report file was opened by the parent\n    // process, close it now.\n    if (fd_pid == pid)\n      return;\n    else\n      CloseFile(fd);\n  }\n\n  const char *exe_name = GetProcessName();\n  if (common_flags()->log_exe_name && exe_name) {\n    internal_snprintf(full_path, kMaxPathLength, \"%s.%s.%zu\", path_prefix,\n                      exe_name, pid);\n  } else {\n    internal_snprintf(full_path, kMaxPathLength, \"%s.%zu\", path_prefix, pid);\n  }\n  if (common_flags()->log_suffix) {\n    internal_strlcat(full_path, common_flags()->log_suffix, kMaxPathLength);\n  }\n  error_t err;\n  fd = OpenFile(full_path, WrOnly, &err);\n  if (fd == kInvalidFd) {\n    const char *ErrorMsgPrefix = \"ERROR: Can't open file: \";\n    WriteToFile(kStderrFd, ErrorMsgPrefix, internal_strlen(ErrorMsgPrefix));\n    WriteToFile(kStderrFd, full_path, internal_strlen(full_path));\n    char errmsg[100];\n    internal_snprintf(errmsg, sizeof(errmsg), \" (reason: %d)\", err);\n    WriteToFile(kStderrFd, errmsg, internal_strlen(errmsg));\n    Die();\n  }\n  fd_pid = pid;\n}\n\nstatic void RecursiveCreateParentDirs(char *path) {\n  if (path[0] == '\\0')\n    return;\n  for (int i = 1; path[i] != '\\0'; ++i) {\n    char save = path[i];\n    if (!IsPathSeparator(path[i]))\n      continue;\n    path[i] = '\\0';\n    /* Some of these will fail, because the directory exists, ignore it. 
*/\n    CreateDir(path);\n    path[i] = save;\n  }\n}\n\nvoid ReportFile::SetReportPath(const char *path) {\n  if (path) {\n    uptr len = internal_strlen(path);\n    if (len > sizeof(path_prefix) - 100) {\n      Report(\"ERROR: Path is too long: %c%c%c%c%c%c%c%c...\\n\", path[0], path[1],\n             path[2], path[3], path[4], path[5], path[6], path[7]);\n      Die();\n    }\n  }\n\n  SpinMutexLock l(mu);\n  if (fd != kStdoutFd && fd != kStderrFd && fd != kInvalidFd)\n    CloseFile(fd);\n  fd = kInvalidFd;\n  if (!path || internal_strcmp(path, \"stderr\") == 0) {\n    fd = kStderrFd;\n  } else if (internal_strcmp(path, \"stdout\") == 0) {\n    fd = kStdoutFd;\n  } else {\n    internal_snprintf(path_prefix, kMaxPathLength, \"%s\", path);\n    RecursiveCreateParentDirs(path_prefix);\n  }\n}\n\nconst char *ReportFile::GetReportPath() {\n  SpinMutexLock l(mu);\n  ReopenIfNecessary();\n  return full_path;\n}\n\nbool ReadFileToBuffer(const char *file_name, char **buff, uptr *buff_size,\n                      uptr *read_len, uptr max_len, error_t *errno_p) {\n  *buff = nullptr;\n  *buff_size = 0;\n  *read_len = 0;\n  if (!max_len)\n    return true;\n  uptr PageSize = GetPageSizeCached();\n  uptr kMinFileLen = Min(PageSize, max_len);\n\n  // The files we usually open are not seekable, so try different buffer sizes.\n  for (uptr size = kMinFileLen;; size = Min(size * 2, max_len)) {\n    UnmapOrDie(*buff, *buff_size);\n    *buff = (char*)MmapOrDie(size, __func__);\n    *buff_size = size;\n    fd_t fd = OpenFile(file_name, RdOnly, errno_p);\n    if (fd == kInvalidFd) {\n      UnmapOrDie(*buff, *buff_size);\n      return false;\n    }\n    *read_len = 0;\n    // Read up to one page at a time.\n    bool reached_eof = false;\n    while (*read_len < size) {\n      uptr just_read;\n      if (!ReadFromFile(fd, *buff + *read_len, size - *read_len, &just_read,\n                        errno_p)) {\n        UnmapOrDie(*buff, *buff_size);\n        CloseFile(fd);\n        return 
false;\n      }\n      *read_len += just_read;\n      if (just_read == 0 || *read_len == max_len) {\n        reached_eof = true;\n        break;\n      }\n    }\n    CloseFile(fd);\n    if (reached_eof)  // We've read the whole file.\n      break;\n  }\n  return true;\n}\n\nbool ReadFileToVector(const char *file_name,\n                      InternalMmapVectorNoCtor<char> *buff, uptr max_len,\n                      error_t *errno_p) {\n  buff->clear();\n  if (!max_len)\n    return true;\n  uptr PageSize = GetPageSizeCached();\n  fd_t fd = OpenFile(file_name, RdOnly, errno_p);\n  if (fd == kInvalidFd)\n    return false;\n  uptr read_len = 0;\n  while (read_len < max_len) {\n    if (read_len >= buff->size())\n      buff->resize(Min(Max(PageSize, read_len * 2), max_len));\n    CHECK_LT(read_len, buff->size());\n    CHECK_LE(buff->size(), max_len);\n    uptr just_read;\n    if (!ReadFromFile(fd, buff->data() + read_len, buff->size() - read_len,\n                      &just_read, errno_p)) {\n      CloseFile(fd);\n      return false;\n    }\n    read_len += just_read;\n    if (!just_read)\n      break;\n  }\n  CloseFile(fd);\n  buff->resize(read_len);\n  return true;\n}\n\nstatic const char kPathSeparator = SANITIZER_WINDOWS ? 
';' : ':';\n\nchar *FindPathToBinary(const char *name) {\n  if (FileExists(name)) {\n    return internal_strdup(name);\n  }\n\n  const char *path = GetEnv(\"PATH\");\n  if (!path)\n    return nullptr;\n  uptr name_len = internal_strlen(name);\n  InternalMmapVector<char> buffer(kMaxPathLength);\n  const char *beg = path;\n  while (true) {\n    const char *end = internal_strchrnul(beg, kPathSeparator);\n    uptr prefix_len = end - beg;\n    if (prefix_len + name_len + 2 <= kMaxPathLength) {\n      internal_memcpy(buffer.data(), beg, prefix_len);\n      buffer[prefix_len] = '/';\n      internal_memcpy(&buffer[prefix_len + 1], name, name_len);\n      buffer[prefix_len + 1 + name_len] = '\\0';\n      if (FileExists(buffer.data()))\n        return internal_strdup(buffer.data());\n    }\n    if (*end == '\\0') break;\n    beg = end + 1;\n  }\n  return nullptr;\n}\n\n} // namespace __sanitizer\n\nusing namespace __sanitizer;\n\nextern \"C\" {\nvoid __sanitizer_set_report_path(const char *path) {\n  report_file.SetReportPath(path);\n}\n\nvoid __sanitizer_set_report_fd(void *fd) {\n  report_file.fd = (fd_t)reinterpret_cast<uptr>(fd);\n  report_file.fd_pid = internal_getpid();\n}\n\nconst char *__sanitizer_get_report_path() {\n  return report_file.GetReportPath();\n}\n} // extern \"C\"\n\n#endif  // !SANITIZER_FUCHSIA\n"
  },
  {
    "path": "runtime/sanitizer_common/sanitizer_file.h",
    "content": "//===-- sanitizer_file.h ---------------------------------------*- C++ -*-===//\n//\n// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n// See https://llvm.org/LICENSE.txt for license information.\n// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n//\n//===---------------------------------------------------------------------===//\n//\n// This file is shared between run-time libraries of sanitizers.\n// It declares filesystem-related interfaces.  This is separate from\n// sanitizer_common.h so that it's simpler to disable all the filesystem\n// support code for a port that doesn't use it.\n//\n//===---------------------------------------------------------------------===//\n#ifndef SANITIZER_FILE_H\n#define SANITIZER_FILE_H\n\n#include \"sanitizer_interface_internal.h\"\n#include \"sanitizer_internal_defs.h\"\n#include \"sanitizer_libc.h\"\n#include \"sanitizer_mutex.h\"\n\nnamespace __sanitizer {\n\nstruct ReportFile {\n  void Write(const char *buffer, uptr length);\n  bool SupportsColors();\n  void SetReportPath(const char *path);\n  const char *GetReportPath();\n\n  // Don't use fields directly. They are only declared public to allow\n  // aggregate initialization.\n\n  // Protects fields below.\n  StaticSpinMutex *mu;\n  // Opened file descriptor. Defaults to stderr. It may be equal to\n  // kInvalidFd, in which case new file will be opened when necessary.\n  fd_t fd;\n  // Path prefix of report file, set via __sanitizer_set_report_path.\n  char path_prefix[kMaxPathLength];\n  // Full path to report, obtained as <path_prefix>.PID\n  char full_path[kMaxPathLength];\n  // PID of the process that opened fd. 
If a fork() occurs,\n  // the PID of child will be different from fd_pid.\n  uptr fd_pid;\n\n private:\n  void ReopenIfNecessary();\n};\nextern ReportFile report_file;\n\nenum FileAccessMode {\n  RdOnly,\n  WrOnly,\n  RdWr\n};\n\n// Returns kInvalidFd on error.\nfd_t OpenFile(const char *filename, FileAccessMode mode,\n              error_t *errno_p = nullptr);\nvoid CloseFile(fd_t);\n\n// Return true on success, false on error.\nbool ReadFromFile(fd_t fd, void *buff, uptr buff_size,\n                  uptr *bytes_read = nullptr, error_t *error_p = nullptr);\nbool WriteToFile(fd_t fd, const void *buff, uptr buff_size,\n                 uptr *bytes_written = nullptr, error_t *error_p = nullptr);\n\n// Scoped file handle closer.\nstruct FileCloser {\n  explicit FileCloser(fd_t fd) : fd(fd) {}\n  ~FileCloser() { CloseFile(fd); }\n  fd_t fd;\n};\n\nbool SupportsColoredOutput(fd_t fd);\n\n// OS\nconst char *GetPwd();\nbool FileExists(const char *filename);\nchar *FindPathToBinary(const char *name);\nbool IsPathSeparator(const char c);\nbool IsAbsolutePath(const char *path);\n// Returns true on success, false on failure.\nbool CreateDir(const char *pathname);\n// Starts a subprocess and returs its pid.\n// If *_fd parameters are not kInvalidFd their corresponding input/output\n// streams will be redirect to the file. 
The files will always be closed\n// in parent process even in case of an error.\n// The child process will close all fds after STDERR_FILENO\n// before passing control to a program.\npid_t StartSubprocess(const char *filename, const char *const argv[],\n                      const char *const envp[], fd_t stdin_fd = kInvalidFd,\n                      fd_t stdout_fd = kInvalidFd, fd_t stderr_fd = kInvalidFd);\n// Checks if specified process is still running\nbool IsProcessRunning(pid_t pid);\n// Waits for the process to finish and returns its exit code.\n// Returns -1 in case of an error.\nint WaitForProcess(pid_t pid);\n\n// Maps given file to virtual memory, and returns pointer to it\n// (or NULL if mapping fails). Stores the size of mmaped region\n// in '*buff_size'.\nvoid *MapFileToMemory(const char *file_name, uptr *buff_size);\nvoid *MapWritableFileToMemory(void *addr, uptr size, fd_t fd, OFF_T offset);\n\n}  // namespace __sanitizer\n\n#endif  // SANITIZER_FILE_H\n"
  },
  {
    "path": "runtime/sanitizer_common/sanitizer_flag_parser.cpp",
    "content": "//===-- sanitizer_flag_parser.cpp -----------------------------------------===//\n//\n// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n// See https://llvm.org/LICENSE.txt for license information.\n// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n//\n//===----------------------------------------------------------------------===//\n//\n// This file is a part of ThreadSanitizer/AddressSanitizer runtime.\n//\n//===----------------------------------------------------------------------===//\n\n#include \"sanitizer_flag_parser.h\"\n\n#include \"sanitizer_common.h\"\n#include \"sanitizer_libc.h\"\n#include \"sanitizer_flags.h\"\n#include \"sanitizer_flag_parser.h\"\n\nnamespace __sanitizer {\n\nLowLevelAllocator FlagParser::Alloc;\n\nclass UnknownFlags {\n  static const int kMaxUnknownFlags = 20;\n  const char *unknown_flags_[kMaxUnknownFlags];\n  int n_unknown_flags_;\n\n public:\n  void Add(const char *name) {\n    CHECK_LT(n_unknown_flags_, kMaxUnknownFlags);\n    unknown_flags_[n_unknown_flags_++] = name;\n  }\n\n  void Report() {\n    if (!n_unknown_flags_) return;\n    Printf(\"WARNING: found %d unrecognized flag(s):\\n\", n_unknown_flags_);\n    for (int i = 0; i < n_unknown_flags_; ++i)\n      Printf(\"    %s\\n\", unknown_flags_[i]);\n    n_unknown_flags_ = 0;\n  }\n};\n\nUnknownFlags unknown_flags;\n\nvoid ReportUnrecognizedFlags() {\n  unknown_flags.Report();\n}\n\nchar *FlagParser::ll_strndup(const char *s, uptr n) {\n  uptr len = internal_strnlen(s, n);\n  char *s2 = (char*)Alloc.Allocate(len + 1);\n  internal_memcpy(s2, s, len);\n  s2[len] = 0;\n  return s2;\n}\n\nvoid FlagParser::PrintFlagDescriptions() {\n  char buffer[128];\n  buffer[sizeof(buffer) - 1] = '\\0';\n  Printf(\"Available flags for %s:\\n\", SanitizerToolName);\n  for (int i = 0; i < n_flags_; ++i) {\n    bool truncated = !(flags_[i].handler->Format(buffer, sizeof(buffer)));\n    CHECK_EQ(buffer[sizeof(buffer) - 1], '\\0');\n    const 
char *truncation_str = truncated ? \" Truncated\" : \"\";\n    Printf(\"\\t%s\\n\\t\\t- %s (Current Value%s: %s)\\n\", flags_[i].name,\n           flags_[i].desc, truncation_str, buffer);\n  }\n}\n\nvoid FlagParser::fatal_error(const char *err) {\n  Printf(\"%s: ERROR: %s\\n\", SanitizerToolName, err);\n  Die();\n}\n\nbool FlagParser::is_space(char c) {\n  return c == ' ' || c == ',' || c == ':' || c == '\\n' || c == '\\t' ||\n         c == '\\r';\n}\n\nvoid FlagParser::skip_whitespace() {\n  while (is_space(buf_[pos_])) ++pos_;\n}\n\nvoid FlagParser::parse_flag(const char *env_option_name) {\n  uptr name_start = pos_;\n  while (buf_[pos_] != 0 && buf_[pos_] != '=' && !is_space(buf_[pos_])) ++pos_;\n  if (buf_[pos_] != '=') {\n    if (env_option_name) {\n      Printf(\"%s: ERROR: expected '=' in %s\\n\", SanitizerToolName,\n             env_option_name);\n      Die();\n    } else {\n      fatal_error(\"expected '='\");\n    }\n  }\n  char *name = ll_strndup(buf_ + name_start, pos_ - name_start);\n\n  uptr value_start = ++pos_;\n  char *value;\n  if (buf_[pos_] == '\\'' || buf_[pos_] == '\"') {\n    char quote = buf_[pos_++];\n    while (buf_[pos_] != 0 && buf_[pos_] != quote) ++pos_;\n    if (buf_[pos_] == 0) fatal_error(\"unterminated string\");\n    value = ll_strndup(buf_ + value_start + 1, pos_ - value_start - 1);\n    ++pos_; // consume the closing quote\n  } else {\n    while (buf_[pos_] != 0 && !is_space(buf_[pos_])) ++pos_;\n    if (buf_[pos_] != 0 && !is_space(buf_[pos_]))\n      fatal_error(\"expected separator or eol\");\n    value = ll_strndup(buf_ + value_start, pos_ - value_start);\n  }\n\n  bool res = run_handler(name, value);\n  if (!res) fatal_error(\"Flag parsing failed.\");\n}\n\nvoid FlagParser::parse_flags(const char *env_option_name) {\n  while (true) {\n    skip_whitespace();\n    if (buf_[pos_] == 0) break;\n    parse_flag(env_option_name);\n  }\n\n  // Do a sanity check for certain flags.\n  if (common_flags_dont_use.malloc_context_size < 
1)\n    common_flags_dont_use.malloc_context_size = 1;\n}\n\nvoid FlagParser::ParseStringFromEnv(const char *env_name) {\n  const char *env = GetEnv(env_name);\n  VPrintf(1, \"%s: %s\\n\", env_name, env ? env : \"<empty>\");\n  ParseString(env, env_name);\n}\n\nvoid FlagParser::ParseString(const char *s, const char *env_option_name) {\n  if (!s) return;\n  // Backup current parser state to allow nested ParseString() calls.\n  const char *old_buf_ = buf_;\n  uptr old_pos_ = pos_;\n  buf_ = s;\n  pos_ = 0;\n\n  parse_flags(env_option_name);\n\n  buf_ = old_buf_;\n  pos_ = old_pos_;\n}\n\nbool FlagParser::ParseFile(const char *path, bool ignore_missing) {\n  static const uptr kMaxIncludeSize = 1 << 15;\n  char *data;\n  uptr data_mapped_size;\n  error_t err;\n  uptr len;\n  if (!ReadFileToBuffer(path, &data, &data_mapped_size, &len,\n                        Max(kMaxIncludeSize, GetPageSizeCached()), &err)) {\n    if (ignore_missing)\n      return true;\n    Printf(\"Failed to read options from '%s': error %d\\n\", path, err);\n    return false;\n  }\n  ParseString(data, path);\n  UnmapOrDie(data, data_mapped_size);\n  return true;\n}\n\nbool FlagParser::run_handler(const char *name, const char *value) {\n  for (int i = 0; i < n_flags_; ++i) {\n    if (internal_strcmp(name, flags_[i].name) == 0)\n      return flags_[i].handler->Parse(value);\n  }\n  // Unrecognized flag. This is not a fatal error, we may print a warning later.\n  unknown_flags.Add(name);\n  return true;\n}\n\nvoid FlagParser::RegisterHandler(const char *name, FlagHandlerBase *handler,\n                                 const char *desc) {\n  CHECK_LT(n_flags_, kMaxFlags);\n  flags_[n_flags_].name = name;\n  flags_[n_flags_].desc = desc;\n  flags_[n_flags_].handler = handler;\n  ++n_flags_;\n}\n\nFlagParser::FlagParser() : n_flags_(0), buf_(nullptr), pos_(0) {\n  flags_ = (Flag *)Alloc.Allocate(sizeof(Flag) * kMaxFlags);\n}\n\n}  // namespace __sanitizer\n"
  },
  {
    "path": "runtime/sanitizer_common/sanitizer_flag_parser.h",
    "content": "//===-- sanitizer_flag_parser.h ---------------------------------*- C++ -*-===//\n//\n// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n// See https://llvm.org/LICENSE.txt for license information.\n// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n//\n//===----------------------------------------------------------------------===//\n//\n// This file is a part of ThreadSanitizer/AddressSanitizer runtime.\n//\n//===----------------------------------------------------------------------===//\n\n#ifndef SANITIZER_FLAG_REGISTRY_H\n#define SANITIZER_FLAG_REGISTRY_H\n\n#include \"sanitizer_internal_defs.h\"\n#include \"sanitizer_libc.h\"\n#include \"sanitizer_common.h\"\n\nnamespace __sanitizer {\n\nclass FlagHandlerBase {\n public:\n  virtual bool Parse(const char *value) { return false; }\n  // Write the C string representation of the current value (truncated to fit)\n  // into the buffer of size `size`. Returns false if truncation occurred and\n  // returns true otherwise.\n  virtual bool Format(char *buffer, uptr size) {\n    if (size > 0)\n      buffer[0] = '\\0';\n    return false;\n  }\n\n protected:\n  ~FlagHandlerBase() {}\n\n  inline bool FormatString(char *buffer, uptr size, const char *str_to_use) {\n    uptr num_symbols_should_write =\n        internal_snprintf(buffer, size, \"%s\", str_to_use);\n    return num_symbols_should_write < size;\n  }\n};\n\ntemplate <typename T>\nclass FlagHandler final : public FlagHandlerBase {\n  T *t_;\n\n public:\n  explicit FlagHandler(T *t) : t_(t) {}\n  bool Parse(const char *value) final;\n  bool Format(char *buffer, uptr size) final;\n};\n\ninline bool ParseBool(const char *value, bool *b) {\n  if (internal_strcmp(value, \"0\") == 0 ||\n      internal_strcmp(value, \"no\") == 0 ||\n      internal_strcmp(value, \"false\") == 0) {\n    *b = false;\n    return true;\n  }\n  if (internal_strcmp(value, \"1\") == 0 ||\n      internal_strcmp(value, \"yes\") == 0 ||\n      
internal_strcmp(value, \"true\") == 0) {\n    *b = true;\n    return true;\n  }\n  return false;\n}\n\ntemplate <>\ninline bool FlagHandler<bool>::Parse(const char *value) {\n  if (ParseBool(value, t_)) return true;\n  Printf(\"ERROR: Invalid value for bool option: '%s'\\n\", value);\n  return false;\n}\n\ntemplate <>\ninline bool FlagHandler<bool>::Format(char *buffer, uptr size) {\n  return FormatString(buffer, size, *t_ ? \"true\" : \"false\");\n}\n\ntemplate <>\ninline bool FlagHandler<HandleSignalMode>::Parse(const char *value) {\n  bool b;\n  if (ParseBool(value, &b)) {\n    *t_ = b ? kHandleSignalYes : kHandleSignalNo;\n    return true;\n  }\n  if (internal_strcmp(value, \"2\") == 0 ||\n      internal_strcmp(value, \"exclusive\") == 0) {\n    *t_ = kHandleSignalExclusive;\n    return true;\n  }\n  Printf(\"ERROR: Invalid value for signal handler option: '%s'\\n\", value);\n  return false;\n}\n\ntemplate <>\ninline bool FlagHandler<HandleSignalMode>::Format(char *buffer, uptr size) {\n  uptr num_symbols_should_write = internal_snprintf(buffer, size, \"%d\", *t_);\n  return num_symbols_should_write < size;\n}\n\ntemplate <>\ninline bool FlagHandler<const char *>::Parse(const char *value) {\n  *t_ = value;\n  return true;\n}\n\ntemplate <>\ninline bool FlagHandler<const char *>::Format(char *buffer, uptr size) {\n  return FormatString(buffer, size, *t_);\n}\n\ntemplate <>\ninline bool FlagHandler<int>::Parse(const char *value) {\n  const char *value_end;\n  *t_ = internal_simple_strtoll(value, &value_end, 10);\n  bool ok = *value_end == 0;\n  if (!ok) Printf(\"ERROR: Invalid value for int option: '%s'\\n\", value);\n  return ok;\n}\n\ntemplate <>\ninline bool FlagHandler<int>::Format(char *buffer, uptr size) {\n  uptr num_symbols_should_write = internal_snprintf(buffer, size, \"%d\", *t_);\n  return num_symbols_should_write < size;\n}\n\ntemplate <>\ninline bool FlagHandler<uptr>::Parse(const char *value) {\n  const char *value_end;\n  *t_ = 
internal_simple_strtoll(value, &value_end, 10);\n  bool ok = *value_end == 0;\n  if (!ok) Printf(\"ERROR: Invalid value for uptr option: '%s'\\n\", value);\n  return ok;\n}\n\ntemplate <>\ninline bool FlagHandler<uptr>::Format(char *buffer, uptr size) {\n  uptr num_symbols_should_write = internal_snprintf(buffer, size, \"0x%zx\", *t_);\n  return num_symbols_should_write < size;\n}\n\ntemplate <>\ninline bool FlagHandler<s64>::Parse(const char *value) {\n  const char *value_end;\n  *t_ = internal_simple_strtoll(value, &value_end, 10);\n  bool ok = *value_end == 0;\n  if (!ok) Printf(\"ERROR: Invalid value for s64 option: '%s'\\n\", value);\n  return ok;\n}\n\ntemplate <>\ninline bool FlagHandler<s64>::Format(char *buffer, uptr size) {\n  uptr num_symbols_should_write = internal_snprintf(buffer, size, \"%lld\", *t_);\n  return num_symbols_should_write < size;\n}\n\nclass FlagParser {\n  static const int kMaxFlags = 200;\n  struct Flag {\n    const char *name;\n    const char *desc;\n    FlagHandlerBase *handler;\n  } *flags_;\n  int n_flags_;\n\n  const char *buf_;\n  uptr pos_;\n\n public:\n  FlagParser();\n  void RegisterHandler(const char *name, FlagHandlerBase *handler,\n                       const char *desc);\n  void ParseString(const char *s, const char *env_name = 0);\n  void ParseStringFromEnv(const char *env_name);\n  bool ParseFile(const char *path, bool ignore_missing);\n  void PrintFlagDescriptions();\n\n  static LowLevelAllocator Alloc;\n\n private:\n  void fatal_error(const char *err);\n  bool is_space(char c);\n  void skip_whitespace();\n  void parse_flags(const char *env_option_name);\n  void parse_flag(const char *env_option_name);\n  bool run_handler(const char *name, const char *value);\n  char *ll_strndup(const char *s, uptr n);\n};\n\ntemplate <typename T>\nstatic void RegisterFlag(FlagParser *parser, const char *name, const char *desc,\n                         T *var) {\n  FlagHandler<T> *fh = new (FlagParser::Alloc) FlagHandler<T>(var);\n  
parser->RegisterHandler(name, fh, desc);\n}\n\nvoid ReportUnrecognizedFlags();\n\n}  // namespace __sanitizer\n\n#endif  // SANITIZER_FLAG_REGISTRY_H\n"
  },
  {
    "path": "runtime/sanitizer_common/sanitizer_flags.cpp",
    "content": "//===-- sanitizer_flags.cpp -----------------------------------------------===//\n//\n// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n// See https://llvm.org/LICENSE.txt for license information.\n// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n//\n//===----------------------------------------------------------------------===//\n//\n// This file is a part of ThreadSanitizer/AddressSanitizer runtime.\n//\n//===----------------------------------------------------------------------===//\n\n#include \"sanitizer_flags.h\"\n\n#include \"sanitizer_common.h\"\n#include \"sanitizer_flag_parser.h\"\n#include \"sanitizer_libc.h\"\n#include \"sanitizer_linux.h\"\n#include \"sanitizer_list.h\"\n\nnamespace __sanitizer {\n\nCommonFlags common_flags_dont_use;\n\nvoid CommonFlags::SetDefaults() {\n#define COMMON_FLAG(Type, Name, DefaultValue, Description) Name = DefaultValue;\n#include \"sanitizer_flags.inc\"\n#undef COMMON_FLAG\n}\n\nvoid CommonFlags::CopyFrom(const CommonFlags &other) {\n  internal_memcpy(this, &other, sizeof(*this));\n}\n\n// Copy the string from \"s\" to \"out\", making the following substitutions:\n// %b = binary basename\n// %p = pid\n// %d = binary directory\nvoid SubstituteForFlagValue(const char *s, char *out, uptr out_size) {\n  char *out_end = out + out_size;\n  while (*s && out < out_end - 1) {\n    if (s[0] != '%') {\n      *out++ = *s++;\n      continue;\n    }\n    switch (s[1]) {\n      case 'b': {\n        const char *base = GetProcessName();\n        CHECK(base);\n        while (*base && out < out_end - 1)\n          *out++ = *base++;\n        s += 2; // skip \"%b\"\n        break;\n      }\n      case 'p': {\n        int pid = internal_getpid();\n        char buf[32];\n        char *buf_pos = buf + 32;\n        do {\n          *--buf_pos = (pid % 10) + '0';\n          pid /= 10;\n        } while (pid);\n        while (buf_pos < buf + 32 && out < out_end - 1)\n          *out++ = 
*buf_pos++;\n        s += 2; // skip \"%p\"\n        break;\n      }\n      case 'd': {\n        uptr len = ReadBinaryDir(out, out_end - out);\n        out += len;\n        s += 2;  // skip \"%d\"\n        break;\n      }\n      default:\n        *out++ = *s++;\n        break;\n    }\n  }\n  CHECK(out < out_end - 1);\n  *out = '\\0';\n}\n\nclass FlagHandlerInclude final : public FlagHandlerBase {\n  FlagParser *parser_;\n  bool ignore_missing_;\n  const char *original_path_;\n\n public:\n  explicit FlagHandlerInclude(FlagParser *parser, bool ignore_missing)\n      : parser_(parser), ignore_missing_(ignore_missing), original_path_(\"\") {}\n  bool Parse(const char *value) final {\n    original_path_ = value;\n    if (internal_strchr(value, '%')) {\n      char *buf = (char *)MmapOrDie(kMaxPathLength, \"FlagHandlerInclude\");\n      SubstituteForFlagValue(value, buf, kMaxPathLength);\n      bool res = parser_->ParseFile(buf, ignore_missing_);\n      UnmapOrDie(buf, kMaxPathLength);\n      return res;\n    }\n    return parser_->ParseFile(value, ignore_missing_);\n  }\n  bool Format(char *buffer, uptr size) override {\n    // Note `original_path_` isn't actually what's parsed due to `%`\n    // substitutions. 
Printing the substituted path would require holding onto\n    // mmap'ed memory.\n    return FormatString(buffer, size, original_path_);\n  }\n};\n\nvoid RegisterIncludeFlags(FlagParser *parser, CommonFlags *cf) {\n  FlagHandlerInclude *fh_include = new (FlagParser::Alloc)\n      FlagHandlerInclude(parser, /*ignore_missing*/ false);\n  parser->RegisterHandler(\"include\", fh_include,\n                          \"read more options from the given file\");\n  FlagHandlerInclude *fh_include_if_exists = new (FlagParser::Alloc)\n      FlagHandlerInclude(parser, /*ignore_missing*/ true);\n  parser->RegisterHandler(\n      \"include_if_exists\", fh_include_if_exists,\n      \"read more options from the given file (if it exists)\");\n}\n\nvoid RegisterCommonFlags(FlagParser *parser, CommonFlags *cf) {\n#define COMMON_FLAG(Type, Name, DefaultValue, Description) \\\n  RegisterFlag(parser, #Name, Description, &cf->Name);\n#include \"sanitizer_flags.inc\"\n#undef COMMON_FLAG\n\n  RegisterIncludeFlags(parser, cf);\n}\n\nvoid InitializeCommonFlags(CommonFlags *cf) {\n  // need to record coverage to generate coverage report.\n  cf->coverage |= cf->html_cov_report;\n  SetVerbosity(cf->verbosity);\n\n  InitializePlatformCommonFlags(cf);\n}\n\n}  // namespace __sanitizer\n"
  },
  {
    "path": "runtime/sanitizer_common/sanitizer_flags.h",
    "content": "//===-- sanitizer_flags.h ---------------------------------------*- C++ -*-===//\n//\n// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n// See https://llvm.org/LICENSE.txt for license information.\n// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n//\n//===----------------------------------------------------------------------===//\n//\n// This file is a part of ThreadSanitizer/AddressSanitizer runtime.\n//\n//===----------------------------------------------------------------------===//\n\n#ifndef SANITIZER_FLAGS_H\n#define SANITIZER_FLAGS_H\n\n#include \"sanitizer_internal_defs.h\"\n\nnamespace __sanitizer {\n\nenum HandleSignalMode {\n  kHandleSignalNo,\n  kHandleSignalYes,\n  kHandleSignalExclusive,\n};\n\nstruct CommonFlags {\n#define COMMON_FLAG(Type, Name, DefaultValue, Description) Type Name;\n#include \"sanitizer_flags.inc\"\n#undef COMMON_FLAG\n\n  void SetDefaults();\n  void CopyFrom(const CommonFlags &other);\n};\n\n// Functions to get/set global CommonFlags shared by all sanitizer runtimes:\nextern CommonFlags common_flags_dont_use;\ninline const CommonFlags *common_flags() {\n  return &common_flags_dont_use;\n}\n\ninline void SetCommonFlagsDefaults() {\n  common_flags_dont_use.SetDefaults();\n}\n\n// This function can only be used to setup tool-specific overrides for\n// CommonFlags defaults. Generally, it should only be used right after\n// SetCommonFlagsDefaults(), but before ParseCommonFlagsFromString(), and\n// only during the flags initialization (i.e. 
before they are used for\n// the first time).\ninline void OverrideCommonFlags(const CommonFlags &cf) {\n  common_flags_dont_use.CopyFrom(cf);\n}\n\nvoid SubstituteForFlagValue(const char *s, char *out, uptr out_size);\n\nclass FlagParser;\nvoid RegisterCommonFlags(FlagParser *parser,\n                         CommonFlags *cf = &common_flags_dont_use);\nvoid RegisterIncludeFlags(FlagParser *parser, CommonFlags *cf);\n\n// Should be called after parsing all flags. Sets up common flag values\n// and perform initializations common to all sanitizers (e.g. setting\n// verbosity).\nvoid InitializeCommonFlags(CommonFlags *cf = &common_flags_dont_use);\n\n// Platform specific flags initialization.\nvoid InitializePlatformCommonFlags(CommonFlags *cf);\n\n}  // namespace __sanitizer\n\n#endif  // SANITIZER_FLAGS_H\n"
  },
  {
    "path": "runtime/sanitizer_common/sanitizer_flags.inc",
    "content": "//===-- sanitizer_flags.h ---------------------------------------*- C++ -*-===//\n//\n// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n// See https://llvm.org/LICENSE.txt for license information.\n// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n//\n//===----------------------------------------------------------------------===//\n//\n// This file describes common flags available in all sanitizers.\n//\n//===----------------------------------------------------------------------===//\n\n#ifndef COMMON_FLAG\n#error \"Define COMMON_FLAG prior to including this file!\"\n#endif\n\n// COMMON_FLAG(Type, Name, DefaultValue, Description)\n// Supported types: bool, const char *, int, uptr.\n// Default value must be a compile-time constant.\n// Description must be a string literal.\n\nCOMMON_FLAG(\n    bool, symbolize, true,\n    \"If set, use the online symbolizer from common sanitizer runtime to turn \"\n    \"virtual addresses to file/line locations.\")\nCOMMON_FLAG(\n    const char *, external_symbolizer_path, nullptr,\n    \"Path to external symbolizer. 
If empty, the tool will search $PATH for \"\n    \"the symbolizer.\")\nCOMMON_FLAG(\n    bool, allow_addr2line, false,\n    \"If set, allows online symbolizer to run addr2line binary to symbolize \"\n    \"stack traces (addr2line will only be used if llvm-symbolizer binary is \"\n    \"unavailable.\")\nCOMMON_FLAG(const char *, strip_path_prefix, \"\",\n            \"Strips this prefix from file paths in error reports.\")\nCOMMON_FLAG(bool, fast_unwind_on_check, false,\n            \"If available, use the fast frame-pointer-based unwinder on \"\n            \"internal CHECK failures.\")\nCOMMON_FLAG(bool, fast_unwind_on_fatal, false,\n            \"If available, use the fast frame-pointer-based unwinder on fatal \"\n            \"errors.\")\n// ARM thumb/thumb2 frame pointer is inconsistent on GCC and Clang [1]\n// and fast-unwider is also unreliable with mixing arm and thumb code [2].\n// [1] https://gcc.gnu.org/bugzilla/show_bug.cgi?id=92172\n// [2] https://bugs.llvm.org/show_bug.cgi?id=44158\nCOMMON_FLAG(bool, fast_unwind_on_malloc,\n            !(SANITIZER_LINUX && !SANITIZER_ANDROID && SANITIZER_ARM),\n            \"If available, use the fast frame-pointer-based unwinder on \"\n            \"malloc/free.\")\nCOMMON_FLAG(bool, handle_ioctl, false, \"Intercept and handle ioctl requests.\")\nCOMMON_FLAG(int, malloc_context_size, 1,\n            \"Max number of stack frames kept for each allocation/deallocation.\")\nCOMMON_FLAG(\n    const char *, log_path, nullptr,\n    \"Write logs to \\\"log_path.pid\\\". The special values are \\\"stdout\\\" and \"\n    \"\\\"stderr\\\". If unspecified, defaults to \\\"stderr\\\".\")\nCOMMON_FLAG(\n    bool, log_exe_name, false,\n    \"Mention name of executable when reporting error and \"\n    \"append executable name to logs (as in \\\"log_path.exe_name.pid\\\").\")\nCOMMON_FLAG(const char *, log_suffix, nullptr,\n            \"String to append to log file name, e.g. 
\\\".txt\\\".\")\nCOMMON_FLAG(\n    bool, log_to_syslog, (bool)SANITIZER_ANDROID || (bool)SANITIZER_MAC,\n    \"Write all sanitizer output to syslog in addition to other means of \"\n    \"logging.\")\nCOMMON_FLAG(\n    int, verbosity, 0,\n    \"Verbosity level (0 - silent, 1 - a bit of output, 2+ - more output).\")\nCOMMON_FLAG(bool, strip_env, 1,\n            \"Whether to remove the sanitizer from DYLD_INSERT_LIBRARIES to \"\n            \"avoid passing it to children. Default is true.\")\nCOMMON_FLAG(bool, detect_leaks, !SANITIZER_MAC, \"Enable memory leak detection.\")\nCOMMON_FLAG(\n    bool, leak_check_at_exit, true,\n    \"Invoke leak checking in an atexit handler. Has no effect if \"\n    \"detect_leaks=false, or if __lsan_do_leak_check() is called before the \"\n    \"handler has a chance to run.\")\nCOMMON_FLAG(bool, allocator_may_return_null, false,\n            \"If false, the allocator will crash instead of returning 0 on \"\n            \"out-of-memory.\")\nCOMMON_FLAG(bool, print_summary, true,\n            \"If false, disable printing error summaries in addition to error \"\n            \"reports.\")\nCOMMON_FLAG(int, print_module_map, 0,\n            \"Print the process module map where supported (0 - don't print, \"\n            \"1 - print only once before process exits, 2 - print after each \"\n            \"report).\")\nCOMMON_FLAG(bool, check_printf, true, \"Check printf arguments.\")\n#define COMMON_FLAG_HANDLE_SIGNAL_HELP(signal) \\\n    \"Controls custom tool's \" #signal \" handler (0 - do not registers the \" \\\n    \"handler, 1 - register the handler and allow user to set own, \" \\\n    \"2 - registers the handler and block user from changing it). 
\"\nCOMMON_FLAG(HandleSignalMode, handle_segv, kHandleSignalYes,\n            COMMON_FLAG_HANDLE_SIGNAL_HELP(SIGSEGV))\nCOMMON_FLAG(HandleSignalMode, handle_sigbus, kHandleSignalYes,\n            COMMON_FLAG_HANDLE_SIGNAL_HELP(SIGBUS))\nCOMMON_FLAG(HandleSignalMode, handle_abort, kHandleSignalNo,\n            COMMON_FLAG_HANDLE_SIGNAL_HELP(SIGABRT))\nCOMMON_FLAG(HandleSignalMode, handle_sigill, kHandleSignalNo,\n            COMMON_FLAG_HANDLE_SIGNAL_HELP(SIGILL))\nCOMMON_FLAG(HandleSignalMode, handle_sigtrap, kHandleSignalNo,\n            COMMON_FLAG_HANDLE_SIGNAL_HELP(SIGTRAP))\nCOMMON_FLAG(HandleSignalMode, handle_sigfpe, kHandleSignalYes,\n            COMMON_FLAG_HANDLE_SIGNAL_HELP(SIGFPE))\n#undef COMMON_FLAG_HANDLE_SIGNAL_HELP\nCOMMON_FLAG(bool, allow_user_segv_handler, true,\n            \"Deprecated. True has no effect, use handle_sigbus=1. If false, \"\n            \"handle_*=1 will be upgraded to handle_*=2.\")\nCOMMON_FLAG(bool, use_sigaltstack, true,\n            \"If set, uses alternate stack for signal handling.\")\nCOMMON_FLAG(bool, detect_deadlocks, true,\n            \"If set, deadlock detection is enabled.\")\nCOMMON_FLAG(\n    uptr, clear_shadow_mmap_threshold, 64 * 1024,\n    \"Large shadow regions are zero-filled using mmap(NORESERVE) instead of \"\n    \"memset(). 
This is the threshold size in bytes.\")\nCOMMON_FLAG(const char *, color, \"auto\",\n            \"Colorize reports: (always|never|auto).\")\nCOMMON_FLAG(\n    bool, legacy_pthread_cond, false,\n    \"Enables support for dynamic libraries linked with libpthread 2.2.5.\")\nCOMMON_FLAG(bool, intercept_tls_get_addr, false, \"Intercept __tls_get_addr.\")\nCOMMON_FLAG(bool, help, false, \"Print the flag descriptions.\")\nCOMMON_FLAG(uptr, mmap_limit_mb, 0,\n            \"Limit the amount of mmap-ed memory (excluding shadow) in Mb; \"\n            \"not a user-facing flag, used mosly for testing the tools\")\nCOMMON_FLAG(uptr, hard_rss_limit_mb, 0,\n            \"Hard RSS limit in Mb.\"\n            \" If non-zero, a background thread is spawned at startup\"\n            \" which periodically reads RSS and aborts the process if the\"\n            \" limit is reached\")\nCOMMON_FLAG(uptr, soft_rss_limit_mb, 0,\n            \"Soft RSS limit in Mb.\"\n            \" If non-zero, a background thread is spawned at startup\"\n            \" which periodically reads RSS. If the limit is reached\"\n            \" all subsequent malloc/new calls will fail or return NULL\"\n            \" (depending on the value of allocator_may_return_null)\"\n            \" until the RSS goes below the soft limit.\"\n            \" This limit does not affect memory allocations other than\"\n            \" malloc/new.\")\nCOMMON_FLAG(uptr, max_allocation_size_mb, 0,\n            \"If non-zero, malloc/new calls larger than this size will return \"\n            \"nullptr (or crash if allocator_may_return_null=false).\")\nCOMMON_FLAG(bool, heap_profile, false, \"Experimental heap profiler, asan-only\")\nCOMMON_FLAG(s32, allocator_release_to_os_interval_ms,\n            ((bool)SANITIZER_FUCHSIA || (bool)SANITIZER_WINDOWS) ? -1 : 5000,\n            \"Only affects a 64-bit allocator. 
If set, tries to release unused \"\n            \"memory to the OS, but not more often than this interval (in \"\n            \"milliseconds). Negative values mean do not attempt to release \"\n            \"memory to the OS.\\n\")\nCOMMON_FLAG(bool, can_use_proc_maps_statm, true,\n            \"If false, do not attempt to read /proc/maps/statm.\"\n            \" Mostly useful for testing sanitizers.\")\nCOMMON_FLAG(\n    bool, coverage, false,\n    \"If set, coverage information will be dumped at program shutdown (if the \"\n    \"coverage instrumentation was enabled at compile time).\")\nCOMMON_FLAG(const char *, coverage_dir, \".\",\n            \"Target directory for coverage dumps. Defaults to the current \"\n            \"directory.\")\nCOMMON_FLAG(const char *, cov_8bit_counters_out, \"\",\n    \"If non-empty, write 8bit counters to this file. \")\nCOMMON_FLAG(const char *, cov_pcs_out, \"\",\n    \"If non-empty, write the coverage pc table to this file. \")\nCOMMON_FLAG(bool, full_address_space, false,\n            \"Sanitize complete address space; \"\n            \"by default kernel area on 32-bit platforms will not be sanitized\")\nCOMMON_FLAG(bool, print_suppressions, true,\n            \"Print matched suppressions at exit.\")\nCOMMON_FLAG(\n    bool, disable_coredump, (SANITIZER_WORDSIZE == 64) && !SANITIZER_GO,\n    \"Disable core dumping. By default, disable_coredump=1 on 64-bit to avoid\"\n    \" dumping a 16T+ core file. Ignored on OSes that don't dump core by\"\n    \" default and for sanitizers that don't reserve lots of virtual memory.\")\nCOMMON_FLAG(bool, use_madv_dontdump, true,\n          \"If set, instructs kernel to not store the (huge) shadow \"\n          \"in core file.\")\nCOMMON_FLAG(bool, symbolize_inline_frames, true,\n            \"Print inlined frames in stacktraces. 
Defaults to true.\")\nCOMMON_FLAG(bool, demangle, true, \"Print demangled symbols.\")\nCOMMON_FLAG(bool, symbolize_vs_style, false,\n            \"Print file locations in Visual Studio style (e.g: \"\n            \" file(10,42): ...\")\nCOMMON_FLAG(int, dedup_token_length, 0,\n            \"If positive, after printing a stack trace also print a short \"\n            \"string token based on this number of frames that will simplify \"\n            \"deduplication of the reports. \"\n            \"Example: 'DEDUP_TOKEN: foo-bar-main'. Default is 0.\")\nCOMMON_FLAG(const char *, stack_trace_format, \"DEFAULT\",\n            \"Format string used to render stack frames. \"\n            \"See sanitizer_stacktrace_printer.h for the format description. \"\n            \"Use DEFAULT to get default format.\")\nCOMMON_FLAG(int, compress_stack_depot, 0,\n            \"Compress stack depot to save memory.\")\nCOMMON_FLAG(bool, no_huge_pages_for_shadow, true,\n            \"If true, the shadow is not allowed to use huge pages. 
\")\nCOMMON_FLAG(bool, strict_string_checks, false,\n            \"If set check that string arguments are properly null-terminated\")\nCOMMON_FLAG(bool, intercept_strstr, true,\n            \"If set, uses custom wrappers for strstr and strcasestr functions \"\n            \"to find more errors.\")\nCOMMON_FLAG(bool, intercept_strspn, true,\n            \"If set, uses custom wrappers for strspn and strcspn function \"\n            \"to find more errors.\")\nCOMMON_FLAG(bool, intercept_strtok, true,\n            \"If set, uses a custom wrapper for the strtok function \"\n            \"to find more errors.\")\nCOMMON_FLAG(bool, intercept_strpbrk, true,\n            \"If set, uses custom wrappers for strpbrk function \"\n            \"to find more errors.\")\nCOMMON_FLAG(\n    bool, intercept_strcmp, true,\n    \"If set, uses custom wrappers for strcmp functions to find more errors.\")\nCOMMON_FLAG(bool, intercept_strlen, true,\n            \"If set, uses custom wrappers for strlen and strnlen functions \"\n            \"to find more errors.\")\nCOMMON_FLAG(bool, intercept_strndup, true,\n            \"If set, uses custom wrappers for strndup functions \"\n            \"to find more errors.\")\nCOMMON_FLAG(bool, intercept_strchr, true,\n            \"If set, uses custom wrappers for strchr, strchrnul, and strrchr \"\n            \"functions to find more errors.\")\nCOMMON_FLAG(bool, intercept_memcmp, true,\n            \"If set, uses custom wrappers for memcmp function \"\n            \"to find more errors.\")\nCOMMON_FLAG(bool, strict_memcmp, true,\n          \"If true, assume that memcmp(p1, p2, n) always reads n bytes before \"\n          \"comparing p1 and p2.\")\nCOMMON_FLAG(bool, intercept_memmem, true,\n            \"If set, uses a wrapper for memmem() to find more errors.\")\nCOMMON_FLAG(bool, intercept_intrin, true,\n            \"If set, uses custom wrappers for memset/memcpy/memmove \"\n            \"intrinsics to find more errors.\")\nCOMMON_FLAG(bool, 
intercept_stat, true,\n            \"If set, uses custom wrappers for *stat functions \"\n            \"to find more errors.\")\nCOMMON_FLAG(bool, intercept_send, true,\n            \"If set, uses custom wrappers for send* functions \"\n            \"to find more errors.\")\nCOMMON_FLAG(bool, decorate_proc_maps, (bool)SANITIZER_ANDROID,\n            \"If set, decorate sanitizer mappings in /proc/self/maps with \"\n            \"user-readable names\")\nCOMMON_FLAG(int, exitcode, 1, \"Override the program exit status if the tool \"\n                              \"found an error\")\nCOMMON_FLAG(\n    bool, abort_on_error, (bool)SANITIZER_ANDROID || (bool)SANITIZER_MAC,\n    \"If set, the tool calls abort() instead of _exit() after printing the \"\n    \"error report.\")\nCOMMON_FLAG(bool, suppress_equal_pcs, true,\n            \"Deduplicate multiple reports for single source location in \"\n            \"halt_on_error=false mode (asan only).\")\nCOMMON_FLAG(bool, print_cmdline, false, \"Print command line on crash \"\n            \"(asan only).\")\nCOMMON_FLAG(bool, html_cov_report, false, \"Generate html coverage report.\")\nCOMMON_FLAG(const char *, sancov_path, \"sancov\", \"Sancov tool location.\")\nCOMMON_FLAG(bool, dump_instruction_bytes, false,\n          \"If true, dump 16 bytes starting at the instruction that caused SEGV\")\nCOMMON_FLAG(bool, dump_registers, true,\n          \"If true, dump values of CPU registers when SEGV happens. Only \"\n          \"available on OS X for now.\")\nCOMMON_FLAG(bool, detect_write_exec, false,\n          \"If true, triggers warning when writable-executable pages requests \"\n          \"are being made\")\nCOMMON_FLAG(bool, test_only_emulate_no_memorymap, false,\n            \"TEST ONLY fail to read memory mappings to emulate sanitized \"\n            \"\\\"init\\\"\")\n"
  },
  {
    "path": "runtime/sanitizer_common/sanitizer_flat_map.h",
    "content": "//===-- sanitizer_flat_map.h ------------------------------------*- C++ -*-===//\n//\n// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n// See https://llvm.org/LICENSE.txt for license information.\n// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n//\n//===----------------------------------------------------------------------===//\n//\n// Part of the Sanitizer Allocator.\n//\n//===----------------------------------------------------------------------===//\n\n#ifndef SANITIZER_FLAT_MAP_H\n#define SANITIZER_FLAT_MAP_H\n\n#include \"sanitizer_atomic.h\"\n#include \"sanitizer_common.h\"\n#include \"sanitizer_internal_defs.h\"\n#include \"sanitizer_local_address_space_view.h\"\n#include \"sanitizer_mutex.h\"\n\nnamespace __sanitizer {\n\n// Call these callbacks on mmap/munmap.\nstruct NoOpMapUnmapCallback {\n  void OnMap(uptr p, uptr size) const {}\n  void OnUnmap(uptr p, uptr size) const {}\n};\n\n// Maps integers in rage [0, kSize) to values.\ntemplate <typename T, u64 kSize,\n          typename AddressSpaceViewTy = LocalAddressSpaceView>\nclass FlatMap {\n public:\n  using AddressSpaceView = AddressSpaceViewTy;\n  void Init() { internal_memset(map_, 0, sizeof(map_)); }\n\n  constexpr uptr size() const { return kSize; }\n\n  bool contains(uptr idx) const {\n    CHECK_LT(idx, kSize);\n    return true;\n  }\n\n  T &operator[](uptr idx) {\n    DCHECK_LT(idx, kSize);\n    return map_[idx];\n  }\n\n  const T &operator[](uptr idx) const {\n    DCHECK_LT(idx, kSize);\n    return map_[idx];\n  }\n\n private:\n  T map_[kSize];\n};\n\n// TwoLevelMap maps integers in range [0, kSize1*kSize2) to values.\n// It is implemented as a two-dimensional array: array of kSize1 pointers\n// to kSize2-byte arrays. 
The secondary arrays are mmaped on demand.\n// Each value is initially zero and can be set to something else only once.\n// Setting and getting values from multiple threads is safe w/o extra locking.\ntemplate <typename T, u64 kSize1, u64 kSize2,\n          typename AddressSpaceViewTy = LocalAddressSpaceView,\n          class MapUnmapCallback = NoOpMapUnmapCallback>\nclass TwoLevelMap {\n  static_assert(IsPowerOfTwo(kSize2), \"Use a power of two for performance.\");\n\n public:\n  using AddressSpaceView = AddressSpaceViewTy;\n  void Init() {\n    mu_.Init();\n    internal_memset(map1_, 0, sizeof(map1_));\n  }\n\n  void TestOnlyUnmap() {\n    for (uptr i = 0; i < kSize1; i++) {\n      T *p = Get(i);\n      if (!p)\n        continue;\n      MapUnmapCallback().OnUnmap(reinterpret_cast<uptr>(p), MmapSize());\n      UnmapOrDie(p, kSize2);\n    }\n    Init();\n  }\n\n  uptr MemoryUsage() const {\n    uptr res = 0;\n    for (uptr i = 0; i < kSize1; i++) {\n      T *p = Get(i);\n      if (!p)\n        continue;\n      res += MmapSize();\n    }\n    return res;\n  }\n\n  constexpr uptr size() const { return kSize1 * kSize2; }\n  constexpr uptr size1() const { return kSize1; }\n  constexpr uptr size2() const { return kSize2; }\n\n  bool contains(uptr idx) const {\n    CHECK_LT(idx, kSize1 * kSize2);\n    return Get(idx / kSize2);\n  }\n\n  const T &operator[](uptr idx) const {\n    DCHECK_LT(idx, kSize1 * kSize2);\n    T *map2 = GetOrCreate(idx / kSize2);\n    return *AddressSpaceView::Load(&map2[idx % kSize2]);\n  }\n\n  T &operator[](uptr idx) {\n    DCHECK_LT(idx, kSize1 * kSize2);\n    T *map2 = GetOrCreate(idx / kSize2);\n    return *AddressSpaceView::LoadWritable(&map2[idx % kSize2]);\n  }\n\n private:\n  constexpr uptr MmapSize() const {\n    return RoundUpTo(kSize2 * sizeof(T), GetPageSizeCached());\n  }\n\n  T *Get(uptr idx) const {\n    DCHECK_LT(idx, kSize1);\n    return reinterpret_cast<T *>(\n        atomic_load(&map1_[idx], memory_order_acquire));\n  }\n\n  T 
*GetOrCreate(uptr idx) const {\n    DCHECK_LT(idx, kSize1);\n    // This code needs to use memory_order_acquire/consume, but we use\n    // memory_order_relaxed for performance reasons (matters for arm64). We\n    // expect memory_order_relaxed to be effectively equivalent to\n    // memory_order_consume in this case for all relevant architectures: all\n    // dependent data is reachable only by dereferencing the resulting pointer.\n    // If relaxed load fails to see stored ptr, the code will fall back to\n    // Create() and reload the value again with locked mutex as a memory\n    // barrier.\n    T *res = reinterpret_cast<T *>(atomic_load_relaxed(&map1_[idx]));\n    if (LIKELY(res))\n      return res;\n    return Create(idx);\n  }\n\n  NOINLINE T *Create(uptr idx) const {\n    SpinMutexLock l(&mu_);\n    T *res = Get(idx);\n    if (!res) {\n      res = reinterpret_cast<T *>(MmapOrDie(MmapSize(), \"TwoLevelMap\"));\n      MapUnmapCallback().OnMap(reinterpret_cast<uptr>(res), kSize2);\n      atomic_store(&map1_[idx], reinterpret_cast<uptr>(res),\n                   memory_order_release);\n    }\n    return res;\n  }\n\n  mutable StaticSpinMutex mu_;\n  mutable atomic_uintptr_t map1_[kSize1];\n};\n\ntemplate <u64 kSize, typename AddressSpaceViewTy = LocalAddressSpaceView>\nusing FlatByteMap = FlatMap<u8, kSize, AddressSpaceViewTy>;\n\ntemplate <u64 kSize1, u64 kSize2,\n          typename AddressSpaceViewTy = LocalAddressSpaceView,\n          class MapUnmapCallback = NoOpMapUnmapCallback>\nusing TwoLevelByteMap =\n    TwoLevelMap<u8, kSize1, kSize2, AddressSpaceViewTy, MapUnmapCallback>;\n}  // namespace __sanitizer\n\n#endif\n"
  },
  {
    "path": "runtime/sanitizer_common/sanitizer_freebsd.h",
    "content": "//===-- sanitizer_freebsd.h -------------------------------------*- C++ -*-===//\n//\n// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n// See https://llvm.org/LICENSE.txt for license information.\n// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n//\n//===----------------------------------------------------------------------===//\n//\n// This file is a part of Sanitizer runtime. It contains FreeBSD-specific\n// definitions.\n//\n//===----------------------------------------------------------------------===//\n\n#ifndef SANITIZER_FREEBSD_H\n#define SANITIZER_FREEBSD_H\n\n#include \"sanitizer_internal_defs.h\"\n\n// x86-64 FreeBSD 9.2 and older define 'ucontext_t' incorrectly in\n// 32-bit mode.\n#if SANITIZER_FREEBSD && (SANITIZER_WORDSIZE == 32)\n#include <osreldate.h>\n#if __FreeBSD_version <= 902001  // v9.2\n#include <link.h>\n#include <sys/param.h>\n#include <ucontext.h>\n\nnamespace __sanitizer {\n\ntypedef unsigned long long __xuint64_t;\n\ntypedef __int32_t __xregister_t;\n\ntypedef struct __xmcontext {\n  __xregister_t mc_onstack;\n  __xregister_t mc_gs;\n  __xregister_t mc_fs;\n  __xregister_t mc_es;\n  __xregister_t mc_ds;\n  __xregister_t mc_edi;\n  __xregister_t mc_esi;\n  __xregister_t mc_ebp;\n  __xregister_t mc_isp;\n  __xregister_t mc_ebx;\n  __xregister_t mc_edx;\n  __xregister_t mc_ecx;\n  __xregister_t mc_eax;\n  __xregister_t mc_trapno;\n  __xregister_t mc_err;\n  __xregister_t mc_eip;\n  __xregister_t mc_cs;\n  __xregister_t mc_eflags;\n  __xregister_t mc_esp;\n  __xregister_t mc_ss;\n\n  int mc_len;\n  int mc_fpformat;\n  int mc_ownedfp;\n  __xregister_t mc_flags;\n\n  int mc_fpstate[128] __aligned(16);\n  __xregister_t mc_fsbase;\n  __xregister_t mc_gsbase;\n  __xregister_t mc_xfpustate;\n  __xregister_t mc_xfpustate_len;\n\n  int mc_spare2[4];\n} xmcontext_t;\n\ntypedef struct __xucontext {\n  sigset_t uc_sigmask;\n  xmcontext_t uc_mcontext;\n\n  struct __ucontext *uc_link;\n  
stack_t uc_stack;\n  int uc_flags;\n  int __spare__[4];\n} xucontext_t;\n\nstruct xkinfo_vmentry {\n  int kve_structsize;\n  int kve_type;\n  __xuint64_t kve_start;\n  __xuint64_t kve_end;\n  __xuint64_t kve_offset;\n  __xuint64_t kve_vn_fileid;\n  __uint32_t kve_vn_fsid;\n  int kve_flags;\n  int kve_resident;\n  int kve_private_resident;\n  int kve_protection;\n  int kve_ref_count;\n  int kve_shadow_count;\n  int kve_vn_type;\n  __xuint64_t kve_vn_size;\n  __uint32_t kve_vn_rdev;\n  __uint16_t kve_vn_mode;\n  __uint16_t kve_status;\n  int _kve_ispare[12];\n  char kve_path[PATH_MAX];\n};\n\ntypedef struct {\n  __uint32_t p_type;\n  __uint32_t p_offset;\n  __uint32_t p_vaddr;\n  __uint32_t p_paddr;\n  __uint32_t p_filesz;\n  __uint32_t p_memsz;\n  __uint32_t p_flags;\n  __uint32_t p_align;\n} XElf32_Phdr;\n\nstruct xdl_phdr_info {\n  Elf_Addr dlpi_addr;\n  const char *dlpi_name;\n  const XElf32_Phdr *dlpi_phdr;\n  Elf_Half dlpi_phnum;\n  unsigned long long int dlpi_adds;\n  unsigned long long int dlpi_subs;\n  size_t dlpi_tls_modid;\n  void *dlpi_tls_data;\n};\n\ntypedef int (*__xdl_iterate_hdr_callback)(struct xdl_phdr_info *, size_t,\n                                          void *);\ntypedef int xdl_iterate_phdr_t(__xdl_iterate_hdr_callback, void *);\n\n#define xdl_iterate_phdr(callback, param) \\\n  (((xdl_iterate_phdr_t *)dl_iterate_phdr)((callback), (param)))\n\n}  // namespace __sanitizer\n\n#endif  // __FreeBSD_version <= 902001\n#endif  // SANITIZER_FREEBSD && (SANITIZER_WORDSIZE == 32)\n\n#endif  // SANITIZER_FREEBSD_H\n"
  },
  {
    "path": "runtime/sanitizer_common/sanitizer_fuchsia.cpp",
    "content": "//===-- sanitizer_fuchsia.cpp ---------------------------------------------===//\n//\n// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n// See https://llvm.org/LICENSE.txt for license information.\n// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n//\n//===----------------------------------------------------------------------===//\n//\n// This file is shared between AddressSanitizer and other sanitizer\n// run-time libraries and implements Fuchsia-specific functions from\n// sanitizer_common.h.\n//===----------------------------------------------------------------------===//\n\n#include \"sanitizer_fuchsia.h\"\n#if SANITIZER_FUCHSIA\n\n#include <pthread.h>\n#include <stdlib.h>\n#include <unistd.h>\n#include <zircon/errors.h>\n#include <zircon/process.h>\n#include <zircon/syscalls.h>\n#include <zircon/utc.h>\n\n#include \"sanitizer_common.h\"\n#include \"sanitizer_libc.h\"\n#include \"sanitizer_mutex.h\"\n\nnamespace __sanitizer {\n\nvoid NORETURN internal__exit(int exitcode) { _zx_process_exit(exitcode); }\n\nuptr internal_sched_yield() {\n  zx_status_t status = _zx_nanosleep(0);\n  CHECK_EQ(status, ZX_OK);\n  return 0;  // Why doesn't this return void?\n}\n\nvoid internal_usleep(u64 useconds) {\n  zx_status_t status = _zx_nanosleep(_zx_deadline_after(ZX_USEC(useconds)));\n  CHECK_EQ(status, ZX_OK);\n}\n\nu64 NanoTime() {\n  zx_handle_t utc_clock = _zx_utc_reference_get();\n  CHECK_NE(utc_clock, ZX_HANDLE_INVALID);\n  zx_time_t time;\n  zx_status_t status = _zx_clock_read(utc_clock, &time);\n  CHECK_EQ(status, ZX_OK);\n  return time;\n}\n\nu64 MonotonicNanoTime() { return _zx_clock_get_monotonic(); }\n\nuptr internal_getpid() {\n  zx_info_handle_basic_t info;\n  zx_status_t status =\n      _zx_object_get_info(_zx_process_self(), ZX_INFO_HANDLE_BASIC, &info,\n                          sizeof(info), NULL, NULL);\n  CHECK_EQ(status, ZX_OK);\n  uptr pid = static_cast<uptr>(info.koid);\n  CHECK_EQ(pid, 
info.koid);\n  return pid;\n}\n\nint internal_dlinfo(void *handle, int request, void *p) { UNIMPLEMENTED(); }\n\nuptr GetThreadSelf() { return reinterpret_cast<uptr>(thrd_current()); }\n\ntid_t GetTid() { return GetThreadSelf(); }\n\nvoid Abort() { abort(); }\n\nint Atexit(void (*function)(void)) { return atexit(function); }\n\nvoid GetThreadStackTopAndBottom(bool, uptr *stack_top, uptr *stack_bottom) {\n  pthread_attr_t attr;\n  CHECK_EQ(pthread_getattr_np(pthread_self(), &attr), 0);\n  void *base;\n  size_t size;\n  CHECK_EQ(pthread_attr_getstack(&attr, &base, &size), 0);\n  CHECK_EQ(pthread_attr_destroy(&attr), 0);\n\n  *stack_bottom = reinterpret_cast<uptr>(base);\n  *stack_top = *stack_bottom + size;\n}\n\nvoid InitializePlatformEarly() {}\nvoid MaybeReexec() {}\nvoid CheckASLR() {}\nvoid CheckMPROTECT() {}\nvoid PlatformPrepareForSandboxing(__sanitizer_sandbox_arguments *args) {}\nvoid DisableCoreDumperIfNecessary() {}\nvoid InstallDeadlySignalHandlers(SignalHandlerType handler) {}\nvoid SetAlternateSignalStack() {}\nvoid UnsetAlternateSignalStack() {}\nvoid InitTlsSize() {}\n\nbool SignalContext::IsStackOverflow() const { return false; }\nvoid SignalContext::DumpAllRegisters(void *context) { UNIMPLEMENTED(); }\nconst char *SignalContext::Describe() const { UNIMPLEMENTED(); }\n\nvoid FutexWait(atomic_uint32_t *p, u32 cmp) {\n  zx_status_t status = _zx_futex_wait(reinterpret_cast<zx_futex_t *>(p), cmp,\n                                      ZX_HANDLE_INVALID, ZX_TIME_INFINITE);\n  if (status != ZX_ERR_BAD_STATE)  // Normal race.\n    CHECK_EQ(status, ZX_OK);\n}\n\nvoid FutexWake(atomic_uint32_t *p, u32 count) {\n  zx_status_t status = _zx_futex_wake(reinterpret_cast<zx_futex_t *>(p), count);\n  CHECK_EQ(status, ZX_OK);\n}\n\nuptr GetPageSize() { return _zx_system_get_page_size(); }\n\nuptr GetMmapGranularity() { return _zx_system_get_page_size(); }\n\nsanitizer_shadow_bounds_t ShadowBounds;\n\nvoid InitShadowBounds() { ShadowBounds = 
__sanitizer_shadow_bounds(); }\n\nuptr GetMaxUserVirtualAddress() {\n  InitShadowBounds();\n  return ShadowBounds.memory_limit - 1;\n}\n\nuptr GetMaxVirtualAddress() { return GetMaxUserVirtualAddress(); }\n\nstatic void *DoAnonymousMmapOrDie(uptr size, const char *mem_type,\n                                  bool raw_report, bool die_for_nomem) {\n  size = RoundUpTo(size, GetPageSize());\n\n  zx_handle_t vmo;\n  zx_status_t status = _zx_vmo_create(size, 0, &vmo);\n  if (status != ZX_OK) {\n    if (status != ZX_ERR_NO_MEMORY || die_for_nomem)\n      ReportMmapFailureAndDie(size, mem_type, \"zx_vmo_create\", status,\n                              raw_report);\n    return nullptr;\n  }\n  _zx_object_set_property(vmo, ZX_PROP_NAME, mem_type,\n                          internal_strlen(mem_type));\n\n  // TODO(mcgrathr): Maybe allocate a VMAR for all sanitizer heap and use that?\n  uintptr_t addr;\n  status =\n      _zx_vmar_map(_zx_vmar_root_self(), ZX_VM_PERM_READ | ZX_VM_PERM_WRITE, 0,\n                   vmo, 0, size, &addr);\n  _zx_handle_close(vmo);\n\n  if (status != ZX_OK) {\n    if (status != ZX_ERR_NO_MEMORY || die_for_nomem)\n      ReportMmapFailureAndDie(size, mem_type, \"zx_vmar_map\", status,\n                              raw_report);\n    return nullptr;\n  }\n\n  IncreaseTotalMmap(size);\n\n  return reinterpret_cast<void *>(addr);\n}\n\nvoid *MmapOrDie(uptr size, const char *mem_type, bool raw_report) {\n  return DoAnonymousMmapOrDie(size, mem_type, raw_report, true);\n}\n\nvoid *MmapNoReserveOrDie(uptr size, const char *mem_type) {\n  return MmapOrDie(size, mem_type);\n}\n\nvoid *MmapOrDieOnFatalError(uptr size, const char *mem_type) {\n  return DoAnonymousMmapOrDie(size, mem_type, false, false);\n}\n\nuptr ReservedAddressRange::Init(uptr init_size, const char *name,\n                                uptr fixed_addr) {\n  init_size = RoundUpTo(init_size, GetPageSize());\n  DCHECK_EQ(os_handle_, ZX_HANDLE_INVALID);\n  uintptr_t base;\n  zx_handle_t 
vmar;\n  zx_status_t status = _zx_vmar_allocate(\n      _zx_vmar_root_self(),\n      ZX_VM_CAN_MAP_READ | ZX_VM_CAN_MAP_WRITE | ZX_VM_CAN_MAP_SPECIFIC, 0,\n      init_size, &vmar, &base);\n  if (status != ZX_OK)\n    ReportMmapFailureAndDie(init_size, name, \"zx_vmar_allocate\", status);\n  base_ = reinterpret_cast<void *>(base);\n  size_ = init_size;\n  name_ = name;\n  os_handle_ = vmar;\n\n  return reinterpret_cast<uptr>(base_);\n}\n\nstatic uptr DoMmapFixedOrDie(zx_handle_t vmar, uptr fixed_addr, uptr map_size,\n                             void *base, const char *name, bool die_for_nomem) {\n  uptr offset = fixed_addr - reinterpret_cast<uptr>(base);\n  map_size = RoundUpTo(map_size, GetPageSize());\n  zx_handle_t vmo;\n  zx_status_t status = _zx_vmo_create(map_size, 0, &vmo);\n  if (status != ZX_OK) {\n    if (status != ZX_ERR_NO_MEMORY || die_for_nomem)\n      ReportMmapFailureAndDie(map_size, name, \"zx_vmo_create\", status);\n    return 0;\n  }\n  _zx_object_set_property(vmo, ZX_PROP_NAME, name, internal_strlen(name));\n  DCHECK_GE(base + size_, map_size + offset);\n  uintptr_t addr;\n\n  status =\n      _zx_vmar_map(vmar, ZX_VM_PERM_READ | ZX_VM_PERM_WRITE | ZX_VM_SPECIFIC,\n                   offset, vmo, 0, map_size, &addr);\n  _zx_handle_close(vmo);\n  if (status != ZX_OK) {\n    if (status != ZX_ERR_NO_MEMORY || die_for_nomem) {\n      ReportMmapFailureAndDie(map_size, name, \"zx_vmar_map\", status);\n    }\n    return 0;\n  }\n  IncreaseTotalMmap(map_size);\n  return addr;\n}\n\nuptr ReservedAddressRange::Map(uptr fixed_addr, uptr map_size,\n                               const char *name) {\n  return DoMmapFixedOrDie(os_handle_, fixed_addr, map_size, base_, name_,\n                          false);\n}\n\nuptr ReservedAddressRange::MapOrDie(uptr fixed_addr, uptr map_size,\n                                    const char *name) {\n  return DoMmapFixedOrDie(os_handle_, fixed_addr, map_size, base_, name_, true);\n}\n\nvoid UnmapOrDieVmar(void *addr, uptr 
size, zx_handle_t target_vmar) {\n  if (!addr || !size)\n    return;\n  size = RoundUpTo(size, GetPageSize());\n\n  zx_status_t status =\n      _zx_vmar_unmap(target_vmar, reinterpret_cast<uintptr_t>(addr), size);\n  if (status != ZX_OK) {\n    Report(\"ERROR: %s failed to deallocate 0x%zx (%zd) bytes at address %p\\n\",\n           SanitizerToolName, size, size, addr);\n    CHECK(\"unable to unmap\" && 0);\n  }\n\n  DecreaseTotalMmap(size);\n}\n\nvoid ReservedAddressRange::Unmap(uptr addr, uptr size) {\n  CHECK_LE(size, size_);\n  const zx_handle_t vmar = static_cast<zx_handle_t>(os_handle_);\n  if (addr == reinterpret_cast<uptr>(base_)) {\n    if (size == size_) {\n      // Destroying the vmar effectively unmaps the whole mapping.\n      _zx_vmar_destroy(vmar);\n      _zx_handle_close(vmar);\n      os_handle_ = static_cast<uptr>(ZX_HANDLE_INVALID);\n      DecreaseTotalMmap(size);\n      return;\n    }\n  } else {\n    CHECK_EQ(addr + size, reinterpret_cast<uptr>(base_) + size_);\n  }\n  // Partial unmapping does not affect the fact that the initial range is still\n  // reserved, and the resulting unmapped memory can't be reused.\n  UnmapOrDieVmar(reinterpret_cast<void *>(addr), size, vmar);\n}\n\n// This should never be called.\nvoid *MmapFixedNoAccess(uptr fixed_addr, uptr size, const char *name) {\n  UNIMPLEMENTED();\n}\n\nbool MprotectNoAccess(uptr addr, uptr size) {\n  return _zx_vmar_protect(_zx_vmar_root_self(), 0, addr, size) == ZX_OK;\n}\n\nbool MprotectReadOnly(uptr addr, uptr size) {\n  return _zx_vmar_protect(_zx_vmar_root_self(), ZX_VM_PERM_READ, addr, size) ==\n         ZX_OK;\n}\n\nvoid *MmapAlignedOrDieOnFatalError(uptr size, uptr alignment,\n                                   const char *mem_type) {\n  CHECK_GE(size, GetPageSize());\n  CHECK(IsPowerOfTwo(size));\n  CHECK(IsPowerOfTwo(alignment));\n\n  zx_handle_t vmo;\n  zx_status_t status = _zx_vmo_create(size, 0, &vmo);\n  if (status != ZX_OK) {\n    if (status != ZX_ERR_NO_MEMORY)\n      
ReportMmapFailureAndDie(size, mem_type, \"zx_vmo_create\", status, false);\n    return nullptr;\n  }\n  _zx_object_set_property(vmo, ZX_PROP_NAME, mem_type,\n                          internal_strlen(mem_type));\n\n  // TODO(mcgrathr): Maybe allocate a VMAR for all sanitizer heap and use that?\n\n  // Map a larger size to get a chunk of address space big enough that\n  // it surely contains an aligned region of the requested size.  Then\n  // overwrite the aligned middle portion with a mapping from the\n  // beginning of the VMO, and unmap the excess before and after.\n  size_t map_size = size + alignment;\n  uintptr_t addr;\n  status =\n      _zx_vmar_map(_zx_vmar_root_self(), ZX_VM_PERM_READ | ZX_VM_PERM_WRITE, 0,\n                   vmo, 0, map_size, &addr);\n  if (status == ZX_OK) {\n    uintptr_t map_addr = addr;\n    uintptr_t map_end = map_addr + map_size;\n    addr = RoundUpTo(map_addr, alignment);\n    uintptr_t end = addr + size;\n    if (addr != map_addr) {\n      zx_info_vmar_t info;\n      status = _zx_object_get_info(_zx_vmar_root_self(), ZX_INFO_VMAR, &info,\n                                   sizeof(info), NULL, NULL);\n      if (status == ZX_OK) {\n        uintptr_t new_addr;\n        status = _zx_vmar_map(\n            _zx_vmar_root_self(),\n            ZX_VM_PERM_READ | ZX_VM_PERM_WRITE | ZX_VM_SPECIFIC_OVERWRITE,\n            addr - info.base, vmo, 0, size, &new_addr);\n        if (status == ZX_OK)\n          CHECK_EQ(new_addr, addr);\n      }\n    }\n    if (status == ZX_OK && addr != map_addr)\n      status = _zx_vmar_unmap(_zx_vmar_root_self(), map_addr, addr - map_addr);\n    if (status == ZX_OK && end != map_end)\n      status = _zx_vmar_unmap(_zx_vmar_root_self(), end, map_end - end);\n  }\n  _zx_handle_close(vmo);\n\n  if (status != ZX_OK) {\n    if (status != ZX_ERR_NO_MEMORY)\n      ReportMmapFailureAndDie(size, mem_type, \"zx_vmar_map\", status, false);\n    return nullptr;\n  }\n\n  IncreaseTotalMmap(size);\n\n  return 
reinterpret_cast<void *>(addr);\n}\n\nvoid UnmapOrDie(void *addr, uptr size) {\n  UnmapOrDieVmar(addr, size, _zx_vmar_root_self());\n}\n\nvoid ReleaseMemoryPagesToOS(uptr beg, uptr end) {\n  uptr beg_aligned = RoundUpTo(beg, GetPageSize());\n  uptr end_aligned = RoundDownTo(end, GetPageSize());\n  if (beg_aligned < end_aligned) {\n    zx_handle_t root_vmar = _zx_vmar_root_self();\n    CHECK_NE(root_vmar, ZX_HANDLE_INVALID);\n    zx_status_t status =\n        _zx_vmar_op_range(root_vmar, ZX_VMAR_OP_DECOMMIT, beg_aligned,\n                          end_aligned - beg_aligned, nullptr, 0);\n    CHECK_EQ(status, ZX_OK);\n  }\n}\n\nvoid DumpProcessMap() {\n  // TODO(mcgrathr): write it\n  return;\n}\n\nbool IsAccessibleMemoryRange(uptr beg, uptr size) {\n  // TODO(mcgrathr): Figure out a better way.\n  zx_handle_t vmo;\n  zx_status_t status = _zx_vmo_create(size, 0, &vmo);\n  if (status == ZX_OK) {\n    status = _zx_vmo_write(vmo, reinterpret_cast<const void *>(beg), 0, size);\n    _zx_handle_close(vmo);\n  }\n  return status == ZX_OK;\n}\n\n// FIXME implement on this platform.\nvoid GetMemoryProfile(fill_profile_f cb, uptr *stats) {}\n\nbool ReadFileToBuffer(const char *file_name, char **buff, uptr *buff_size,\n                      uptr *read_len, uptr max_len, error_t *errno_p) {\n  zx_handle_t vmo;\n  zx_status_t status = __sanitizer_get_configuration(file_name, &vmo);\n  if (status == ZX_OK) {\n    uint64_t vmo_size;\n    status = _zx_vmo_get_size(vmo, &vmo_size);\n    if (status == ZX_OK) {\n      if (vmo_size < max_len)\n        max_len = vmo_size;\n      size_t map_size = RoundUpTo(max_len, GetPageSize());\n      uintptr_t addr;\n      status = _zx_vmar_map(_zx_vmar_root_self(), ZX_VM_PERM_READ, 0, vmo, 0,\n                            map_size, &addr);\n      if (status == ZX_OK) {\n        *buff = reinterpret_cast<char *>(addr);\n        *buff_size = map_size;\n        *read_len = max_len;\n      }\n    }\n    _zx_handle_close(vmo);\n  }\n  if (status != ZX_OK 
&& errno_p)\n    *errno_p = status;\n  return status == ZX_OK;\n}\n\nvoid RawWrite(const char *buffer) {\n  constexpr size_t size = 128;\n  static _Thread_local char line[size];\n  static _Thread_local size_t lastLineEnd = 0;\n  static _Thread_local size_t cur = 0;\n\n  while (*buffer) {\n    if (cur >= size) {\n      if (lastLineEnd == 0)\n        lastLineEnd = size;\n      __sanitizer_log_write(line, lastLineEnd);\n      internal_memmove(line, line + lastLineEnd, cur - lastLineEnd);\n      cur = cur - lastLineEnd;\n      lastLineEnd = 0;\n    }\n    if (*buffer == '\\n')\n      lastLineEnd = cur + 1;\n    line[cur++] = *buffer++;\n  }\n  // Flush all complete lines before returning.\n  if (lastLineEnd != 0) {\n    __sanitizer_log_write(line, lastLineEnd);\n    internal_memmove(line, line + lastLineEnd, cur - lastLineEnd);\n    cur = cur - lastLineEnd;\n    lastLineEnd = 0;\n  }\n}\n\nvoid CatastrophicErrorWrite(const char *buffer, uptr length) {\n  __sanitizer_log_write(buffer, length);\n}\n\nchar **StoredArgv;\nchar **StoredEnviron;\n\nchar **GetArgv() { return StoredArgv; }\nchar **GetEnviron() { return StoredEnviron; }\n\nconst char *GetEnv(const char *name) {\n  if (StoredEnviron) {\n    uptr NameLen = internal_strlen(name);\n    for (char **Env = StoredEnviron; *Env != 0; Env++) {\n      if (internal_strncmp(*Env, name, NameLen) == 0 && (*Env)[NameLen] == '=')\n        return (*Env) + NameLen + 1;\n    }\n  }\n  return nullptr;\n}\n\nuptr ReadBinaryName(/*out*/ char *buf, uptr buf_len) {\n  const char *argv0 = \"<UNKNOWN>\";\n  if (StoredArgv && StoredArgv[0]) {\n    argv0 = StoredArgv[0];\n  }\n  internal_strncpy(buf, argv0, buf_len);\n  return internal_strlen(buf);\n}\n\nuptr ReadLongProcessName(/*out*/ char *buf, uptr buf_len) {\n  return ReadBinaryName(buf, buf_len);\n}\n\nuptr MainThreadStackBase, MainThreadStackSize;\n\nbool GetRandom(void *buffer, uptr length, bool blocking) {\n  CHECK_LE(length, ZX_CPRNG_DRAW_MAX_LEN);\n  _zx_cprng_draw(buffer, 
length);\n  return true;\n}\n\nu32 GetNumberOfCPUs() { return zx_system_get_num_cpus(); }\n\nuptr GetRSS() { UNIMPLEMENTED(); }\n\nvoid *internal_start_thread(void *(*func)(void *arg), void *arg) { return 0; }\nvoid internal_join_thread(void *th) {}\n\nvoid InitializePlatformCommonFlags(CommonFlags *cf) {}\n\n}  // namespace __sanitizer\n\nusing namespace __sanitizer;\n\nextern \"C\" {\nvoid __sanitizer_startup_hook(int argc, char **argv, char **envp,\n                              void *stack_base, size_t stack_size) {\n  __sanitizer::StoredArgv = argv;\n  __sanitizer::StoredEnviron = envp;\n  __sanitizer::MainThreadStackBase = reinterpret_cast<uintptr_t>(stack_base);\n  __sanitizer::MainThreadStackSize = stack_size;\n}\n\nvoid __sanitizer_set_report_path(const char *path) {\n  // Handle the initialization code in each sanitizer, but no other calls.\n  // This setting is never consulted on Fuchsia.\n  DCHECK_EQ(path, common_flags()->log_path);\n}\n\nvoid __sanitizer_set_report_fd(void *fd) {\n  UNREACHABLE(\"not available on Fuchsia\");\n}\n\nconst char *__sanitizer_get_report_path() {\n  UNREACHABLE(\"not available on Fuchsia\");\n}\n}  // extern \"C\"\n\n#endif  // SANITIZER_FUCHSIA\n"
  },
  {
    "path": "runtime/sanitizer_common/sanitizer_fuchsia.h",
    "content": "//===-- sanitizer_fuchsia.h ------------------------------------*- C++ -*-===//\n//\n// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n// See https://llvm.org/LICENSE.txt for license information.\n// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n//\n//===---------------------------------------------------------------------===//\n//\n// Fuchsia-specific sanitizer support.\n//\n//===---------------------------------------------------------------------===//\n#ifndef SANITIZER_FUCHSIA_H\n#define SANITIZER_FUCHSIA_H\n\n#include \"sanitizer_platform.h\"\n#if SANITIZER_FUCHSIA\n\n#include \"sanitizer_common.h\"\n\n#include <zircon/sanitizer.h>\n#include <zircon/syscalls/object.h>\n\nnamespace __sanitizer {\n\nextern uptr MainThreadStackBase, MainThreadStackSize;\nextern sanitizer_shadow_bounds_t ShadowBounds;\n\nstruct MemoryMappingLayoutData {\n  InternalMmapVector<zx_info_maps_t> data;\n  size_t current;  // Current index into the vector.\n};\n\nvoid InitShadowBounds();\n\n}  // namespace __sanitizer\n\n#endif  // SANITIZER_FUCHSIA\n#endif  // SANITIZER_FUCHSIA_H\n"
  },
  {
    "path": "runtime/sanitizer_common/sanitizer_getauxval.h",
    "content": "//===-- sanitizer_getauxval.h -----------------------------------*- C++ -*-===//\n//\n// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n// See https://llvm.org/LICENSE.txt for license information.\n// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n//\n//===----------------------------------------------------------------------===//\n//\n// Common getauxval() guards and definitions.\n// getauxval() is not defined until glibc version 2.16, or until API level 21\n// for Android.\n// Implement the getauxval() compat function for NetBSD.\n//\n//===----------------------------------------------------------------------===//\n\n#ifndef SANITIZER_GETAUXVAL_H\n#define SANITIZER_GETAUXVAL_H\n\n#include \"sanitizer_platform.h\"\n#include \"sanitizer_glibc_version.h\"\n\n#if SANITIZER_LINUX || SANITIZER_FUCHSIA\n\n# if (__GLIBC_PREREQ(2, 16) || (SANITIZER_ANDROID && __ANDROID_API__ >= 21) || \\\n      SANITIZER_FUCHSIA) &&                                                    \\\n     !SANITIZER_GO\n#  define SANITIZER_USE_GETAUXVAL 1\n# else\n#  define SANITIZER_USE_GETAUXVAL 0\n# endif\n\n# if SANITIZER_USE_GETAUXVAL\n#  include <sys/auxv.h>\n# else\n// The weak getauxval definition allows to check for the function at runtime.\n// This is useful for Android, when compiled at a lower API level yet running\n// on a more recent platform that offers the function.\nextern \"C\" SANITIZER_WEAK_ATTRIBUTE unsigned long getauxval(unsigned long type);\n# endif\n\n#elif SANITIZER_NETBSD\n\n#define SANITIZER_USE_GETAUXVAL 1\n\n#include <dlfcn.h>\n#include <elf.h>\n\nstatic inline decltype(AuxInfo::a_v) getauxval(decltype(AuxInfo::a_type) type) {\n  for (const AuxInfo *aux = (const AuxInfo *)_dlauxinfo();\n       aux->a_type != AT_NULL; ++aux) {\n    if (type == aux->a_type)\n      return aux->a_v;\n  }\n\n  return 0;\n}\n\n#endif\n\n#endif // SANITIZER_GETAUXVAL_H\n"
  },
  {
    "path": "runtime/sanitizer_common/sanitizer_glibc_version.h",
    "content": "//===-- sanitizer_glibc_version.h -----------------------------------------===//\n//\n// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n// See https://llvm.org/LICENSE.txt for license information.\n// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n//\n//===----------------------------------------------------------------------===//\n//\n// This file is a part of Sanitizer common code.\n//\n//===----------------------------------------------------------------------===//\n\n#ifndef SANITIZER_GLIBC_VERSION_H\n#define SANITIZER_GLIBC_VERSION_H\n\n#include \"sanitizer_platform.h\"\n\n#if SANITIZER_LINUX || SANITIZER_FUCHSIA\n#include <features.h>\n#endif\n\n#ifndef __GLIBC_PREREQ\n#define __GLIBC_PREREQ(x, y) 0\n#endif\n\n#endif\n"
  },
  {
    "path": "runtime/sanitizer_common/sanitizer_hash.h",
    "content": "//===-- sanitizer_common.h --------------------------------------*- C++ -*-===//\n//\n// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n// See https://llvm.org/LICENSE.txt for license information.\n// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n//\n//===----------------------------------------------------------------------===//\n//\n// This file implements a simple hash function.\n//===----------------------------------------------------------------------===//\n\n#ifndef SANITIZER_HASH_H\n#define SANITIZER_HASH_H\n\n#include \"sanitizer_internal_defs.h\"\n\nnamespace __sanitizer {\nclass MurMur2HashBuilder {\n  static const u32 m = 0x5bd1e995;\n  static const u32 seed = 0x9747b28c;\n  static const u32 r = 24;\n  u32 h;\n\n public:\n  explicit MurMur2HashBuilder(u32 init = 0) { h = seed ^ init; }\n  void add(u32 k) {\n    k *= m;\n    k ^= k >> r;\n    k *= m;\n    h *= m;\n    h ^= k;\n  }\n  u32 get() {\n    u32 x = h;\n    x ^= x >> 13;\n    x *= m;\n    x ^= x >> 15;\n    return x;\n  }\n};\n\nclass MurMur2Hash64Builder {\n  static const u64 m = 0xc6a4a7935bd1e995ull;\n  static const u64 seed = 0x9747b28c9747b28cull;\n  static const u64 r = 47;\n  u64 h;\n\n public:\n  explicit MurMur2Hash64Builder(u64 init = 0) { h = seed ^ (init * m); }\n  void add(u64 k) {\n    k *= m;\n    k ^= k >> r;\n    k *= m;\n    h ^= k;\n    h *= m;\n  }\n  u64 get() {\n    u64 x = h;\n    x ^= x >> r;\n    x *= m;\n    x ^= x >> r;\n    return x;\n  }\n};\n}  //namespace __sanitizer\n\n#endif  // SANITIZER_HASH_H\n"
  },
  {
    "path": "runtime/sanitizer_common/sanitizer_interceptors_ioctl_netbsd.inc",
    "content": "//===-- sanitizer_interceptors_ioctl_netbsd.inc -----------------*- C++ -*-===//\n//\n// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n// See https://llvm.org/LICENSE.txt for license information.\n// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n//\n//===----------------------------------------------------------------------===//\n//\n// Ioctl handling in common sanitizer interceptors.\n//===----------------------------------------------------------------------===//\n\n#if SANITIZER_NETBSD\n\n#include \"sanitizer_flags.h\"\n\nstruct ioctl_desc {\n  unsigned req;\n  // FIXME: support read+write arguments. Currently READWRITE and WRITE do the\n  // same thing.\n  // XXX: The declarations below may use WRITE instead of READWRITE, unless\n  // explicitly noted.\n  enum { NONE, READ, WRITE, READWRITE, CUSTOM } type : 3;\n  unsigned size : 29;\n  const char *name;\n};\n\nconst unsigned ioctl_table_max = 1238;\nstatic ioctl_desc ioctl_table[ioctl_table_max];\nstatic unsigned ioctl_table_size = 0;\n\n// This can not be declared as a global, because references to struct_*_sz\n// require a global initializer. 
And this table must be available before global\n// initializers are run.\nstatic void ioctl_table_fill() {\n#define _(rq, tp, sz)                                                          \\\n  if (IOCTL_##rq != IOCTL_NOT_PRESENT) {                                       \\\n    CHECK(ioctl_table_size < ioctl_table_max);                                 \\\n    ioctl_table[ioctl_table_size].req = IOCTL_##rq;                            \\\n    ioctl_table[ioctl_table_size].type = ioctl_desc::tp;                       \\\n    ioctl_table[ioctl_table_size].size = sz;                                   \\\n    ioctl_table[ioctl_table_size].name = #rq;                                  \\\n    ++ioctl_table_size;                                                        \\\n  }\n\n  /* Entries from file: altq/altq_afmap.h */\n  _(AFM_ADDFMAP, READWRITE, struct_atm_flowmap_sz);\n  _(AFM_DELFMAP, READWRITE, struct_atm_flowmap_sz);\n  _(AFM_CLEANFMAP, READWRITE, struct_atm_flowmap_sz);\n  _(AFM_GETFMAP, READWRITE, struct_atm_flowmap_sz);\n  /* Entries from file: altq/altq.h */\n  _(ALTQGTYPE, READWRITE, struct_altqreq_sz);\n  _(ALTQTBRSET, READ, struct_tbrreq_sz);\n  _(ALTQTBRGET, READWRITE, struct_tbrreq_sz);\n  /* Entries from file: altq/altq_blue.h */\n  _(BLUE_IF_ATTACH, READ, struct_blue_interface_sz);\n  _(BLUE_DISABLE, READ, struct_blue_interface_sz);\n  _(BLUE_CONFIG, READWRITE, struct_blue_conf_sz);\n  _(BLUE_GETSTATS, READWRITE, struct_blue_stats_sz);\n  /* Entries from file: altq/altq_cbq.h */\n  _(CBQ_ENABLE, READ, struct_cbq_interface_sz);\n  _(CBQ_ADD_CLASS, READWRITE, struct_cbq_add_class_sz);\n  _(CBQ_DEL_CLASS, READ, struct_cbq_delete_class_sz);\n  _(CBQ_MODIFY_CLASS, READWRITE, struct_cbq_modify_class_sz);\n  _(CBQ_DEL_FILTER, READ, struct_cbq_delete_filter_sz);\n  _(CBQ_GETSTATS, READWRITE, struct_cbq_getstats_sz);\n  /* Entries from file: altq/altq_cdnr.h */\n  _(CDNR_IF_DETACH, READ, struct_cdnr_interface_sz);\n  _(CDNR_ADD_FILTER, READWRITE, 
struct_cdnr_add_filter_sz);\n  _(CDNR_GETSTATS, READWRITE, struct_cdnr_get_stats_sz);\n  _(CDNR_ADD_ELEM, READWRITE, struct_cdnr_add_element_sz);\n  _(CDNR_DEL_ELEM, READ, struct_cdnr_delete_element_sz);\n  _(CDNR_ADD_TBM, READWRITE, struct_cdnr_add_tbmeter_sz);\n  _(CDNR_MOD_TBM, READ, struct_cdnr_modify_tbmeter_sz);\n  _(CDNR_TBM_STATS, READWRITE, struct_cdnr_tbmeter_stats_sz);\n  _(CDNR_ADD_TCM, READWRITE, struct_cdnr_add_trtcm_sz);\n  _(CDNR_MOD_TCM, READWRITE, struct_cdnr_modify_trtcm_sz);\n  _(CDNR_TCM_STATS, READWRITE, struct_cdnr_tcm_stats_sz);\n  _(CDNR_ADD_TSW, READWRITE, struct_cdnr_add_tswtcm_sz);\n  _(CDNR_MOD_TSW, READWRITE, struct_cdnr_modify_tswtcm_sz);\n  /* Entries from file: altq/altq_fifoq.h */\n  _(FIFOQ_CONFIG, READWRITE, struct_fifoq_conf_sz);\n  _(FIFOQ_GETSTATS, READWRITE, struct_fifoq_getstats_sz);\n  /* Entries from file: altq/altq_hfsc.h */\n  _(HFSC_CLEAR_HIERARCHY, READ, struct_hfsc_interface_sz);\n  _(HFSC_ADD_CLASS, READWRITE, struct_hfsc_add_class_sz);\n  _(HFSC_GETSTATS, READWRITE, struct_hfsc_class_stats_sz);\n  /* Entries from file: altq/altq_jobs.h */\n  _(JOBS_IF_ATTACH, READ, struct_jobs_attach_sz);\n  _(JOBS_IF_DETACH, READ, struct_jobs_interface_sz);\n  _(JOBS_ENABLE, READ, struct_jobs_interface_sz);\n  _(JOBS_DISABLE, READ, struct_jobs_interface_sz);\n  _(JOBS_CLEAR, READ, struct_jobs_interface_sz);\n  _(JOBS_ADD_CLASS, READWRITE, struct_jobs_add_class_sz);\n  _(JOBS_MOD_CLASS, READ, struct_jobs_modify_class_sz);\n  /* Entries from file: altq/altq_priq.h */\n  _(PRIQ_IF_ATTACH, READ, struct_priq_interface_sz);\n  _(PRIQ_CLEAR, READ, struct_priq_interface_sz);\n  _(PRIQ_ADD_CLASS, READWRITE, struct_priq_add_class_sz);\n  _(PRIQ_DEL_CLASS, READ, struct_priq_delete_class_sz);\n  _(PRIQ_MOD_CLASS, READ, struct_priq_modify_class_sz);\n  _(PRIQ_ADD_FILTER, READWRITE, struct_priq_add_filter_sz);\n  _(PRIQ_DEL_FILTER, READ, struct_priq_delete_filter_sz);\n  _(PRIQ_GETSTATS, READWRITE, struct_priq_class_stats_sz);\n  /* Entries from 
file: altq/altq_red.h */\n  _(RED_CONFIG, READWRITE, struct_red_conf_sz);\n  _(RED_GETSTATS, READWRITE, struct_red_stats_sz);\n  _(RED_SETDEFAULTS, READ, struct_redparams_sz);\n  /* Entries from file: altq/altq_rio.h */\n  _(RIO_CONFIG, READWRITE, struct_rio_conf_sz);\n  _(RIO_GETSTATS, READWRITE, struct_rio_stats_sz);\n  _(RIO_SETDEFAULTS, READ, struct_redparams_sz);\n  /* Entries from file: altq/altq_wfq.h */\n  _(WFQ_CONFIG, READWRITE, struct_wfq_conf_sz);\n  _(WFQ_GET_QID, READWRITE, struct_wfq_getqid_sz);\n  _(WFQ_SET_WEIGHT, READWRITE, struct_wfq_setweight_sz);\n  /* Entries from file: crypto/cryptodev.h */\n  _(CRIOGET, READWRITE, sizeof(u32));\n  _(CIOCFSESSION, READ, sizeof(u32));\n  _(CIOCKEY, READWRITE, struct_crypt_kop_sz);\n  _(CIOCNFKEYM, READWRITE, struct_crypt_mkop_sz);\n  _(CIOCNFSESSION, READ, struct_crypt_sfop_sz);\n  _(CIOCNCRYPTRETM, READWRITE, struct_cryptret_sz);\n  _(CIOCNCRYPTRET, READWRITE, struct_crypt_result_sz);\n  _(CIOCGSESSION, READWRITE, struct_session_op_sz);\n  _(CIOCNGSESSION, READWRITE, struct_crypt_sgop_sz);\n  _(CIOCCRYPT, READWRITE, struct_crypt_op_sz);\n  _(CIOCNCRYPTM, READWRITE, struct_crypt_mop_sz);\n  _(CIOCASYMFEAT, WRITE, sizeof(u32));\n  /* Entries from file: dev/apm/apmio.h */\n  _(APM_IOC_REJECT, READ, struct_apm_event_info_sz);\n  _(OAPM_IOC_GETPOWER, WRITE, struct_apm_power_info_sz);\n  _(APM_IOC_GETPOWER, READWRITE, struct_apm_power_info_sz);\n  _(APM_IOC_NEXTEVENT, WRITE, struct_apm_event_info_sz);\n  _(APM_IOC_DEV_CTL, READ, struct_apm_ctl_sz);\n  /* Entries from file: dev/dm/netbsd-dm.h */\n  _(NETBSD_DM_IOCTL, READWRITE, struct_plistref_sz);\n  /* Entries from file: dev/dmover/dmover_io.h */\n  _(DMIO_SETFUNC, READ, struct_dmio_setfunc_sz);\n  /* Entries from file: dev/dtv/dtvio_demux.h */\n  _(DMX_START, NONE, 0);\n  _(DMX_STOP, NONE, 0);\n  _(DMX_SET_FILTER, READ, struct_dmx_sct_filter_params_sz);\n  _(DMX_SET_PES_FILTER, READ, struct_dmx_pes_filter_params_sz);\n  _(DMX_SET_BUFFER_SIZE, NONE, 0);\n  
_(DMX_GET_STC, READWRITE, struct_dmx_stc_sz);\n  _(DMX_ADD_PID, READ, sizeof(u16));\n  _(DMX_REMOVE_PID, READ, sizeof(u16));\n  _(DMX_GET_CAPS, WRITE, struct_dmx_caps_sz);\n  _(DMX_SET_SOURCE, READ, enum_dmx_source_sz);\n  /* Entries from file: dev/dtv/dtvio_frontend.h */\n  _(FE_READ_STATUS, WRITE, enum_fe_status_sz);\n  _(FE_READ_BER, WRITE, sizeof(u32));\n  _(FE_READ_SNR, WRITE, sizeof(u16));\n  _(FE_READ_SIGNAL_STRENGTH, WRITE, sizeof(u16));\n  _(FE_READ_UNCORRECTED_BLOCKS, WRITE, sizeof(u32));\n  _(FE_SET_FRONTEND, READWRITE, struct_dvb_frontend_parameters_sz);\n  _(FE_GET_FRONTEND, WRITE, struct_dvb_frontend_parameters_sz);\n  _(FE_GET_EVENT, WRITE, struct_dvb_frontend_event_sz);\n  _(FE_GET_INFO, WRITE, struct_dvb_frontend_info_sz);\n  _(FE_DISEQC_RESET_OVERLOAD, NONE, 0);\n  _(FE_DISEQC_SEND_MASTER_CMD, READ, struct_dvb_diseqc_master_cmd_sz);\n  _(FE_DISEQC_RECV_SLAVE_REPLY, WRITE, struct_dvb_diseqc_slave_reply_sz);\n  _(FE_DISEQC_SEND_BURST, READ, enum_fe_sec_mini_cmd_sz);\n  _(FE_SET_TONE, READ, enum_fe_sec_tone_mode_sz);\n  _(FE_SET_VOLTAGE, READ, enum_fe_sec_voltage_sz);\n  _(FE_ENABLE_HIGH_LNB_VOLTAGE, READ, sizeof(int));\n  _(FE_SET_FRONTEND_TUNE_MODE, READ, sizeof(unsigned int));\n  _(FE_DISHNETWORK_SEND_LEGACY_CMD, READ, sizeof(unsigned long));\n  /* Entries from file: dev/hdaudio/hdaudioio.h */\n  _(HDAUDIO_FGRP_INFO, READWRITE, struct_plistref_sz);\n  _(HDAUDIO_FGRP_GETCONFIG, READWRITE, struct_plistref_sz);\n  _(HDAUDIO_FGRP_SETCONFIG, READWRITE, struct_plistref_sz);\n  _(HDAUDIO_FGRP_WIDGET_INFO, READWRITE, struct_plistref_sz);\n  _(HDAUDIO_FGRP_CODEC_INFO, READWRITE, struct_plistref_sz);\n  _(HDAUDIO_AFG_WIDGET_INFO, READWRITE, struct_plistref_sz);\n  _(HDAUDIO_AFG_CODEC_INFO, READWRITE, struct_plistref_sz);\n  /* Entries from file: dev/hdmicec/hdmicecio.h */\n  _(CEC_GET_PHYS_ADDR, WRITE, sizeof(u16));\n  _(CEC_GET_LOG_ADDRS, WRITE, sizeof(u16));\n  _(CEC_SET_LOG_ADDRS, READ, sizeof(u16));\n  _(CEC_GET_VENDOR_ID, WRITE, sizeof(u32));\n  /* 
Entries from file: dev/hpc/hpcfbio.h */\n  _(HPCFBIO_GCONF, READWRITE, struct_hpcfb_fbconf_sz);\n  _(HPCFBIO_SCONF, READ, struct_hpcfb_fbconf_sz);\n  _(HPCFBIO_GDSPCONF, READWRITE, struct_hpcfb_dspconf_sz);\n  _(HPCFBIO_SDSPCONF, READ, struct_hpcfb_dspconf_sz);\n  _(HPCFBIO_GOP, WRITE, struct_hpcfb_dsp_op_sz);\n  _(HPCFBIO_SOP, READWRITE, struct_hpcfb_dsp_op_sz);\n  /* Entries from file: dev/i2o/iopio.h */\n  _(IOPIOCPT, READWRITE, struct_ioppt_sz);\n  _(IOPIOCGLCT, READWRITE, struct_iovec_sz);\n  _(IOPIOCGSTATUS, READWRITE, struct_iovec_sz);\n  _(IOPIOCRECONFIG, NONE, 0);\n  _(IOPIOCGTIDMAP, READWRITE, struct_iovec_sz);\n  /* Entries from file: dev/ic/athioctl.h */\n  _(SIOCGATHSTATS, READWRITE, struct_ifreq_sz);\n  _(SIOCGATHDIAG, READWRITE, struct_ath_diag_sz);\n  /* Entries from file: dev/ic/bt8xx.h */\n  _(METEORCAPTUR, READ, sizeof(int));\n  _(METEORCAPFRM, READ, struct_meteor_capframe_sz);\n  _(METEORSETGEO, READ, struct_meteor_geomet_sz);\n  _(METEORGETGEO, WRITE, struct_meteor_geomet_sz);\n  _(METEORSTATUS, WRITE, sizeof(unsigned short));\n  _(METEORSHUE, READ, sizeof(signed char));\n  _(METEORGHUE, WRITE, sizeof(signed char));\n  _(METEORSFMT, READ, sizeof(unsigned int));\n  _(METEORGFMT, WRITE, sizeof(unsigned int));\n  _(METEORSINPUT, READ, sizeof(unsigned int));\n  _(METEORGINPUT, WRITE, sizeof(unsigned int));\n  _(METEORSCHCV, READ, sizeof(unsigned char));\n  _(METEORGCHCV, WRITE, sizeof(unsigned char));\n  _(METEORSCOUNT, READ, struct_meteor_counts_sz);\n  _(METEORGCOUNT, WRITE, struct_meteor_counts_sz);\n  _(METEORSFPS, READ, sizeof(unsigned short));\n  _(METEORGFPS, WRITE, sizeof(unsigned short));\n  _(METEORSSIGNAL, READ, sizeof(unsigned int));\n  _(METEORGSIGNAL, WRITE, sizeof(unsigned int));\n  _(METEORSVIDEO, READ, struct_meteor_video_sz);\n  _(METEORGVIDEO, WRITE, struct_meteor_video_sz);\n  _(METEORSBRIG, READ, sizeof(unsigned char));\n  _(METEORGBRIG, WRITE, sizeof(unsigned char));\n  _(METEORSCSAT, READ, sizeof(unsigned char));\n  
_(METEORGCSAT, WRITE, sizeof(unsigned char));\n  _(METEORSCONT, READ, sizeof(unsigned char));\n  _(METEORGCONT, WRITE, sizeof(unsigned char));\n  _(METEORSHWS, READ, sizeof(unsigned char));\n  _(METEORGHWS, WRITE, sizeof(unsigned char));\n  _(METEORSVWS, READ, sizeof(unsigned char));\n  _(METEORGVWS, WRITE, sizeof(unsigned char));\n  _(METEORSTS, READ, sizeof(unsigned char));\n  _(METEORGTS, WRITE, sizeof(unsigned char));\n  _(TVTUNER_SETCHNL, READ, sizeof(unsigned int));\n  _(TVTUNER_GETCHNL, WRITE, sizeof(unsigned int));\n  _(TVTUNER_SETTYPE, READ, sizeof(unsigned int));\n  _(TVTUNER_GETTYPE, WRITE, sizeof(unsigned int));\n  _(TVTUNER_GETSTATUS, WRITE, sizeof(unsigned int));\n  _(TVTUNER_SETFREQ, READ, sizeof(unsigned int));\n  _(TVTUNER_GETFREQ, WRITE, sizeof(unsigned int));\n  _(TVTUNER_SETAFC, READ, sizeof(int));\n  _(TVTUNER_GETAFC, WRITE, sizeof(int));\n  _(RADIO_SETMODE, READ, sizeof(unsigned int));\n  _(RADIO_GETMODE, WRITE, sizeof(unsigned char));\n  _(RADIO_SETFREQ, READ, sizeof(unsigned int));\n  _(RADIO_GETFREQ, WRITE, sizeof(unsigned int));\n  _(METEORSACTPIXFMT, READ, sizeof(int));\n  _(METEORGACTPIXFMT, WRITE, sizeof(int));\n  _(METEORGSUPPIXFMT, READWRITE, struct_meteor_pixfmt_sz);\n  _(TVTUNER_GETCHNLSET, READWRITE, struct_bktr_chnlset_sz);\n  _(REMOTE_GETKEY, WRITE, struct_bktr_remote_sz);\n  /* Entries from file: dev/ic/icp_ioctl.h */\n  _(GDT_IOCTL_GENERAL, READWRITE, struct_gdt_ucmd_sz);\n  _(GDT_IOCTL_DRVERS, WRITE, sizeof(int));\n  _(GDT_IOCTL_CTRTYPE, READWRITE, struct_gdt_ctrt_sz);\n  _(GDT_IOCTL_OSVERS, WRITE, struct_gdt_osv_sz);\n  _(GDT_IOCTL_CTRCNT, WRITE, sizeof(int));\n  _(GDT_IOCTL_EVENT, READWRITE, struct_gdt_event_sz);\n  _(GDT_IOCTL_STATIST, WRITE, struct_gdt_statist_sz);\n  _(GDT_IOCTL_RESCAN, READWRITE, struct_gdt_rescan_sz);\n  /* Entries from file: dev/ic/isp_ioctl.h */\n  _(ISP_SDBLEV, READWRITE, sizeof(int));\n  _(ISP_RESETHBA, NONE, 0);\n  _(ISP_RESCAN, NONE, 0);\n  _(ISP_SETROLE, READWRITE, sizeof(int));\n  _(ISP_GETROLE, 
WRITE, sizeof(int));\n  _(ISP_GET_STATS, WRITE, struct_isp_stats_sz);\n  _(ISP_CLR_STATS, NONE, 0);\n  _(ISP_FC_LIP, NONE, 0);\n  _(ISP_FC_GETDINFO, READWRITE, struct_isp_fc_device_sz);\n  _(ISP_GET_FW_CRASH_DUMP, NONE, 0);\n  _(ISP_FORCE_CRASH_DUMP, NONE, 0);\n  _(ISP_FC_GETHINFO, READWRITE, struct_isp_hba_device_sz);\n  _(ISP_TSK_MGMT, READWRITE, struct_isp_fc_tsk_mgmt_sz);\n  _(ISP_FC_GETDLIST, NONE, 0);\n  /* Entries from file: dev/ic/mlxio.h */\n  _(MLXD_STATUS, WRITE, sizeof(int));\n  _(MLXD_CHECKASYNC, WRITE, sizeof(int));\n  _(MLXD_DETACH, READ, sizeof(int));\n  _(MLX_RESCAN_DRIVES, NONE, 0);\n  _(MLX_PAUSE_CHANNEL, READ, struct_mlx_pause_sz);\n  _(MLX_COMMAND, READWRITE, struct_mlx_usercommand_sz);\n  _(MLX_REBUILDASYNC, READWRITE, struct_mlx_rebuild_request_sz);\n  _(MLX_REBUILDSTAT, WRITE, struct_mlx_rebuild_status_sz);\n  _(MLX_GET_SYSDRIVE, READWRITE, sizeof(int));\n  _(MLX_GET_CINFO, WRITE, struct_mlx_cinfo_sz);\n  /* Entries from file: dev/ic/nvmeio.h */\n  _(NVME_PASSTHROUGH_CMD, READWRITE, struct_nvme_pt_command_sz);\n  /* Entries from file: dev/ic/qemufwcfgio.h */\n  _(FWCFGIO_SET_INDEX, READ, sizeof(u16));\n  /* Entries from file: dev/ir/irdaio.h */\n  _(IRDA_RESET_PARAMS, NONE, 0);\n  _(IRDA_SET_PARAMS, READ, struct_irda_params_sz);\n  _(IRDA_GET_SPEEDMASK, WRITE, sizeof(unsigned int));\n  _(IRDA_GET_TURNAROUNDMASK, WRITE, sizeof(unsigned int));\n  _(IRFRAMETTY_GET_DEVICE, WRITE, sizeof(unsigned int));\n  _(IRFRAMETTY_GET_DONGLE, WRITE, sizeof(unsigned int));\n  _(IRFRAMETTY_SET_DONGLE, READ, sizeof(unsigned int));\n  /* Entries from file: dev/isa/isvio.h */\n  _(ISV_CMD, READWRITE, struct_isv_cmd_sz);\n  /* Entries from file: dev/isa/wtreg.h */\n  _(WTQICMD, NONE, 0);\n  /* Entries from file: dev/iscsi/iscsi_ioctl.h */\n  _(ISCSI_GET_VERSION, READWRITE, struct_iscsi_get_version_parameters_sz);\n  _(ISCSI_LOGIN, READWRITE, struct_iscsi_login_parameters_sz);\n  _(ISCSI_LOGOUT, READWRITE, struct_iscsi_logout_parameters_sz);\n  
_(ISCSI_ADD_CONNECTION, READWRITE, struct_iscsi_login_parameters_sz);\n  _(ISCSI_RESTORE_CONNECTION, READWRITE, struct_iscsi_login_parameters_sz);\n  _(ISCSI_REMOVE_CONNECTION, READWRITE, struct_iscsi_remove_parameters_sz);\n  _(ISCSI_CONNECTION_STATUS, READWRITE, struct_iscsi_conn_status_parameters_sz);\n  _(ISCSI_SEND_TARGETS, READWRITE, struct_iscsi_send_targets_parameters_sz);\n  _(ISCSI_SET_NODE_NAME, READWRITE, struct_iscsi_set_node_name_parameters_sz);\n  _(ISCSI_IO_COMMAND, READWRITE, struct_iscsi_iocommand_parameters_sz);\n  _(ISCSI_REGISTER_EVENT, READWRITE, struct_iscsi_register_event_parameters_sz);\n  _(ISCSI_DEREGISTER_EVENT, READWRITE,\n    struct_iscsi_register_event_parameters_sz);\n  _(ISCSI_WAIT_EVENT, READWRITE, struct_iscsi_wait_event_parameters_sz);\n  _(ISCSI_POLL_EVENT, READWRITE, struct_iscsi_wait_event_parameters_sz);\n  /* Entries from file: dev/ofw/openfirmio.h */\n  _(OFIOCGET, READWRITE, struct_ofiocdesc_sz);\n  _(OFIOCSET, READ, struct_ofiocdesc_sz);\n  _(OFIOCNEXTPROP, READWRITE, struct_ofiocdesc_sz);\n  _(OFIOCGETOPTNODE, WRITE, sizeof(int));\n  _(OFIOCGETNEXT, READWRITE, sizeof(int));\n  _(OFIOCGETCHILD, READWRITE, sizeof(int));\n  _(OFIOCFINDDEVICE, READWRITE, struct_ofiocdesc_sz);\n  /* Entries from file: dev/pci/amrio.h */\n  _(AMR_IO_VERSION, WRITE, sizeof(int));\n  _(AMR_IO_COMMAND, READWRITE, struct_amr_user_ioctl_sz);\n  /* Entries from file: dev/pci/mlyio.h */\n  _(MLYIO_COMMAND, READWRITE, struct_mly_user_command_sz);\n  _(MLYIO_HEALTH, READ, struct_mly_user_health_sz);\n  /* Entries from file: dev/pci/pciio.h */\n  _(PCI_IOC_CFGREAD, READWRITE, struct_pciio_cfgreg_sz);\n  _(PCI_IOC_CFGWRITE, READ, struct_pciio_cfgreg_sz);\n  _(PCI_IOC_BDF_CFGREAD, READWRITE, struct_pciio_bdf_cfgreg_sz);\n  _(PCI_IOC_BDF_CFGWRITE, READ, struct_pciio_bdf_cfgreg_sz);\n  _(PCI_IOC_BUSINFO, WRITE, struct_pciio_businfo_sz);\n  _(PCI_IOC_DRVNAME, READWRITE, struct_pciio_drvname_sz);\n  _(PCI_IOC_DRVNAMEONBUS, READWRITE, 
struct_pciio_drvnameonbus_sz);\n  /* Entries from file: dev/pci/tweio.h */\n  _(TWEIO_COMMAND, READWRITE, struct_twe_usercommand_sz);\n  _(TWEIO_STATS, READWRITE, union_twe_statrequest_sz);\n  _(TWEIO_AEN_POLL, WRITE, sizeof(int));\n  _(TWEIO_AEN_WAIT, WRITE, sizeof(int));\n  _(TWEIO_SET_PARAM, READ, struct_twe_paramcommand_sz);\n  _(TWEIO_GET_PARAM, READ, struct_twe_paramcommand_sz);\n  _(TWEIO_RESET, NONE, 0);\n  _(TWEIO_ADD_UNIT, READ, struct_twe_drivecommand_sz);\n  _(TWEIO_DEL_UNIT, READ, struct_twe_drivecommand_sz);\n  /* Entries from file: dev/pcmcia/if_cnwioctl.h */\n  _(SIOCSCNWDOMAIN, READ, struct_ifreq_sz);\n  _(SIOCGCNWDOMAIN, READWRITE, struct_ifreq_sz);\n  _(SIOCSCNWKEY, READWRITE, struct_ifreq_sz);\n  _(SIOCGCNWSTATUS, READWRITE, struct_cnwstatus_sz);\n  _(SIOCGCNWSTATS, READWRITE, struct_cnwistats_sz);\n  _(SIOCGCNWTRAIL, READWRITE, struct_cnwitrail_sz);\n  /* Entries from file: dev/pcmcia/if_rayreg.h */\n  _(SIOCGRAYSIGLEV, READWRITE, struct_ifreq_sz);\n  /* Entries from file: dev/raidframe/raidframeio.h */\n  _(RAIDFRAME_SHUTDOWN, NONE, 0);\n  _(RAIDFRAME_TUR, READ, sizeof(u64));\n  _(RAIDFRAME_FAIL_DISK, READ, struct_rf_recon_req_sz);\n  _(RAIDFRAME_CHECK_RECON_STATUS, READWRITE, sizeof(int));\n  _(RAIDFRAME_REWRITEPARITY, NONE, 0);\n  _(RAIDFRAME_COPYBACK, NONE, 0);\n  _(RAIDFRAME_SPARET_WAIT, WRITE, struct_RF_SparetWait_sz);\n  _(RAIDFRAME_SEND_SPARET, READ, sizeof(uptr));\n  _(RAIDFRAME_ABORT_SPARET_WAIT, NONE, 0);\n  _(RAIDFRAME_START_ATRACE, NONE, 0);\n  _(RAIDFRAME_STOP_ATRACE, NONE, 0);\n  _(RAIDFRAME_GET_SIZE, WRITE, sizeof(int));\n  _(RAIDFRAME_RESET_ACCTOTALS, NONE, 0);\n  _(RAIDFRAME_KEEP_ACCTOTALS, READ, sizeof(int));\n  _(RAIDFRAME_GET_COMPONENT_LABEL, READWRITE, struct_RF_ComponentLabel_sz);\n  _(RAIDFRAME_SET_COMPONENT_LABEL, READ, struct_RF_ComponentLabel_sz);\n  _(RAIDFRAME_INIT_LABELS, READ, struct_RF_ComponentLabel_sz);\n  _(RAIDFRAME_ADD_HOT_SPARE, READ, struct_RF_SingleComponent_sz);\n  _(RAIDFRAME_REMOVE_HOT_SPARE, READ, 
struct_RF_SingleComponent_sz);\n  _(RAIDFRAME_REBUILD_IN_PLACE, READ, struct_RF_SingleComponent_sz);\n  _(RAIDFRAME_CHECK_PARITY, READWRITE, sizeof(int));\n  _(RAIDFRAME_CHECK_PARITYREWRITE_STATUS, READWRITE, sizeof(int));\n  _(RAIDFRAME_CHECK_COPYBACK_STATUS, READWRITE, sizeof(int));\n  _(RAIDFRAME_SET_AUTOCONFIG, READWRITE, sizeof(int));\n  _(RAIDFRAME_SET_ROOT, READWRITE, sizeof(int));\n  _(RAIDFRAME_DELETE_COMPONENT, READ, struct_RF_SingleComponent_sz);\n  _(RAIDFRAME_INCORPORATE_HOT_SPARE, READ, struct_RF_SingleComponent_sz);\n  _(RAIDFRAME_CHECK_RECON_STATUS_EXT, READWRITE, struct_RF_ProgressInfo_sz);\n  _(RAIDFRAME_CHECK_PARITYREWRITE_STATUS_EXT, READWRITE,\n    struct_RF_ProgressInfo_sz);\n  _(RAIDFRAME_CHECK_COPYBACK_STATUS_EXT, READWRITE, struct_RF_ProgressInfo_sz);\n  _(RAIDFRAME_PARITYMAP_STATUS, WRITE, struct_rf_pmstat_sz);\n  _(RAIDFRAME_PARITYMAP_GET_DISABLE, WRITE, sizeof(int));\n  _(RAIDFRAME_PARITYMAP_SET_DISABLE, READ, sizeof(int));\n  _(RAIDFRAME_PARITYMAP_SET_PARAMS, READ, struct_rf_pmparams_sz);\n  _(RAIDFRAME_SET_LAST_UNIT, READ, sizeof(int));\n  _(RAIDFRAME_GET_INFO, READWRITE, sizeof(uptr));\n  _(RAIDFRAME_CONFIGURE, READ, sizeof(uptr));\n  /* Entries from file: dev/sbus/mbppio.h */\n  _(MBPPIOCSPARAM, READ, struct_mbpp_param_sz);\n  _(MBPPIOCGPARAM, WRITE, struct_mbpp_param_sz);\n  _(MBPPIOCGSTAT, WRITE, sizeof(int));\n  /* Entries from file: dev/scsipi/ses.h */\n  _(SESIOC_GETNOBJ, NONE, 0);\n  _(SESIOC_GETOBJMAP, NONE, 0);\n  _(SESIOC_GETENCSTAT, NONE, 0);\n  _(SESIOC_SETENCSTAT, NONE, 0);\n  _(SESIOC_GETOBJSTAT, NONE, 0);\n  _(SESIOC_SETOBJSTAT, NONE, 0);\n  _(SESIOC_GETTEXT, NONE, 0);\n  _(SESIOC_INIT, NONE, 0);\n  /* Entries from file: dev/sun/disklabel.h */\n  _(SUN_DKIOCGGEOM, WRITE, struct_sun_dkgeom_sz);\n  _(SUN_DKIOCINFO, WRITE, struct_sun_dkctlr_sz);\n  _(SUN_DKIOCGPART, WRITE, struct_sun_dkpart_sz);\n  /* Entries from file: dev/sun/fbio.h */\n  _(FBIOGTYPE, WRITE, struct_fbtype_sz);\n  _(FBIOPUTCMAP, READ, struct_fbcmap_sz);\n 
 _(FBIOGETCMAP, READ, struct_fbcmap_sz);\n  _(FBIOGATTR, WRITE, struct_fbgattr_sz);\n  _(FBIOSVIDEO, READ, sizeof(int));\n  _(FBIOGVIDEO, WRITE, sizeof(int));\n  _(FBIOSCURSOR, READ, struct_fbcursor_sz);\n  _(FBIOGCURSOR, READWRITE, struct_fbcursor_sz);\n  _(FBIOSCURPOS, READ, struct_fbcurpos_sz);\n  _(FBIOGCURPOS, READ, struct_fbcurpos_sz);\n  _(FBIOGCURMAX, WRITE, struct_fbcurpos_sz);\n  /* Entries from file: dev/sun/kbio.h */\n  _(KIOCTRANS, READ, sizeof(int));\n  _(KIOCSETKEY, READWRITE, struct_okiockey_sz);\n  _(KIOCGETKEY, READWRITE, struct_okiockey_sz);\n  _(KIOCGTRANS, WRITE, sizeof(int));\n  _(KIOCCMD, READ, sizeof(int));\n  _(KIOCTYPE, WRITE, sizeof(int));\n  _(KIOCSDIRECT, READ, sizeof(int));\n  _(KIOCSKEY, READ, struct_kiockeymap_sz);\n  _(KIOCGKEY, READWRITE, struct_kiockeymap_sz);\n  _(KIOCSLED, READ, sizeof(char));\n  _(KIOCGLED, WRITE, sizeof(char));\n  _(KIOCLAYOUT, WRITE, sizeof(int));\n  /* Entries from file: dev/sun/vuid_event.h */\n  _(VUIDSFORMAT, READ, sizeof(int));\n  _(VUIDGFORMAT, WRITE, sizeof(int));\n  /* Entries from file: dev/tc/sticio.h */\n  _(STICIO_GXINFO, WRITE, struct_stic_xinfo_sz);\n  _(STICIO_RESET, NONE, 0);\n  _(STICIO_STARTQ, NONE, 0);\n  _(STICIO_STOPQ, NONE, 0);\n  /* Entries from file: dev/usb/ukyopon.h */\n  _(UKYOPON_IDENTIFY, WRITE, struct_ukyopon_identify_sz);\n  /* Entries from file: dev/usb/usb.h */\n  _(USB_REQUEST, READWRITE, struct_usb_ctl_request_sz);\n  _(USB_SETDEBUG, READ, sizeof(int));\n  _(USB_DISCOVER, NONE, 0);\n  _(USB_DEVICEINFO, READWRITE, struct_usb_device_info_sz);\n  _(USB_DEVICEINFO_OLD, READWRITE, struct_usb_device_info_old_sz);\n  _(USB_DEVICESTATS, WRITE, struct_usb_device_stats_sz);\n  _(USB_GET_REPORT_DESC, WRITE, struct_usb_ctl_report_desc_sz);\n  _(USB_SET_IMMED, READ, sizeof(int));\n  _(USB_GET_REPORT, READWRITE, struct_usb_ctl_report_sz);\n  _(USB_SET_REPORT, READ, struct_usb_ctl_report_sz);\n  _(USB_GET_REPORT_ID, WRITE, sizeof(int));\n  _(USB_GET_CONFIG, WRITE, sizeof(int));\n  
_(USB_SET_CONFIG, READ, sizeof(int));\n  _(USB_GET_ALTINTERFACE, READWRITE, struct_usb_alt_interface_sz);\n  _(USB_SET_ALTINTERFACE, READWRITE, struct_usb_alt_interface_sz);\n  _(USB_GET_NO_ALT, READWRITE, struct_usb_alt_interface_sz);\n  _(USB_GET_DEVICE_DESC, WRITE, struct_usb_device_descriptor_sz);\n  _(USB_GET_CONFIG_DESC, READWRITE, struct_usb_config_desc_sz);\n  _(USB_GET_INTERFACE_DESC, READWRITE, struct_usb_interface_desc_sz);\n  _(USB_GET_ENDPOINT_DESC, READWRITE, struct_usb_endpoint_desc_sz);\n  _(USB_GET_FULL_DESC, READWRITE, struct_usb_full_desc_sz);\n  _(USB_GET_STRING_DESC, READWRITE, struct_usb_string_desc_sz);\n  _(USB_DO_REQUEST, READWRITE, struct_usb_ctl_request_sz);\n  _(USB_GET_DEVICEINFO, WRITE, struct_usb_device_info_sz);\n  _(USB_GET_DEVICEINFO_OLD, WRITE, struct_usb_device_info_old_sz);\n  _(USB_SET_SHORT_XFER, READ, sizeof(int));\n  _(USB_SET_TIMEOUT, READ, sizeof(int));\n  _(USB_SET_BULK_RA, READ, sizeof(int));\n  _(USB_SET_BULK_WB, READ, sizeof(int));\n  _(USB_SET_BULK_RA_OPT, READ, struct_usb_bulk_ra_wb_opt_sz);\n  _(USB_SET_BULK_WB_OPT, READ, struct_usb_bulk_ra_wb_opt_sz);\n  _(USB_GET_CM_OVER_DATA, WRITE, sizeof(int));\n  _(USB_SET_CM_OVER_DATA, READ, sizeof(int));\n  /* Entries from file: dev/usb/utoppy.h */\n  _(UTOPPYIOTURBO, READ, sizeof(int));\n  _(UTOPPYIOREBOOT, NONE, 0);\n  _(UTOPPYIOSTATS, WRITE, struct_utoppy_stats_sz);\n  _(UTOPPYIORENAME, READ, struct_utoppy_rename_sz);\n  _(UTOPPYIOMKDIR, READ, sizeof(uptr));\n  _(UTOPPYIODELETE, READ, sizeof(uptr));\n  _(UTOPPYIOREADDIR, READ, sizeof(uptr));\n  _(UTOPPYIOREADFILE, READ, struct_utoppy_readfile_sz);\n  _(UTOPPYIOWRITEFILE, READ, struct_utoppy_writefile_sz);\n  /* Entries from file: dev/vme/xio.h */\n  _(DIOSXDCMD, READWRITE, struct_xd_iocmd_sz);\n  /* Entries from file: dev/wscons/wsdisplay_usl_io.h */\n  _(VT_OPENQRY, WRITE, sizeof(int));\n  _(VT_SETMODE, READ, struct_vt_mode_sz);\n  _(VT_GETMODE, WRITE, struct_vt_mode_sz);\n  _(VT_RELDISP, NONE, 0);\n  _(VT_ACTIVATE, 
NONE, 0);\n  _(VT_WAITACTIVE, NONE, 0);\n  _(VT_GETACTIVE, WRITE, sizeof(int));\n  _(VT_GETSTATE, WRITE, struct_vt_stat_sz);\n  _(KDGETKBENT, READWRITE, struct_kbentry_sz);\n  _(KDGKBMODE, WRITE, sizeof(int));\n  _(KDSKBMODE, NONE, 0);\n  _(KDMKTONE, NONE, 0);\n  _(KDSETMODE, NONE, 0);\n  _(KDENABIO, NONE, 0);\n  _(KDDISABIO, NONE, 0);\n  _(KDGKBTYPE, WRITE, sizeof(char));\n  _(KDGETLED, WRITE, sizeof(int));\n  _(KDSETLED, NONE, 0);\n  _(KDSETRAD, NONE, 0);\n  _(VGAPCVTID, READWRITE, struct_pcvtid_sz);\n  _(CONS_GETVERS, WRITE, sizeof(int));\n  /* Entries from file: dev/wscons/wsconsio.h */\n  _(WSKBDIO_GTYPE, WRITE, sizeof(unsigned int));\n  _(WSKBDIO_BELL, NONE, 0);\n  _(WSKBDIO_COMPLEXBELL, READ, struct_wskbd_bell_data_sz);\n  _(WSKBDIO_SETBELL, READ, struct_wskbd_bell_data_sz);\n  _(WSKBDIO_GETBELL, WRITE, struct_wskbd_bell_data_sz);\n  _(WSKBDIO_SETDEFAULTBELL, READ, struct_wskbd_bell_data_sz);\n  _(WSKBDIO_GETDEFAULTBELL, WRITE, struct_wskbd_bell_data_sz);\n  _(WSKBDIO_SETKEYREPEAT, READ, struct_wskbd_keyrepeat_data_sz);\n  _(WSKBDIO_GETKEYREPEAT, WRITE, struct_wskbd_keyrepeat_data_sz);\n  _(WSKBDIO_SETDEFAULTKEYREPEAT, READ, struct_wskbd_keyrepeat_data_sz);\n  _(WSKBDIO_GETDEFAULTKEYREPEAT, WRITE, struct_wskbd_keyrepeat_data_sz);\n  _(WSKBDIO_SETLEDS, READ, sizeof(int));\n  _(WSKBDIO_GETLEDS, WRITE, sizeof(int));\n  _(WSKBDIO_GETMAP, READWRITE, struct_wskbd_map_data_sz);\n  _(WSKBDIO_SETMAP, READ, struct_wskbd_map_data_sz);\n  _(WSKBDIO_GETENCODING, WRITE, sizeof(int));\n  _(WSKBDIO_SETENCODING, READ, sizeof(int));\n  _(WSKBDIO_SETMODE, READ, sizeof(int));\n  _(WSKBDIO_GETMODE, WRITE, sizeof(int));\n  _(WSKBDIO_SETKEYCLICK, READ, sizeof(int));\n  _(WSKBDIO_GETKEYCLICK, WRITE, sizeof(int));\n  _(WSKBDIO_GETSCROLL, WRITE, struct_wskbd_scroll_data_sz);\n  _(WSKBDIO_SETSCROLL, READ, struct_wskbd_scroll_data_sz);\n  _(WSKBDIO_SETVERSION, READ, sizeof(int));\n  _(WSMOUSEIO_GTYPE, WRITE, sizeof(unsigned int));\n  _(WSMOUSEIO_SRES, READ, sizeof(unsigned int));\n  
_(WSMOUSEIO_SSCALE, READ, sizeof(unsigned int));\n  _(WSMOUSEIO_SRATE, READ, sizeof(unsigned int));\n  _(WSMOUSEIO_SCALIBCOORDS, READ, struct_wsmouse_calibcoords_sz);\n  _(WSMOUSEIO_GCALIBCOORDS, WRITE, struct_wsmouse_calibcoords_sz);\n  _(WSMOUSEIO_GETID, READWRITE, struct_wsmouse_id_sz);\n  _(WSMOUSEIO_GETREPEAT, WRITE, struct_wsmouse_repeat_sz);\n  _(WSMOUSEIO_SETREPEAT, READ, struct_wsmouse_repeat_sz);\n  _(WSMOUSEIO_SETVERSION, READ, sizeof(int));\n  _(WSDISPLAYIO_GTYPE, WRITE, sizeof(unsigned int));\n  _(WSDISPLAYIO_GINFO, WRITE, struct_wsdisplay_fbinfo_sz);\n  _(WSDISPLAYIO_GETCMAP, READ, struct_wsdisplay_cmap_sz);\n  _(WSDISPLAYIO_PUTCMAP, READ, struct_wsdisplay_cmap_sz);\n  _(WSDISPLAYIO_GVIDEO, WRITE, sizeof(unsigned int));\n  _(WSDISPLAYIO_SVIDEO, READ, sizeof(unsigned int));\n  _(WSDISPLAYIO_GCURPOS, WRITE, struct_wsdisplay_curpos_sz);\n  _(WSDISPLAYIO_SCURPOS, READ, struct_wsdisplay_curpos_sz);\n  _(WSDISPLAYIO_GCURMAX, WRITE, struct_wsdisplay_curpos_sz);\n  _(WSDISPLAYIO_GCURSOR, READWRITE, struct_wsdisplay_cursor_sz);\n  _(WSDISPLAYIO_SCURSOR, READ, struct_wsdisplay_cursor_sz);\n  _(WSDISPLAYIO_GMODE, WRITE, sizeof(unsigned int));\n  _(WSDISPLAYIO_SMODE, READ, sizeof(unsigned int));\n  _(WSDISPLAYIO_LDFONT, READ, struct_wsdisplay_font_sz);\n  _(WSDISPLAYIO_ADDSCREEN, READ, struct_wsdisplay_addscreendata_sz);\n  _(WSDISPLAYIO_DELSCREEN, READ, struct_wsdisplay_delscreendata_sz);\n  _(WSDISPLAYIO_SFONT, READ, struct_wsdisplay_usefontdata_sz);\n  _(_O_WSDISPLAYIO_SETKEYBOARD, READWRITE, struct_wsdisplay_kbddata_sz);\n  _(WSDISPLAYIO_GETPARAM, READWRITE, struct_wsdisplay_param_sz);\n  _(WSDISPLAYIO_SETPARAM, READWRITE, struct_wsdisplay_param_sz);\n  _(WSDISPLAYIO_GETACTIVESCREEN, WRITE, sizeof(int));\n  _(WSDISPLAYIO_GETWSCHAR, READWRITE, struct_wsdisplay_char_sz);\n  _(WSDISPLAYIO_PUTWSCHAR, READWRITE, struct_wsdisplay_char_sz);\n  _(WSDISPLAYIO_DGSCROLL, WRITE, struct_wsdisplay_scroll_data_sz);\n  _(WSDISPLAYIO_DSSCROLL, READ, 
struct_wsdisplay_scroll_data_sz);\n  _(WSDISPLAYIO_GMSGATTRS, WRITE, struct_wsdisplay_msgattrs_sz);\n  _(WSDISPLAYIO_SMSGATTRS, READ, struct_wsdisplay_msgattrs_sz);\n  _(WSDISPLAYIO_GBORDER, WRITE, sizeof(int));\n  _(WSDISPLAYIO_SBORDER, READ, sizeof(int));\n  _(WSDISPLAYIO_SSPLASH, READ, sizeof(int));\n  _(WSDISPLAYIO_SPROGRESS, READ, sizeof(int));\n  _(WSDISPLAYIO_LINEBYTES, WRITE, sizeof(unsigned int));\n  _(WSDISPLAYIO_SETVERSION, READ, sizeof(int));\n  _(WSMUXIO_ADD_DEVICE, READ, struct_wsmux_device_sz);\n  _(WSMUXIO_REMOVE_DEVICE, READ, struct_wsmux_device_sz);\n  _(WSMUXIO_LIST_DEVICES, READWRITE, struct_wsmux_device_list_sz);\n  _(WSMUXIO_INJECTEVENT, READ, struct_wscons_event_sz);\n  _(WSDISPLAYIO_GET_BUSID, WRITE, struct_wsdisplayio_bus_id_sz);\n  _(WSDISPLAYIO_GET_EDID, READWRITE, struct_wsdisplayio_edid_info_sz);\n  _(WSDISPLAYIO_SET_POLLING, READ, sizeof(int));\n  _(WSDISPLAYIO_GET_FBINFO, READWRITE, struct_wsdisplayio_fbinfo_sz);\n  _(WSDISPLAYIO_DOBLIT, READWRITE, struct_wsdisplayio_blit_sz);\n  _(WSDISPLAYIO_WAITBLIT, READWRITE, struct_wsdisplayio_blit_sz);\n  /* Entries from file: dev/biovar.h */\n  _(BIOCLOCATE, READWRITE, struct_bio_locate_sz);\n  _(BIOCINQ, READWRITE, struct_bioc_inq_sz);\n  _(BIOCDISK_NOVOL, READWRITE, struct_bioc_disk_sz);\n  _(BIOCDISK, READWRITE, struct_bioc_disk_sz);\n  _(BIOCVOL, READWRITE, struct_bioc_vol_sz);\n  _(BIOCALARM, READWRITE, struct_bioc_alarm_sz);\n  _(BIOCBLINK, READWRITE, struct_bioc_blink_sz);\n  _(BIOCSETSTATE, READWRITE, struct_bioc_setstate_sz);\n  _(BIOCVOLOPS, READWRITE, struct_bioc_volops_sz);\n  /* Entries from file: dev/md.h */\n  _(MD_GETCONF, WRITE, struct_md_conf_sz);\n  _(MD_SETCONF, READ, struct_md_conf_sz);\n  /* Entries from file: dev/ccdvar.h */\n  _(CCDIOCSET, READWRITE, struct_ccd_ioctl_sz);\n  _(CCDIOCCLR, READ, struct_ccd_ioctl_sz);\n  /* Entries from file: dev/cgdvar.h */\n  _(CGDIOCSET, READWRITE, struct_cgd_ioctl_sz);\n  _(CGDIOCCLR, READ, struct_cgd_ioctl_sz);\n  _(CGDIOCGET, 
READWRITE, struct_cgd_user_sz);\n  /* Entries from file: dev/fssvar.h */\n  _(FSSIOCSET, READ, struct_fss_set_sz);\n  _(FSSIOCGET, WRITE, struct_fss_get_sz);\n  _(FSSIOCCLR, NONE, 0);\n  _(FSSIOFSET, READ, sizeof(int));\n  _(FSSIOFGET, WRITE, sizeof(int));\n  /* Entries from file: dev/bluetooth/btdev.h */\n  _(BTDEV_ATTACH, READ, struct_plistref_sz);\n  _(BTDEV_DETACH, READ, struct_plistref_sz);\n  /* Entries from file: dev/bluetooth/btsco.h */\n  _(BTSCO_GETINFO, WRITE, struct_btsco_info_sz);\n  /* Entries from file: dev/kttcpio.h */\n  _(KTTCP_IO_SEND, READWRITE, struct_kttcp_io_args_sz);\n  _(KTTCP_IO_RECV, READWRITE, struct_kttcp_io_args_sz);\n  /* Entries from file: dev/lockstat.h */\n  _(IOC_LOCKSTAT_GVERSION, WRITE, sizeof(int));\n  _(IOC_LOCKSTAT_ENABLE, READ, struct_lsenable_sz);\n  _(IOC_LOCKSTAT_DISABLE, WRITE, struct_lsdisable_sz);\n  /* Entries from file: dev/vndvar.h */\n  _(VNDIOCSET, READWRITE, struct_vnd_ioctl_sz);\n  _(VNDIOCCLR, READ, struct_vnd_ioctl_sz);\n  _(VNDIOCGET, READWRITE, struct_vnd_user_sz);\n  /* Entries from file: dev/spkrio.h */\n  _(SPKRTONE, READ, struct_tone_sz);\n  _(SPKRTUNE, NONE, 0);\n  _(SPKRGETVOL, WRITE, sizeof(unsigned int));\n  _(SPKRSETVOL, READ, sizeof(unsigned int));\n#if defined(__x86_64__)\n  /* Entries from file: dev/nvmm/nvmm_ioctl.h */\n  _(NVMM_IOC_CAPABILITY, WRITE, struct_nvmm_ioc_capability_sz);\n  _(NVMM_IOC_MACHINE_CREATE, READWRITE, struct_nvmm_ioc_machine_create_sz);\n  _(NVMM_IOC_MACHINE_DESTROY, READ, struct_nvmm_ioc_machine_destroy_sz);\n  _(NVMM_IOC_MACHINE_CONFIGURE, READ, struct_nvmm_ioc_machine_configure_sz);\n  _(NVMM_IOC_VCPU_CREATE, READ, struct_nvmm_ioc_vcpu_create_sz);\n  _(NVMM_IOC_VCPU_DESTROY, READ, struct_nvmm_ioc_vcpu_destroy_sz);\n  _(NVMM_IOC_VCPU_CONFIGURE, READ, struct_nvmm_ioc_vcpu_configure_sz);\n  _(NVMM_IOC_VCPU_SETSTATE, READ, struct_nvmm_ioc_vcpu_setstate_sz);\n  _(NVMM_IOC_VCPU_GETSTATE, READ, struct_nvmm_ioc_vcpu_getstate_sz);\n  _(NVMM_IOC_VCPU_INJECT, READ, 
struct_nvmm_ioc_vcpu_inject_sz);\n  _(NVMM_IOC_VCPU_RUN, READWRITE, struct_nvmm_ioc_vcpu_run_sz);\n  _(NVMM_IOC_GPA_MAP, READ, struct_nvmm_ioc_gpa_map_sz);\n  _(NVMM_IOC_GPA_UNMAP, READ, struct_nvmm_ioc_gpa_unmap_sz);\n  _(NVMM_IOC_HVA_MAP, READ, struct_nvmm_ioc_hva_map_sz);\n  _(NVMM_IOC_HVA_UNMAP, READ, struct_nvmm_ioc_hva_unmap_sz);\n  _(NVMM_IOC_CTL, READ, struct_nvmm_ioc_ctl_sz);\n#endif\n  /* Entries from file: dev/spi/spi_io.h */\n  _(SPI_IOCTL_CONFIGURE, READ, struct_spi_ioctl_configure_sz);\n  _(SPI_IOCTL_TRANSFER, READ, struct_spi_ioctl_transfer_sz);\n  /* Entries from file: fs/autofs/autofs_ioctl.h */\n  _(AUTOFSREQUEST, WRITE, struct_autofs_daemon_request_sz);\n  _(AUTOFSDONE, READ, struct_autofs_daemon_done_sz);\n  /* Entries from file: net/bpf.h */\n  _(BIOCGBLEN, WRITE, sizeof(unsigned int));\n  _(BIOCSBLEN, READWRITE, sizeof(unsigned int));\n  _(BIOCSETF, READ, struct_bpf_program_sz);\n  _(BIOCFLUSH, NONE, 0);\n  _(BIOCPROMISC, NONE, 0);\n  _(BIOCGDLT, WRITE, sizeof(unsigned int));\n  _(BIOCGETIF, WRITE, struct_ifreq_sz);\n  _(BIOCSETIF, READ, struct_ifreq_sz);\n  _(BIOCGSTATS, WRITE, struct_bpf_stat_sz);\n  _(BIOCGSTATSOLD, WRITE, struct_bpf_stat_old_sz);\n  _(BIOCIMMEDIATE, READ, sizeof(unsigned int));\n  _(BIOCVERSION, WRITE, struct_bpf_version_sz);\n  _(BIOCSTCPF, READ, struct_bpf_program_sz);\n  _(BIOCSUDPF, READ, struct_bpf_program_sz);\n  _(BIOCGHDRCMPLT, WRITE, sizeof(unsigned int));\n  _(BIOCSHDRCMPLT, READ, sizeof(unsigned int));\n  _(BIOCSDLT, READ, sizeof(unsigned int));\n  _(BIOCGDLTLIST, READWRITE, struct_bpf_dltlist_sz);\n  _(BIOCGDIRECTION, WRITE, sizeof(unsigned int));\n  _(BIOCSDIRECTION, READ, sizeof(unsigned int));\n  _(BIOCSRTIMEOUT, READ, struct_timeval_sz);\n  _(BIOCGRTIMEOUT, WRITE, struct_timeval_sz);\n  _(BIOCGFEEDBACK, WRITE, sizeof(unsigned int));\n  _(BIOCSFEEDBACK, READ, sizeof(unsigned int));\n  /* Entries from file: net/if_gre.h */\n  _(GRESADDRS, READ, struct_ifreq_sz);\n  _(GRESADDRD, READ, struct_ifreq_sz);\n  
_(GREGADDRS, READWRITE, struct_ifreq_sz);\n  _(GREGADDRD, READWRITE, struct_ifreq_sz);\n  _(GRESPROTO, READ, struct_ifreq_sz);\n  _(GREGPROTO, READWRITE, struct_ifreq_sz);\n  _(GRESSOCK, READ, struct_ifreq_sz);\n  _(GREDSOCK, READ, struct_ifreq_sz);\n  /* Entries from file: net/if_ppp.h */\n  _(PPPIOCGRAWIN, WRITE, struct_ppp_rawin_sz);\n  _(PPPIOCGFLAGS, WRITE, sizeof(int));\n  _(PPPIOCSFLAGS, READ, sizeof(int));\n  _(PPPIOCGASYNCMAP, WRITE, sizeof(int));\n  _(PPPIOCSASYNCMAP, READ, sizeof(int));\n  _(PPPIOCGUNIT, WRITE, sizeof(int));\n  _(PPPIOCGRASYNCMAP, WRITE, sizeof(int));\n  _(PPPIOCSRASYNCMAP, READ, sizeof(int));\n  _(PPPIOCGMRU, WRITE, sizeof(int));\n  _(PPPIOCSMRU, READ, sizeof(int));\n  _(PPPIOCSMAXCID, READ, sizeof(int));\n  _(PPPIOCGXASYNCMAP, WRITE, (8 * sizeof(u32)));\n  _(PPPIOCSXASYNCMAP, READ, (8 * sizeof(u32)));\n  _(PPPIOCXFERUNIT, NONE, 0);\n  _(PPPIOCSCOMPRESS, READ, struct_ppp_option_data_sz);\n  _(PPPIOCGNPMODE, READWRITE, struct_npioctl_sz);\n  _(PPPIOCSNPMODE, READ, struct_npioctl_sz);\n  _(PPPIOCGIDLE, WRITE, struct_ppp_idle_sz);\n  _(PPPIOCGMTU, WRITE, sizeof(int));\n  _(PPPIOCSMTU, READ, sizeof(int));\n  _(SIOCGPPPSTATS, READWRITE, struct_ifpppstatsreq_sz);\n  _(SIOCGPPPCSTATS, READWRITE, struct_ifpppcstatsreq_sz);\n  /* Entries from file: net/npf.h */\n  _(IOC_NPF_VERSION, WRITE, sizeof(int));\n  _(IOC_NPF_SWITCH, READ, sizeof(int));\n  _(IOC_NPF_LOAD, READWRITE, struct_nvlist_ref_sz);\n  _(IOC_NPF_TABLE, READ, struct_npf_ioctl_table_sz);\n  _(IOC_NPF_STATS, READ, sizeof(uptr));\n  _(IOC_NPF_SAVE, WRITE, struct_nvlist_ref_sz);\n  _(IOC_NPF_RULE, READWRITE, struct_nvlist_ref_sz);\n  _(IOC_NPF_CONN_LOOKUP, READWRITE, struct_nvlist_ref_sz);\n  _(IOC_NPF_TABLE_REPLACE, READWRITE, struct_nvlist_ref_sz);\n  /* Entries from file: net/if_pppoe.h */\n  _(PPPOESETPARMS, READ, struct_pppoediscparms_sz);\n  _(PPPOEGETPARMS, READWRITE, struct_pppoediscparms_sz);\n  _(PPPOEGETSESSION, READWRITE, struct_pppoeconnectionstate_sz);\n  /* Entries from 
file: net/if_sppp.h */\n  _(SPPPGETAUTHCFG, READWRITE, struct_spppauthcfg_sz);\n  _(SPPPSETAUTHCFG, READ, struct_spppauthcfg_sz);\n  _(SPPPGETLCPCFG, READWRITE, struct_sppplcpcfg_sz);\n  _(SPPPSETLCPCFG, READ, struct_sppplcpcfg_sz);\n  _(SPPPGETSTATUS, READWRITE, struct_spppstatus_sz);\n  _(SPPPGETSTATUSNCP, READWRITE, struct_spppstatusncp_sz);\n  _(SPPPGETIDLETO, READWRITE, struct_spppidletimeout_sz);\n  _(SPPPSETIDLETO, READ, struct_spppidletimeout_sz);\n  _(SPPPGETAUTHFAILURES, READWRITE, struct_spppauthfailurestats_sz);\n  _(SPPPSETAUTHFAILURE, READ, struct_spppauthfailuresettings_sz);\n  _(SPPPSETDNSOPTS, READ, struct_spppdnssettings_sz);\n  _(SPPPGETDNSOPTS, READWRITE, struct_spppdnssettings_sz);\n  _(SPPPGETDNSADDRS, READWRITE, struct_spppdnsaddrs_sz);\n  _(SPPPSETKEEPALIVE, READ, struct_spppkeepalivesettings_sz);\n  _(SPPPGETKEEPALIVE, READWRITE, struct_spppkeepalivesettings_sz);\n  /* Entries from file: net/if_srt.h */\n  _(SRT_GETNRT, WRITE, sizeof(unsigned int));\n  _(SRT_GETRT, READWRITE, struct_srt_rt_sz);\n  _(SRT_SETRT, READ, struct_srt_rt_sz);\n  _(SRT_DELRT, READ, sizeof(unsigned int));\n  _(SRT_SFLAGS, READ, sizeof(unsigned int));\n  _(SRT_GFLAGS, WRITE, sizeof(unsigned int));\n  _(SRT_SGFLAGS, READWRITE, sizeof(unsigned int));\n  _(SRT_DEBUG, READ, sizeof(uptr));\n  /* Entries from file: net/if_tap.h */\n  _(TAPGIFNAME, WRITE, struct_ifreq_sz);\n  /* Entries from file: net/if_tun.h */\n  _(TUNSDEBUG, READ, sizeof(int));\n  _(TUNGDEBUG, WRITE, sizeof(int));\n  _(TUNSIFMODE, READ, sizeof(int));\n  _(TUNSIFHEAD, READ, sizeof(int));\n  _(TUNGIFHEAD, WRITE, sizeof(int));\n  /* Entries from file: net/pfvar.h */\n  _(DIOCSTART, NONE, 0);\n  _(DIOCSTOP, NONE, 0);\n  _(DIOCADDRULE, READWRITE, struct_pfioc_rule_sz);\n  _(DIOCGETRULES, READWRITE, struct_pfioc_rule_sz);\n  _(DIOCGETRULE, READWRITE, struct_pfioc_rule_sz);\n  _(DIOCSETLCK, READWRITE, sizeof(u32));\n  _(DIOCCLRSTATES, READWRITE, struct_pfioc_state_kill_sz);\n  _(DIOCGETSTATE, READWRITE, 
struct_pfioc_state_sz);\n  _(DIOCSETSTATUSIF, READWRITE, struct_pfioc_if_sz);\n  _(DIOCGETSTATUS, READWRITE, struct_pf_status_sz);\n  _(DIOCCLRSTATUS, NONE, 0);\n  _(DIOCNATLOOK, READWRITE, struct_pfioc_natlook_sz);\n  _(DIOCSETDEBUG, READWRITE, sizeof(u32));\n  _(DIOCGETSTATES, READWRITE, struct_pfioc_states_sz);\n  _(DIOCCHANGERULE, READWRITE, struct_pfioc_rule_sz);\n  _(DIOCSETTIMEOUT, READWRITE, struct_pfioc_tm_sz);\n  _(DIOCGETTIMEOUT, READWRITE, struct_pfioc_tm_sz);\n  _(DIOCADDSTATE, READWRITE, struct_pfioc_state_sz);\n  _(DIOCCLRRULECTRS, NONE, 0);\n  _(DIOCGETLIMIT, READWRITE, struct_pfioc_limit_sz);\n  _(DIOCSETLIMIT, READWRITE, struct_pfioc_limit_sz);\n  _(DIOCKILLSTATES, READWRITE, struct_pfioc_state_kill_sz);\n  _(DIOCSTARTALTQ, NONE, 0);\n  _(DIOCSTOPALTQ, NONE, 0);\n  _(DIOCADDALTQ, READWRITE, struct_pfioc_altq_sz);\n  _(DIOCGETALTQS, READWRITE, struct_pfioc_altq_sz);\n  _(DIOCGETALTQ, READWRITE, struct_pfioc_altq_sz);\n  _(DIOCCHANGEALTQ, READWRITE, struct_pfioc_altq_sz);\n  _(DIOCGETQSTATS, READWRITE, struct_pfioc_qstats_sz);\n  _(DIOCBEGINADDRS, READWRITE, struct_pfioc_pooladdr_sz);\n  _(DIOCADDADDR, READWRITE, struct_pfioc_pooladdr_sz);\n  _(DIOCGETADDRS, READWRITE, struct_pfioc_pooladdr_sz);\n  _(DIOCGETADDR, READWRITE, struct_pfioc_pooladdr_sz);\n  _(DIOCCHANGEADDR, READWRITE, struct_pfioc_pooladdr_sz);\n  _(DIOCADDSTATES, READWRITE, struct_pfioc_states_sz);\n  _(DIOCGETRULESETS, READWRITE, struct_pfioc_ruleset_sz);\n  _(DIOCGETRULESET, READWRITE, struct_pfioc_ruleset_sz);\n  _(DIOCRCLRTABLES, READWRITE, struct_pfioc_table_sz);\n  _(DIOCRADDTABLES, READWRITE, struct_pfioc_table_sz);\n  _(DIOCRDELTABLES, READWRITE, struct_pfioc_table_sz);\n  _(DIOCRGETTABLES, READWRITE, struct_pfioc_table_sz);\n  _(DIOCRGETTSTATS, READWRITE, struct_pfioc_table_sz);\n  _(DIOCRCLRTSTATS, READWRITE, struct_pfioc_table_sz);\n  _(DIOCRCLRADDRS, READWRITE, struct_pfioc_table_sz);\n  _(DIOCRADDADDRS, READWRITE, struct_pfioc_table_sz);\n  _(DIOCRDELADDRS, READWRITE, 
struct_pfioc_table_sz);\n  _(DIOCRSETADDRS, READWRITE, struct_pfioc_table_sz);\n  _(DIOCRGETADDRS, READWRITE, struct_pfioc_table_sz);\n  _(DIOCRGETASTATS, READWRITE, struct_pfioc_table_sz);\n  _(DIOCRCLRASTATS, READWRITE, struct_pfioc_table_sz);\n  _(DIOCRTSTADDRS, READWRITE, struct_pfioc_table_sz);\n  _(DIOCRSETTFLAGS, READWRITE, struct_pfioc_table_sz);\n  _(DIOCRINADEFINE, READWRITE, struct_pfioc_table_sz);\n  _(DIOCOSFPFLUSH, NONE, 0);\n  _(DIOCOSFPADD, READWRITE, struct_pf_osfp_ioctl_sz);\n  _(DIOCOSFPGET, READWRITE, struct_pf_osfp_ioctl_sz);\n  _(DIOCXBEGIN, READWRITE, struct_pfioc_trans_sz);\n  _(DIOCXCOMMIT, READWRITE, struct_pfioc_trans_sz);\n  _(DIOCXROLLBACK, READWRITE, struct_pfioc_trans_sz);\n  _(DIOCGETSRCNODES, READWRITE, struct_pfioc_src_nodes_sz);\n  _(DIOCCLRSRCNODES, NONE, 0);\n  _(DIOCSETHOSTID, READWRITE, sizeof(u32));\n  _(DIOCIGETIFACES, READWRITE, struct_pfioc_iface_sz);\n  _(DIOCSETIFFLAG, READWRITE, struct_pfioc_iface_sz);\n  _(DIOCCLRIFFLAG, READWRITE, struct_pfioc_iface_sz);\n  _(DIOCKILLSRCNODES, READWRITE, struct_pfioc_src_node_kill_sz);\n  /* Entries from file: netbt/hci.h */\n  _(SIOCGBTINFO, READWRITE, struct_btreq_sz);\n  _(SIOCGBTINFOA, READWRITE, struct_btreq_sz);\n  _(SIOCNBTINFO, READWRITE, struct_btreq_sz);\n  _(SIOCSBTFLAGS, READWRITE, struct_btreq_sz);\n  _(SIOCSBTPOLICY, READWRITE, struct_btreq_sz);\n  _(SIOCSBTPTYPE, READWRITE, struct_btreq_sz);\n  _(SIOCGBTSTATS, READWRITE, struct_btreq_sz);\n  _(SIOCZBTSTATS, READWRITE, struct_btreq_sz);\n  _(SIOCBTDUMP, READ, struct_btreq_sz);\n  _(SIOCSBTSCOMTU, READWRITE, struct_btreq_sz);\n  _(SIOCGBTFEAT, READWRITE, struct_btreq_sz);\n  /* Entries from file: netinet/ip_nat.h */\n  _(SIOCADNAT, READ, struct_ipfobj_sz);\n  _(SIOCRMNAT, READ, struct_ipfobj_sz);\n  _(SIOCGNATS, READWRITE, struct_ipfobj_sz);\n  _(SIOCGNATL, READWRITE, struct_ipfobj_sz);\n  _(SIOCPURGENAT, READWRITE, struct_ipfobj_sz);\n  /* Entries from file: netinet/sctp_uio.h */\n  _(SIOCCONNECTX, READWRITE, 
struct_sctp_connectx_addrs_sz);\n  _(SIOCCONNECTXDEL, READWRITE, struct_sctp_connectx_addrs_sz);\n  /* Entries from file: netinet6/in6_var.h */\n  _(SIOCSIFINFO_FLAGS, READWRITE, struct_in6_ndireq_sz);\n  _(SIOCAADDRCTL_POLICY, READ, struct_in6_addrpolicy_sz);\n  _(SIOCDADDRCTL_POLICY, READ, struct_in6_addrpolicy_sz);\n  /* Entries from file: netsmb/smb_dev.h */\n  _(SMBIOC_OPENSESSION, READ, struct_smbioc_ossn_sz);\n  _(SMBIOC_OPENSHARE, READ, struct_smbioc_oshare_sz);\n  _(SMBIOC_REQUEST, READWRITE, struct_smbioc_rq_sz);\n  _(SMBIOC_SETFLAGS, READ, struct_smbioc_flags_sz);\n  _(SMBIOC_LOOKUP, READ, struct_smbioc_lookup_sz);\n  _(SMBIOC_READ, READWRITE, struct_smbioc_rw_sz);\n  _(SMBIOC_WRITE, READWRITE, struct_smbioc_rw_sz);\n  /* Entries from file: sys/agpio.h */\n  _(AGPIOC_INFO, WRITE, struct__agp_info_sz);\n  _(AGPIOC_ACQUIRE, NONE, 0);\n  _(AGPIOC_RELEASE, NONE, 0);\n  _(AGPIOC_SETUP, READ, struct__agp_setup_sz);\n  _(AGPIOC_ALLOCATE, READWRITE, struct__agp_allocate_sz);\n  _(AGPIOC_DEALLOCATE, READ, sizeof(int));\n  _(AGPIOC_BIND, READ, struct__agp_bind_sz);\n  _(AGPIOC_UNBIND, READ, struct__agp_unbind_sz);\n  /* Entries from file: sys/audioio.h */\n  _(AUDIO_GETINFO, WRITE, struct_audio_info_sz);\n  _(AUDIO_SETINFO, READWRITE, struct_audio_info_sz);\n  _(AUDIO_DRAIN, NONE, 0);\n  _(AUDIO_FLUSH, NONE, 0);\n  _(AUDIO_WSEEK, WRITE, sizeof(unsigned long));\n  _(AUDIO_RERROR, WRITE, sizeof(int));\n  _(AUDIO_GETDEV, WRITE, struct_audio_device_sz);\n  _(AUDIO_GETENC, READWRITE, struct_audio_encoding_sz);\n  _(AUDIO_GETFD, WRITE, sizeof(int));\n  _(AUDIO_SETFD, READWRITE, sizeof(int));\n  _(AUDIO_PERROR, WRITE, sizeof(int));\n  _(AUDIO_GETIOFFS, WRITE, struct_audio_offset_sz);\n  _(AUDIO_GETOOFFS, WRITE, struct_audio_offset_sz);\n  _(AUDIO_GETPROPS, WRITE, sizeof(int));\n  _(AUDIO_GETBUFINFO, WRITE, struct_audio_info_sz);\n  _(AUDIO_SETCHAN, READ, sizeof(int));\n  _(AUDIO_GETCHAN, WRITE, sizeof(int));\n  _(AUDIO_QUERYFORMAT, READWRITE, 
struct_audio_format_query_sz);\n  _(AUDIO_GETFORMAT, WRITE, struct_audio_info_sz);\n  _(AUDIO_SETFORMAT, READ, struct_audio_info_sz);\n  _(AUDIO_MIXER_READ, READWRITE, struct_mixer_ctrl_sz);\n  _(AUDIO_MIXER_WRITE, READWRITE, struct_mixer_ctrl_sz);\n  _(AUDIO_MIXER_DEVINFO, READWRITE, struct_mixer_devinfo_sz);\n  /* Entries from file: sys/ataio.h */\n  _(ATAIOCCOMMAND, READWRITE, struct_atareq_sz);\n  _(ATABUSIOSCAN, READ, struct_atabusioscan_args_sz);\n  _(ATABUSIORESET, NONE, 0);\n  _(ATABUSIODETACH, READ, struct_atabusiodetach_args_sz);\n  /* Entries from file: sys/cdio.h */\n  _(CDIOCPLAYTRACKS, READ, struct_ioc_play_track_sz);\n  _(CDIOCPLAYBLOCKS, READ, struct_ioc_play_blocks_sz);\n  _(CDIOCREADSUBCHANNEL, READWRITE, struct_ioc_read_subchannel_sz);\n  _(CDIOREADTOCHEADER, WRITE, struct_ioc_toc_header_sz);\n  _(CDIOREADTOCENTRIES, READWRITE, struct_ioc_read_toc_entry_sz);\n  _(CDIOREADMSADDR, READWRITE, sizeof(int));\n  _(CDIOCSETPATCH, READ, struct_ioc_patch_sz);\n  _(CDIOCGETVOL, WRITE, struct_ioc_vol_sz);\n  _(CDIOCSETVOL, READ, struct_ioc_vol_sz);\n  _(CDIOCSETMONO, NONE, 0);\n  _(CDIOCSETSTEREO, NONE, 0);\n  _(CDIOCSETMUTE, NONE, 0);\n  _(CDIOCSETLEFT, NONE, 0);\n  _(CDIOCSETRIGHT, NONE, 0);\n  _(CDIOCSETDEBUG, NONE, 0);\n  _(CDIOCCLRDEBUG, NONE, 0);\n  _(CDIOCPAUSE, NONE, 0);\n  _(CDIOCRESUME, NONE, 0);\n  _(CDIOCRESET, NONE, 0);\n  _(CDIOCSTART, NONE, 0);\n  _(CDIOCSTOP, NONE, 0);\n  _(CDIOCEJECT, NONE, 0);\n  _(CDIOCALLOW, NONE, 0);\n  _(CDIOCPREVENT, NONE, 0);\n  _(CDIOCCLOSE, NONE, 0);\n  _(CDIOCPLAYMSF, READ, struct_ioc_play_msf_sz);\n  _(CDIOCLOADUNLOAD, READ, struct_ioc_load_unload_sz);\n  /* Entries from file: sys/chio.h */\n  _(CHIOMOVE, READ, struct_changer_move_request_sz);\n  _(CHIOEXCHANGE, READ, struct_changer_exchange_request_sz);\n  _(CHIOPOSITION, READ, struct_changer_position_request_sz);\n  _(CHIOSPICKER, READ, sizeof(int));\n  _(CHIOGPARAMS, WRITE, struct_changer_params_sz);\n  _(CHIOIELEM, NONE, 0);\n  _(OCHIOGSTATUS, READ, 
struct_ochanger_element_status_request_sz);\n  _(CHIOGSTATUS, READ, struct_changer_element_status_request_sz);\n  _(CHIOSVOLTAG, READ, struct_changer_set_voltag_request_sz);\n  /* Entries from file: sys/clockctl.h */\n  _(CLOCKCTL_SETTIMEOFDAY, READ, struct_clockctl_settimeofday_sz);\n  _(CLOCKCTL_ADJTIME, READWRITE, struct_clockctl_adjtime_sz);\n  _(CLOCKCTL_CLOCK_SETTIME, READ, struct_clockctl_clock_settime_sz);\n  _(CLOCKCTL_NTP_ADJTIME, READWRITE, struct_clockctl_ntp_adjtime_sz);\n  /* Entries from file: sys/cpuio.h */\n  _(IOC_CPU_SETSTATE, READ, struct_cpustate_sz);\n  _(IOC_CPU_GETSTATE, READWRITE, struct_cpustate_sz);\n  _(IOC_CPU_GETCOUNT, WRITE, sizeof(int));\n  _(IOC_CPU_MAPID, READWRITE, sizeof(int));\n  _(IOC_CPU_UCODE_GET_VERSION, READWRITE, struct_cpu_ucode_version_sz);\n  _(IOC_CPU_UCODE_APPLY, READ, struct_cpu_ucode_sz);\n  /* Entries from file: sys/dkio.h */\n  _(DIOCGDINFO, WRITE, struct_disklabel_sz);\n  _(DIOCSDINFO, READ, struct_disklabel_sz);\n  _(DIOCWDINFO, READ, 0);\n  _(DIOCRFORMAT, READWRITE, struct_format_op_sz);\n  _(DIOCWFORMAT, READWRITE, struct_format_op_sz);\n  _(DIOCSSTEP, READ, sizeof(int));\n  _(DIOCSRETRIES, READ, sizeof(int));\n  _(DIOCKLABEL, READ, sizeof(int));\n  _(DIOCWLABEL, READ, sizeof(int));\n  _(DIOCSBAD, READ, struct_dkbad_sz);\n  _(DIOCEJECT, READ, sizeof(int));\n  _(ODIOCEJECT, NONE, 0);\n  _(DIOCLOCK, READ, sizeof(int));\n  _(DIOCGDEFLABEL, WRITE, struct_disklabel_sz);\n  _(DIOCCLRLABEL, NONE, 0);\n  _(DIOCGCACHE, WRITE, sizeof(int));\n  _(DIOCSCACHE, READ, sizeof(int));\n  _(DIOCCACHESYNC, READ, sizeof(int));\n  _(DIOCBSLIST, READWRITE, struct_disk_badsecinfo_sz);\n  _(DIOCBSFLUSH, NONE, 0);\n  _(DIOCAWEDGE, READWRITE, struct_dkwedge_info_sz);\n  _(DIOCGWEDGEINFO, WRITE, struct_dkwedge_info_sz);\n  _(DIOCDWEDGE, READ, struct_dkwedge_info_sz);\n  _(DIOCLWEDGES, READWRITE, struct_dkwedge_list_sz);\n  _(DIOCGSTRATEGY, WRITE, struct_disk_strategy_sz);\n  _(DIOCSSTRATEGY, READ, struct_disk_strategy_sz);\n  
_(DIOCGDISKINFO, WRITE, struct_plistref_sz);\n  _(DIOCTUR, WRITE, sizeof(int));\n  _(DIOCMWEDGES, WRITE, sizeof(int));\n  _(DIOCGSECTORSIZE, WRITE, sizeof(unsigned int));\n  _(DIOCGMEDIASIZE, WRITE, sizeof(uptr));\n  _(DIOCRMWEDGES, WRITE, sizeof(int));\n  /* Entries from file: sys/drvctlio.h */\n  _(DRVDETACHDEV, READ, struct_devdetachargs_sz);\n  _(DRVRESCANBUS, READ, struct_devrescanargs_sz);\n  _(DRVCTLCOMMAND, READWRITE, struct_plistref_sz);\n  _(DRVRESUMEDEV, READ, struct_devpmargs_sz);\n  _(DRVLISTDEV, READWRITE, struct_devlistargs_sz);\n  _(DRVGETEVENT, WRITE, struct_plistref_sz);\n  _(DRVSUSPENDDEV, READ, struct_devpmargs_sz);\n  /* Entries from file: sys/dvdio.h */\n  _(DVD_READ_STRUCT, READWRITE, union_dvd_struct_sz);\n  _(DVD_WRITE_STRUCT, READWRITE, union_dvd_struct_sz);\n  _(DVD_AUTH, READWRITE, union_dvd_authinfo_sz);\n  /* Entries from file: sys/envsys.h */\n  _(ENVSYS_GETDICTIONARY, READWRITE, struct_plistref_sz);\n  _(ENVSYS_SETDICTIONARY, READWRITE, struct_plistref_sz);\n  _(ENVSYS_REMOVEPROPS, READWRITE, struct_plistref_sz);\n  _(ENVSYS_GTREDATA, READWRITE, struct_envsys_tre_data_sz);\n  _(ENVSYS_GTREINFO, READWRITE, struct_envsys_basic_info_sz);\n  /* Entries from file: sys/event.h */\n  _(KFILTER_BYFILTER, READWRITE, struct_kfilter_mapping_sz);\n  _(KFILTER_BYNAME, READWRITE, struct_kfilter_mapping_sz);\n  /* Entries from file: sys/fdio.h */\n  _(FDIOCGETOPTS, WRITE, 0);\n  _(FDIOCSETOPTS, READ, sizeof(int));\n  _(FDIOCSETFORMAT, READ, struct_fdformat_parms_sz);\n  _(FDIOCGETFORMAT, WRITE, struct_fdformat_parms_sz);\n  _(FDIOCFORMAT_TRACK, READ, struct_fdformat_cmd_sz);\n  /* Entries from file: sys/filio.h */\n  _(FIOCLEX, NONE, 0);\n  _(FIONCLEX, NONE, 0);\n  _(FIOSEEKDATA, READWRITE, sizeof(uptr));\n  _(FIOSEEKHOLE, READWRITE, sizeof(uptr));\n  _(FIONREAD, WRITE, sizeof(int));\n  _(FIONBIO, READ, sizeof(int));\n  _(FIOASYNC, READ, sizeof(int));\n  _(FIOSETOWN, READ, sizeof(int));\n  _(FIOGETOWN, WRITE, sizeof(int));\n  _(OFIOGETBMAP, 
READWRITE, sizeof(u32));\n  _(FIOGETBMAP, READWRITE, sizeof(u64));\n  _(FIONWRITE, WRITE, sizeof(int));\n  _(FIONSPACE, WRITE, sizeof(int));\n  /* Entries from file: sys/gpio.h */\n  _(GPIOINFO, WRITE, struct_gpio_info_sz);\n  _(GPIOSET, READWRITE, struct_gpio_set_sz);\n  _(GPIOUNSET, READWRITE, struct_gpio_set_sz);\n  _(GPIOREAD, READWRITE, struct_gpio_req_sz);\n  _(GPIOWRITE, READWRITE, struct_gpio_req_sz);\n  _(GPIOTOGGLE, READWRITE, struct_gpio_req_sz);\n  _(GPIOATTACH, READWRITE, struct_gpio_attach_sz);\n  /* Entries from file: sys/ioctl.h */\n  _(PTIOCNETBSD, READ, struct_ioctl_pt_sz);\n  _(PTIOCSUNOS, READ, struct_ioctl_pt_sz);\n  _(PTIOCLINUX, READ, struct_ioctl_pt_sz);\n  _(PTIOCFREEBSD, READ, struct_ioctl_pt_sz);\n  _(PTIOCULTRIX, READ, struct_ioctl_pt_sz);\n  /* Entries from file: sys/ioctl_compat.h */\n  _(TIOCHPCL, NONE, 0);\n  _(TIOCGETP, WRITE, struct_sgttyb_sz);\n  _(TIOCSETP, READ, struct_sgttyb_sz);\n  _(TIOCSETN, READ, 0);\n  _(TIOCSETC, READ, struct_tchars_sz);\n  _(TIOCGETC, WRITE, struct_tchars_sz);\n  _(TIOCLBIS, READ, sizeof(int));\n  _(TIOCLBIC, READ, sizeof(int));\n  _(TIOCLSET, READ, sizeof(int));\n  _(TIOCLGET, WRITE, sizeof(int));\n  _(TIOCSLTC, READ, struct_ltchars_sz);\n  _(TIOCGLTC, WRITE, struct_ltchars_sz);\n  _(OTIOCCONS, NONE, 0);\n  /* Entries from file: sys/joystick.h */\n  _(JOY_SETTIMEOUT, READ, sizeof(int));\n  _(JOY_GETTIMEOUT, WRITE, sizeof(int));\n  _(JOY_SET_X_OFFSET, READ, sizeof(int));\n  _(JOY_SET_Y_OFFSET, READ, sizeof(int));\n  _(JOY_GET_Y_OFFSET, WRITE, sizeof(int));\n  /* Entries from file: sys/ksyms.h */\n  _(OKIOCGSYMBOL, READ, struct_ksyms_ogsymbol_sz);\n  _(OKIOCGVALUE, READ, struct_ksyms_ogsymbol_sz);\n  _(KIOCGSIZE, WRITE, sizeof(int));\n  _(KIOCGVALUE, READWRITE, struct_ksyms_gvalue_sz);\n  _(KIOCGSYMBOL, READWRITE, struct_ksyms_gsymbol_sz);\n  /* Entries from file: sys/lua.h */\n  _(LUAINFO, READWRITE, struct_lua_info_sz);\n  _(LUACREATE, READWRITE, struct_lua_create_sz);\n  _(LUADESTROY, READWRITE, 
struct_lua_create_sz);\n  _(LUAREQUIRE, READWRITE, struct_lua_require_sz);\n  _(LUALOAD, READWRITE, struct_lua_load_sz);\n  /* Entries from file: sys/midiio.h */\n  _(MIDI_PRETIME, READWRITE, sizeof(int));\n  _(MIDI_MPUMODE, READWRITE, sizeof(int));\n  _(MIDI_MPUCMD, READWRITE, struct_mpu_command_rec_sz);\n  _(SEQUENCER_RESET, NONE, 0);\n  _(SEQUENCER_SYNC, NONE, 0);\n  _(SEQUENCER_INFO, READWRITE, struct_synth_info_sz);\n  _(SEQUENCER_CTRLRATE, READWRITE, sizeof(int));\n  _(SEQUENCER_GETOUTCOUNT, WRITE, sizeof(int));\n  _(SEQUENCER_GETINCOUNT, WRITE, sizeof(int));\n  _(SEQUENCER_RESETSAMPLES, READ, sizeof(int));\n  _(SEQUENCER_NRSYNTHS, WRITE, sizeof(int));\n  _(SEQUENCER_NRMIDIS, WRITE, sizeof(int));\n  _(SEQUENCER_THRESHOLD, READ, sizeof(int));\n  _(SEQUENCER_MEMAVL, READWRITE, sizeof(int));\n  _(SEQUENCER_PANIC, NONE, 0);\n  _(SEQUENCER_OUTOFBAND, READ, struct_seq_event_rec_sz);\n  _(SEQUENCER_GETTIME, WRITE, sizeof(int));\n  _(SEQUENCER_TMR_TIMEBASE, READWRITE, sizeof(int));\n  _(SEQUENCER_TMR_START, NONE, 0);\n  _(SEQUENCER_TMR_STOP, NONE, 0);\n  _(SEQUENCER_TMR_CONTINUE, NONE, 0);\n  _(SEQUENCER_TMR_TEMPO, READWRITE, sizeof(int));\n  _(SEQUENCER_TMR_SOURCE, READWRITE, sizeof(int));\n  _(SEQUENCER_TMR_METRONOME, READ, sizeof(int));\n  _(SEQUENCER_TMR_SELECT, READ, sizeof(int));\n  /* Entries from file: sys/mtio.h */\n  _(MTIOCTOP, READ, struct_mtop_sz);\n  _(MTIOCGET, WRITE, struct_mtget_sz);\n  _(MTIOCIEOT, NONE, 0);\n  _(MTIOCEEOT, NONE, 0);\n  _(MTIOCRDSPOS, WRITE, sizeof(u32));\n  _(MTIOCRDHPOS, WRITE, sizeof(u32));\n  _(MTIOCSLOCATE, READ, sizeof(u32));\n  _(MTIOCHLOCATE, READ, sizeof(u32));\n  /* Entries from file: sys/power.h */\n  _(POWER_EVENT_RECVDICT, READWRITE, struct_plistref_sz);\n  _(POWER_IOC_GET_TYPE, WRITE, struct_power_type_sz);\n  /* Entries from file: sys/radioio.h */\n  _(RIOCGINFO, WRITE, struct_radio_info_sz);\n  _(RIOCSINFO, READWRITE, struct_radio_info_sz);\n  _(RIOCSSRCH, READ, sizeof(int));\n  /* Entries from file: sys/rndio.h */\n 
 _(RNDGETENTCNT, WRITE, sizeof(u32));\n  _(RNDGETSRCNUM, READWRITE, struct_rndstat_sz);\n  _(RNDGETSRCNAME, READWRITE, struct_rndstat_name_sz);\n  _(RNDCTL, READ, struct_rndctl_sz);\n  _(RNDADDDATA, READ, struct_rnddata_sz);\n  _(RNDGETPOOLSTAT, WRITE, struct_rndpoolstat_sz);\n  _(RNDGETESTNUM, READWRITE, struct_rndstat_est_sz);\n  _(RNDGETESTNAME, READWRITE, struct_rndstat_est_name_sz);\n  /* Entries from file: sys/scanio.h */\n  _(SCIOCGET, WRITE, struct_scan_io_sz);\n  _(SCIOCSET, READ, struct_scan_io_sz);\n  _(SCIOCRESTART, NONE, 0);\n  /* Entries from file: sys/scsiio.h */\n  _(SCIOCCOMMAND, READWRITE, struct_scsireq_sz);\n  _(SCIOCDEBUG, READ, sizeof(int));\n  _(SCIOCIDENTIFY, WRITE, struct_scsi_addr_sz);\n  _(OSCIOCIDENTIFY, WRITE, struct_oscsi_addr_sz);\n  _(SCIOCDECONFIG, NONE, 0);\n  _(SCIOCRECONFIG, NONE, 0);\n  _(SCIOCRESET, NONE, 0);\n  _(SCBUSIOSCAN, READ, struct_scbusioscan_args_sz);\n  _(SCBUSIORESET, NONE, 0);\n  _(SCBUSIODETACH, READ, struct_scbusiodetach_args_sz);\n  _(SCBUSACCEL, READ, struct_scbusaccel_args_sz);\n  /* Entries from file: sys/sockio.h */\n  _(SIOCSHIWAT, READ, sizeof(int));\n  _(SIOCGHIWAT, WRITE, sizeof(int));\n  _(SIOCSLOWAT, READ, sizeof(int));\n  _(SIOCGLOWAT, WRITE, sizeof(int));\n  _(SIOCATMARK, WRITE, sizeof(int));\n  _(SIOCSPGRP, READ, sizeof(int));\n  _(SIOCGPGRP, WRITE, sizeof(int));\n  _(SIOCPEELOFF, READWRITE, sizeof(int));\n  _(SIOCADDRT, READ, struct_ortentry_sz);\n  _(SIOCDELRT, READ, struct_ortentry_sz);\n  _(SIOCSIFADDR, READ, struct_ifreq_sz);\n  _(SIOCGIFADDR, READWRITE, struct_ifreq_sz);\n  _(SIOCSIFDSTADDR, READ, struct_ifreq_sz);\n  _(SIOCGIFDSTADDR, READWRITE, struct_ifreq_sz);\n  _(SIOCSIFFLAGS, READ, struct_ifreq_sz);\n  _(SIOCGIFFLAGS, READWRITE, struct_ifreq_sz);\n  _(SIOCGIFBRDADDR, READWRITE, struct_ifreq_sz);\n  _(SIOCSIFBRDADDR, READ, struct_ifreq_sz);\n  _(SIOCGIFCONF, READWRITE, struct_ifconf_sz);\n  _(SIOCGIFNETMASK, READWRITE, struct_ifreq_sz);\n  _(SIOCSIFNETMASK, READ, struct_ifreq_sz);\n  
_(SIOCGIFMETRIC, READWRITE, struct_ifreq_sz);\n  _(SIOCSIFMETRIC, READ, struct_ifreq_sz);\n  _(SIOCDIFADDR, READ, struct_ifreq_sz);\n  _(SIOCAIFADDR, READ, struct_ifaliasreq_sz);\n  _(SIOCGIFALIAS, READWRITE, struct_ifaliasreq_sz);\n  _(SIOCGIFAFLAG_IN, READWRITE, struct_ifreq_sz);\n  _(SIOCALIFADDR, READ, struct_if_laddrreq_sz);\n  _(SIOCGLIFADDR, READWRITE, struct_if_laddrreq_sz);\n  _(SIOCDLIFADDR, READ, struct_if_laddrreq_sz);\n  _(SIOCSIFADDRPREF, READ, struct_if_addrprefreq_sz);\n  _(SIOCGIFADDRPREF, READWRITE, struct_if_addrprefreq_sz);\n  _(SIOCADDMULTI, READ, struct_ifreq_sz);\n  _(SIOCDELMULTI, READ, struct_ifreq_sz);\n  _(SIOCGETVIFCNT, READWRITE, struct_sioc_vif_req_sz);\n  _(SIOCGETSGCNT, READWRITE, struct_sioc_sg_req_sz);\n  _(SIOCSIFMEDIA, READWRITE, struct_ifreq_sz);\n  _(SIOCGIFMEDIA, READWRITE, struct_ifmediareq_sz);\n  _(SIOCSIFGENERIC, READ, struct_ifreq_sz);\n  _(SIOCGIFGENERIC, READWRITE, struct_ifreq_sz);\n  _(SIOCSIFPHYADDR, READ, struct_ifaliasreq_sz);\n  _(SIOCGIFPSRCADDR, READWRITE, struct_ifreq_sz);\n  _(SIOCGIFPDSTADDR, READWRITE, struct_ifreq_sz);\n  _(SIOCDIFPHYADDR, READ, struct_ifreq_sz);\n  _(SIOCSLIFPHYADDR, READ, struct_if_laddrreq_sz);\n  _(SIOCGLIFPHYADDR, READWRITE, struct_if_laddrreq_sz);\n  _(SIOCSIFMTU, READ, struct_ifreq_sz);\n  _(SIOCGIFMTU, READWRITE, struct_ifreq_sz);\n  _(SIOCSDRVSPEC, READ, struct_ifdrv_sz);\n  _(SIOCGDRVSPEC, READWRITE, struct_ifdrv_sz);\n  _(SIOCIFCREATE, READ, struct_ifreq_sz);\n  _(SIOCIFDESTROY, READ, struct_ifreq_sz);\n  _(SIOCIFGCLONERS, READWRITE, struct_if_clonereq_sz);\n  _(SIOCGIFDLT, READWRITE, struct_ifreq_sz);\n  _(SIOCGIFCAP, READWRITE, struct_ifcapreq_sz);\n  _(SIOCSIFCAP, READ, struct_ifcapreq_sz);\n  _(SIOCSVH, READWRITE, struct_ifreq_sz);\n  _(SIOCGVH, READWRITE, struct_ifreq_sz);\n  _(SIOCINITIFADDR, READWRITE, struct_ifaddr_sz);\n  _(SIOCGIFDATA, READWRITE, struct_ifdatareq_sz);\n  _(SIOCZIFDATA, READWRITE, struct_ifdatareq_sz);\n  _(SIOCGLINKSTR, READWRITE, struct_ifdrv_sz);\n  
_(SIOCSLINKSTR, READ, struct_ifdrv_sz);\n  _(SIOCGETHERCAP, READWRITE, struct_eccapreq_sz);\n  _(SIOCGIFINDEX, READWRITE, struct_ifreq_sz);\n  _(SIOCSETHERCAP, READ, struct_eccapreq_sz);\n  _(SIOCSIFDESCR, READ, struct_ifreq_sz);\n  _(SIOCGIFDESCR, READWRITE, struct_ifreq_sz);\n  _(SIOCGUMBINFO, READWRITE, struct_ifreq_sz);\n  _(SIOCSUMBPARAM, READ, struct_ifreq_sz);\n  _(SIOCGUMBPARAM, READWRITE, struct_ifreq_sz);\n  _(SIOCSETPFSYNC, READ, struct_ifreq_sz);\n  _(SIOCGETPFSYNC, READWRITE, struct_ifreq_sz);\n  /* Entries from file: sys/timepps.h */\n  _(PPS_IOC_CREATE, NONE, 0);\n  _(PPS_IOC_DESTROY, NONE, 0);\n  _(PPS_IOC_SETPARAMS, READ, struct_pps_params_sz);\n  _(PPS_IOC_GETPARAMS, WRITE, struct_pps_params_sz);\n  _(PPS_IOC_GETCAP, WRITE, sizeof(int));\n  _(PPS_IOC_FETCH, READWRITE, struct_pps_info_sz);\n  _(PPS_IOC_KCBIND, READ, sizeof(int));\n  /* Entries from file: sys/ttycom.h */\n  _(TIOCEXCL, NONE, 0);\n  _(TIOCNXCL, NONE, 0);\n  _(TIOCFLUSH, READ, sizeof(int));\n  _(TIOCGETA, WRITE, struct_termios_sz);\n  _(TIOCSETA, READ, struct_termios_sz);\n  _(TIOCSETAW, READ, 0);\n  _(TIOCSETAF, READ, 0);\n  _(TIOCGETD, WRITE, sizeof(int));\n  _(TIOCSETD, READ, sizeof(int));\n  _(TIOCGLINED, WRITE, (32 * sizeof(char)));\n  _(TIOCSLINED, READ, (32 * sizeof(char)));\n  _(TIOCSBRK, NONE, 0);\n  _(TIOCCBRK, NONE, 0);\n  _(TIOCSDTR, NONE, 0);\n  _(TIOCCDTR, NONE, 0);\n  _(TIOCGPGRP, WRITE, sizeof(int));\n  _(TIOCSPGRP, READ, sizeof(int));\n  _(TIOCOUTQ, WRITE, sizeof(int));\n  _(TIOCSTI, READ, sizeof(char));\n  _(TIOCNOTTY, NONE, 0);\n  _(TIOCPKT, READ, sizeof(int));\n  _(TIOCSTOP, NONE, 0);\n  _(TIOCSTART, NONE, 0);\n  _(TIOCMSET, READ, sizeof(int));\n  _(TIOCMBIS, READ, sizeof(int));\n  _(TIOCMBIC, READ, sizeof(int));\n  _(TIOCMGET, WRITE, sizeof(int));\n  _(TIOCREMOTE, READ, sizeof(int));\n  _(TIOCGWINSZ, WRITE, struct_winsize_sz);\n  _(TIOCSWINSZ, READ, struct_winsize_sz);\n  _(TIOCUCNTL, READ, sizeof(int));\n  _(TIOCSTAT, READ, sizeof(int));\n  _(TIOCGSID, WRITE, 
sizeof(int));\n  _(TIOCCONS, READ, sizeof(int));\n  _(TIOCSCTTY, NONE, 0);\n  _(TIOCEXT, READ, sizeof(int));\n  _(TIOCSIG, NONE, 0);\n  _(TIOCDRAIN, NONE, 0);\n  _(TIOCGFLAGS, WRITE, sizeof(int));\n  _(TIOCSFLAGS, READ, sizeof(int));\n  _(TIOCDCDTIMESTAMP, WRITE, struct_timeval_sz);\n  _(TIOCRCVFRAME, READ, sizeof(uptr));\n  _(TIOCXMTFRAME, READ, sizeof(uptr));\n  _(TIOCPTMGET, WRITE, struct_ptmget_sz);\n  _(TIOCGRANTPT, NONE, 0);\n  _(TIOCPTSNAME, WRITE, struct_ptmget_sz);\n  _(TIOCSQSIZE, READ, sizeof(int));\n  _(TIOCGQSIZE, WRITE, sizeof(int));\n  /* Entries from file: sys/verified_exec.h */\n  _(VERIEXEC_LOAD, READ, struct_plistref_sz);\n  _(VERIEXEC_TABLESIZE, READ, struct_plistref_sz);\n  _(VERIEXEC_DELETE, READ, struct_plistref_sz);\n  _(VERIEXEC_QUERY, READWRITE, struct_plistref_sz);\n  _(VERIEXEC_DUMP, WRITE, struct_plistref_sz);\n  _(VERIEXEC_FLUSH, NONE, 0);\n  /* Entries from file: sys/videoio.h */\n  _(VIDIOC_QUERYCAP, WRITE, struct_v4l2_capability_sz);\n  _(VIDIOC_RESERVED, NONE, 0);\n  _(VIDIOC_ENUM_FMT, READWRITE, struct_v4l2_fmtdesc_sz);\n  _(VIDIOC_G_FMT, READWRITE, struct_v4l2_format_sz);\n  _(VIDIOC_S_FMT, READWRITE, struct_v4l2_format_sz);\n  _(VIDIOC_REQBUFS, READWRITE, struct_v4l2_requestbuffers_sz);\n  _(VIDIOC_QUERYBUF, READWRITE, struct_v4l2_buffer_sz);\n  _(VIDIOC_G_FBUF, WRITE, struct_v4l2_framebuffer_sz);\n  _(VIDIOC_S_FBUF, READ, struct_v4l2_framebuffer_sz);\n  _(VIDIOC_OVERLAY, READ, sizeof(int));\n  _(VIDIOC_QBUF, READWRITE, struct_v4l2_buffer_sz);\n  _(VIDIOC_DQBUF, READWRITE, struct_v4l2_buffer_sz);\n  _(VIDIOC_STREAMON, READ, sizeof(int));\n  _(VIDIOC_STREAMOFF, READ, sizeof(int));\n  _(VIDIOC_G_PARM, READWRITE, struct_v4l2_streamparm_sz);\n  _(VIDIOC_S_PARM, READWRITE, struct_v4l2_streamparm_sz);\n  _(VIDIOC_G_STD, WRITE, sizeof(u64));\n  _(VIDIOC_S_STD, READ, sizeof(u64));\n  _(VIDIOC_ENUMSTD, READWRITE, struct_v4l2_standard_sz);\n  _(VIDIOC_ENUMINPUT, READWRITE, struct_v4l2_input_sz);\n  _(VIDIOC_G_CTRL, READWRITE, 
struct_v4l2_control_sz);\n  _(VIDIOC_S_CTRL, READWRITE, struct_v4l2_control_sz);\n  _(VIDIOC_G_TUNER, READWRITE, struct_v4l2_tuner_sz);\n  _(VIDIOC_S_TUNER, READ, struct_v4l2_tuner_sz);\n  _(VIDIOC_G_AUDIO, WRITE, struct_v4l2_audio_sz);\n  _(VIDIOC_S_AUDIO, READ, struct_v4l2_audio_sz);\n  _(VIDIOC_QUERYCTRL, READWRITE, struct_v4l2_queryctrl_sz);\n  _(VIDIOC_QUERYMENU, READWRITE, struct_v4l2_querymenu_sz);\n  _(VIDIOC_G_INPUT, WRITE, sizeof(int));\n  _(VIDIOC_S_INPUT, READWRITE, sizeof(int));\n  _(VIDIOC_G_OUTPUT, WRITE, sizeof(int));\n  _(VIDIOC_S_OUTPUT, READWRITE, sizeof(int));\n  _(VIDIOC_ENUMOUTPUT, READWRITE, struct_v4l2_output_sz);\n  _(VIDIOC_G_AUDOUT, WRITE, struct_v4l2_audioout_sz);\n  _(VIDIOC_S_AUDOUT, READ, struct_v4l2_audioout_sz);\n  _(VIDIOC_G_MODULATOR, READWRITE, struct_v4l2_modulator_sz);\n  _(VIDIOC_S_MODULATOR, READ, struct_v4l2_modulator_sz);\n  _(VIDIOC_G_FREQUENCY, READWRITE, struct_v4l2_frequency_sz);\n  _(VIDIOC_S_FREQUENCY, READ, struct_v4l2_frequency_sz);\n  _(VIDIOC_CROPCAP, READWRITE, struct_v4l2_cropcap_sz);\n  _(VIDIOC_G_CROP, READWRITE, struct_v4l2_crop_sz);\n  _(VIDIOC_S_CROP, READ, struct_v4l2_crop_sz);\n  _(VIDIOC_G_JPEGCOMP, WRITE, struct_v4l2_jpegcompression_sz);\n  _(VIDIOC_S_JPEGCOMP, READ, struct_v4l2_jpegcompression_sz);\n  _(VIDIOC_QUERYSTD, WRITE, sizeof(u64));\n  _(VIDIOC_TRY_FMT, READWRITE, struct_v4l2_format_sz);\n  _(VIDIOC_ENUMAUDIO, READWRITE, struct_v4l2_audio_sz);\n  _(VIDIOC_ENUMAUDOUT, READWRITE, struct_v4l2_audioout_sz);\n  _(VIDIOC_G_PRIORITY, WRITE, enum_v4l2_priority_sz);\n  _(VIDIOC_S_PRIORITY, READ, enum_v4l2_priority_sz);\n  _(VIDIOC_ENUM_FRAMESIZES, READWRITE, struct_v4l2_frmsizeenum_sz);\n  _(VIDIOC_ENUM_FRAMEINTERVALS, READWRITE, struct_v4l2_frmivalenum_sz);\n  /* Entries from file: sys/wdog.h */\n  _(WDOGIOC_GMODE, READWRITE, struct_wdog_mode_sz);\n  _(WDOGIOC_SMODE, READ, struct_wdog_mode_sz);\n  _(WDOGIOC_WHICH, WRITE, struct_wdog_mode_sz);\n  _(WDOGIOC_TICKLE, NONE, 0);\n  _(WDOGIOC_GTICKLER, WRITE, 
sizeof(int));\n  _(WDOGIOC_GWDOGS, READWRITE, struct_wdog_conf_sz);\n  /* Entries from file: sys/kcov.h */\n  _(KCOV_IOC_SETBUFSIZE, READ, sizeof(u64));\n  _(KCOV_IOC_ENABLE, READ, sizeof(int));\n  _(KCOV_IOC_DISABLE, NONE, 0);\n  /* Entries from file: sys/ipmi.h */\n  _(IPMICTL_RECEIVE_MSG_TRUNC, READWRITE, struct_ipmi_recv_sz);\n  _(IPMICTL_RECEIVE_MSG, READWRITE, struct_ipmi_recv_sz);\n  _(IPMICTL_SEND_COMMAND, READ, struct_ipmi_req_sz);\n  _(IPMICTL_REGISTER_FOR_CMD, READ, struct_ipmi_cmdspec_sz);\n  _(IPMICTL_UNREGISTER_FOR_CMD, READ, struct_ipmi_cmdspec_sz);\n  _(IPMICTL_SET_GETS_EVENTS_CMD, READ, sizeof(int));\n  _(IPMICTL_SET_MY_ADDRESS_CMD, READ, sizeof(unsigned int));\n  _(IPMICTL_GET_MY_ADDRESS_CMD, WRITE, sizeof(unsigned int));\n  _(IPMICTL_SET_MY_LUN_CMD, READ, sizeof(unsigned int));\n  _(IPMICTL_GET_MY_LUN_CMD, WRITE, sizeof(unsigned int));\n  /* Entries from file: soundcard.h */\n  _(SNDCTL_DSP_RESET, NONE, 0);\n  _(SNDCTL_DSP_SYNC, NONE, 0);\n  _(SNDCTL_DSP_SPEED, READWRITE, sizeof(int));\n  _(SOUND_PCM_READ_RATE, WRITE, sizeof(int));\n  _(SNDCTL_DSP_STEREO, READWRITE, sizeof(int));\n  _(SNDCTL_DSP_GETBLKSIZE, READWRITE, sizeof(int));\n  _(SNDCTL_DSP_SETFMT, READWRITE, sizeof(int));\n  _(SOUND_PCM_READ_BITS, WRITE, sizeof(int));\n  _(SNDCTL_DSP_CHANNELS, READWRITE, sizeof(int));\n  _(SOUND_PCM_READ_CHANNELS, WRITE, sizeof(int));\n  _(SOUND_PCM_WRITE_FILTER, READWRITE, sizeof(int));\n  _(SOUND_PCM_READ_FILTER, WRITE, sizeof(int));\n  _(SNDCTL_DSP_POST, NONE, 0);\n  _(SNDCTL_DSP_SUBDIVIDE, READWRITE, sizeof(int));\n  _(SNDCTL_DSP_SETFRAGMENT, READWRITE, sizeof(int));\n  _(SNDCTL_DSP_GETFMTS, WRITE, sizeof(int));\n  _(SNDCTL_DSP_GETOSPACE, WRITE, struct_audio_buf_info_sz);\n  _(SNDCTL_DSP_GETISPACE, WRITE, struct_audio_buf_info_sz);\n  _(SNDCTL_DSP_NONBLOCK, NONE, 0);\n  _(SNDCTL_DSP_GETCAPS, WRITE, sizeof(int));\n  _(SNDCTL_DSP_GETTRIGGER, WRITE, sizeof(int));\n  _(SNDCTL_DSP_SETTRIGGER, READ, sizeof(int));\n  _(SNDCTL_DSP_GETIPTR, WRITE, 
struct_count_info_sz);\n  _(SNDCTL_DSP_GETOPTR, WRITE, struct_count_info_sz);\n  _(SNDCTL_DSP_MAPINBUF, WRITE, struct_buffmem_desc_sz);\n  _(SNDCTL_DSP_MAPOUTBUF, WRITE, struct_buffmem_desc_sz);\n  _(SNDCTL_DSP_SETSYNCRO, NONE, 0);\n  _(SNDCTL_DSP_SETDUPLEX, NONE, 0);\n  _(SNDCTL_DSP_PROFILE, READ, sizeof(int));\n  _(SNDCTL_DSP_GETODELAY, WRITE, sizeof(int));\n  _(SOUND_MIXER_INFO, WRITE, struct_mixer_info_sz);\n  _(SOUND_OLD_MIXER_INFO, WRITE, struct__old_mixer_info_sz);\n  _(OSS_GETVERSION, WRITE, sizeof(int));\n  _(SNDCTL_SYSINFO, WRITE, struct_oss_sysinfo_sz);\n  _(SNDCTL_AUDIOINFO, READWRITE, struct_oss_audioinfo_sz);\n  _(SNDCTL_ENGINEINFO, READWRITE, struct_oss_audioinfo_sz);\n  _(SNDCTL_DSP_GETPLAYVOL, WRITE, sizeof(unsigned int));\n  _(SNDCTL_DSP_SETPLAYVOL, READ, sizeof(unsigned int));\n  _(SNDCTL_DSP_GETRECVOL, WRITE, sizeof(unsigned int));\n  _(SNDCTL_DSP_SETRECVOL, READ, sizeof(unsigned int));\n  _(SNDCTL_DSP_SKIP, NONE, 0);\n  _(SNDCTL_DSP_SILENCE, NONE, 0);\n  /* Entries from file: dev/filemon/filemon.h (compat <= 9.99.26) */\n  _(FILEMON_SET_FD, READWRITE, sizeof(int));\n  _(FILEMON_SET_PID, READWRITE, sizeof(int));\n  /* Entries from file: dev/usb/urio.h (compat <= 9.99.43) */\n  _(URIO_SEND_COMMAND, READWRITE, struct_urio_command_sz);\n  _(URIO_RECV_COMMAND, READWRITE, struct_urio_command_sz);\n#undef _\n}\n\nstatic bool ioctl_initialized = false;\n\nstruct ioctl_desc_compare {\n  bool operator()(const ioctl_desc &left, const ioctl_desc &right) const {\n    return left.req < right.req;\n  }\n};\n\nstatic void ioctl_init() {\n  ioctl_table_fill();\n  Sort(ioctl_table, ioctl_table_size, ioctl_desc_compare());\n\n  bool bad = false;\n  for (unsigned i = 0; i < ioctl_table_size - 1; ++i) {\n    if (ioctl_table[i].req >= ioctl_table[i + 1].req) {\n      Printf(\"Duplicate or unsorted ioctl request id %x >= %x (%s vs %s)\\n\",\n             ioctl_table[i].req, ioctl_table[i + 1].req, ioctl_table[i].name,\n             ioctl_table[i + 1].name);\n      
bad = true;\n    }\n  }\n\n  if (bad)\n    Die();\n\n  ioctl_initialized = true;\n}\n\nstatic const ioctl_desc *ioctl_table_lookup(unsigned req) {\n  int left = 0;\n  int right = ioctl_table_size;\n  while (left < right) {\n    int mid = (left + right) / 2;\n    if (ioctl_table[mid].req < req)\n      left = mid + 1;\n    else\n      right = mid;\n  }\n  if (left == right && ioctl_table[left].req == req)\n    return ioctl_table + left;\n  else\n    return nullptr;\n}\n\nstatic bool ioctl_decode(unsigned req, ioctl_desc *desc) {\n  CHECK(desc);\n  desc->req = req;\n  desc->name = \"<DECODED_IOCTL>\";\n  desc->size = IOC_SIZE(req);\n  // Sanity check.\n  if (desc->size > 0xFFFF)\n    return false;\n  unsigned dir = IOC_DIR(req);\n  switch (dir) {\n  case IOC_NONE:\n    desc->type = ioctl_desc::NONE;\n    break;\n  case IOC_READ | IOC_WRITE:\n    desc->type = ioctl_desc::READWRITE;\n    break;\n  case IOC_READ:\n    desc->type = ioctl_desc::WRITE;\n    break;\n  case IOC_WRITE:\n    desc->type = ioctl_desc::READ;\n    break;\n  default:\n    return false;\n  }\n  // Size can be 0 iff type is NONE.\n  if ((desc->type == IOC_NONE) != (desc->size == 0))\n    return false;\n  // Sanity check.\n  if (IOC_TYPE(req) == 0)\n    return false;\n  return true;\n}\n\nstatic const ioctl_desc *ioctl_lookup(unsigned req) {\n  const ioctl_desc *desc = ioctl_table_lookup(req);\n  if (desc)\n    return desc;\n\n  // Try stripping access size from the request id.\n  desc = ioctl_table_lookup(req & ~(IOC_SIZEMASK << IOC_SIZESHIFT));\n  // Sanity check: requests that encode access size are either read or write and\n  // have size of 0 in the table.\n  if (desc && desc->size == 0 &&\n      (desc->type == ioctl_desc::READWRITE || desc->type == ioctl_desc::WRITE ||\n       desc->type == ioctl_desc::READ))\n    return desc;\n  return nullptr;\n}\n\nstatic void ioctl_common_pre(void *ctx, const ioctl_desc *desc, int d,\n                             unsigned request, void *arg) {\n  if 
(desc->type == ioctl_desc::READ || desc->type == ioctl_desc::READWRITE) {\n    unsigned size = desc->size ? desc->size : IOC_SIZE(request);\n    COMMON_INTERCEPTOR_READ_RANGE(ctx, arg, size);\n  }\n  if (desc->type != ioctl_desc::CUSTOM)\n    return;\n  if (request == IOCTL_SIOCGIFCONF) {\n    struct __sanitizer_ifconf *ifc = (__sanitizer_ifconf *)arg;\n    COMMON_INTERCEPTOR_READ_RANGE(ctx, (char *)&ifc->ifc_len,\n                                  sizeof(ifc->ifc_len));\n  }\n}\n\nstatic void ioctl_common_post(void *ctx, const ioctl_desc *desc, int res, int d,\n                              unsigned request, void *arg) {\n  if (desc->type == ioctl_desc::WRITE || desc->type == ioctl_desc::READWRITE) {\n    // FIXME: add verbose output\n    unsigned size = desc->size ? desc->size : IOC_SIZE(request);\n    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, arg, size);\n  }\n  if (desc->type != ioctl_desc::CUSTOM)\n    return;\n  if (request == IOCTL_SIOCGIFCONF) {\n    struct __sanitizer_ifconf *ifc = (__sanitizer_ifconf *)arg;\n    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ifc->ifc_ifcu.ifcu_req, ifc->ifc_len);\n  }\n}\n\n#endif // SANITIZER_NETBSD\n"
  },
  {
    "path": "runtime/sanitizer_common/sanitizer_interface_internal.h",
    "content": "//===-- sanitizer_interface_internal.h --------------------------*- C++ -*-===//\n//\n// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n// See https://llvm.org/LICENSE.txt for license information.\n// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n//\n//===----------------------------------------------------------------------===//\n//\n// This file is shared between run-time libraries of sanitizers.\n//\n// This header declares the sanitizer runtime interface functions.\n// The runtime library has to define these functions so the instrumented program\n// could call them.\n//\n// See also include/sanitizer/common_interface_defs.h\n//===----------------------------------------------------------------------===//\n#ifndef SANITIZER_INTERFACE_INTERNAL_H\n#define SANITIZER_INTERFACE_INTERNAL_H\n\n#include \"sanitizer_internal_defs.h\"\n\nextern \"C\" {\n  // Tell the tools to write their reports to \"path.<pid>\" instead of stderr.\n  // The special values are \"stdout\" and \"stderr\".\n  SANITIZER_INTERFACE_ATTRIBUTE\n  void __sanitizer_set_report_path(const char *path);\n  // Tell the tools to write their reports to the provided file descriptor\n  // (casted to void *).\n  SANITIZER_INTERFACE_ATTRIBUTE\n  void __sanitizer_set_report_fd(void *fd);\n  // Get the current full report file path, if a path was specified by\n  // an earlier call to __sanitizer_set_report_path. 
Returns null otherwise.\n  SANITIZER_INTERFACE_ATTRIBUTE\n  const char *__sanitizer_get_report_path();\n\n  typedef struct {\n      int coverage_sandboxed;\n      __sanitizer::sptr coverage_fd;\n      unsigned int coverage_max_block_size;\n  } __sanitizer_sandbox_arguments;\n\n  // Notify the tools that the sandbox is going to be turned on.\n  SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void\n      __sanitizer_sandbox_on_notify(__sanitizer_sandbox_arguments *args);\n\n  // This function is called by the tool when it has just finished reporting\n  // an error. 'error_summary' is a one-line string that summarizes\n  // the error message. This function can be overridden by the client.\n  SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE\n  void __sanitizer_report_error_summary(const char *error_summary);\n\n  SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_cov_dump();\n  SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_dump_coverage(\n      const __sanitizer::uptr *pcs, const __sanitizer::uptr len);\n  SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_dump_trace_pc_guard_coverage();\n\n  SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_cov(__sanitizer::u32 *guard);\n\n  // Returns 1 on the first call, then returns 0 thereafter.  
Called by the tool\n  // to ensure only one report is printed when multiple errors occur\n  // simultaneously.\n  SANITIZER_INTERFACE_ATTRIBUTE int __sanitizer_acquire_crash_state();\n\n  SANITIZER_INTERFACE_ATTRIBUTE\n  void __sanitizer_annotate_contiguous_container(const void *beg,\n                                                 const void *end,\n                                                 const void *old_mid,\n                                                 const void *new_mid);\n  SANITIZER_INTERFACE_ATTRIBUTE\n  int __sanitizer_verify_contiguous_container(const void *beg, const void *mid,\n                                              const void *end);\n  SANITIZER_INTERFACE_ATTRIBUTE\n  const void *__sanitizer_contiguous_container_find_bad_address(\n      const void *beg, const void *mid, const void *end);\n\n  SANITIZER_INTERFACE_ATTRIBUTE\n  int __sanitizer_get_module_and_offset_for_pc(\n      __sanitizer::uptr pc, char *module_path,\n      __sanitizer::uptr module_path_len, __sanitizer::uptr *pc_offset);\n\n  SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE\n  void __sanitizer_cov_trace_cmp();\n  SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE\n  void __sanitizer_cov_trace_cmp1();\n  SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE\n  void __sanitizer_cov_trace_cmp2();\n  SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE\n  void __sanitizer_cov_trace_cmp4();\n  SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE\n  void __sanitizer_cov_trace_cmp8();\n  SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE\n  void __sanitizer_cov_trace_const_cmp1();\n  SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE\n  void __sanitizer_cov_trace_const_cmp2();\n  SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE\n  void __sanitizer_cov_trace_const_cmp4();\n  SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE\n  void __sanitizer_cov_trace_const_cmp8();\n  SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE\n  void 
__sanitizer_cov_trace_switch();\n  SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE\n  void __sanitizer_cov_trace_div4();\n  SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE\n  void __sanitizer_cov_trace_div8();\n  SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE\n  void __sanitizer_cov_trace_gep();\n  SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE\n  void __sanitizer_cov_trace_pc_indir();\n  SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE\n  void __sanitizer_cov_trace_pc_guard(__sanitizer::u32*);\n  SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE\n  void __sanitizer_cov_trace_pc_guard_init(__sanitizer::u32*,\n                                           __sanitizer::u32*);\n  SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void\n  __sanitizer_cov_8bit_counters_init(char *, char *);\n  SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void\n  __sanitizer_cov_bool_flag_init();\n  SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void\n  __sanitizer_cov_pcs_init(const __sanitizer::uptr *,\n                           const __sanitizer::uptr *);\n} // extern \"C\"\n\n#endif  // SANITIZER_INTERFACE_INTERNAL_H\n"
  },
  {
    "path": "runtime/sanitizer_common/sanitizer_internal_defs.h",
    "content": "//===-- sanitizer_internal_defs.h -------------------------------*- C++ -*-===//\n//\n// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n// See https://llvm.org/LICENSE.txt for license information.\n// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n//\n//===----------------------------------------------------------------------===//\n//\n// This file is shared between AddressSanitizer and ThreadSanitizer.\n// It contains macro used in run-time libraries code.\n//===----------------------------------------------------------------------===//\n#ifndef SANITIZER_DEFS_H\n#define SANITIZER_DEFS_H\n\n#include \"sanitizer_platform.h\"\n\n#ifndef SANITIZER_DEBUG\n# define SANITIZER_DEBUG 0\n#endif\n\n#define SANITIZER_STRINGIFY_(S) #S\n#define SANITIZER_STRINGIFY(S) SANITIZER_STRINGIFY_(S)\n\n// Only use SANITIZER_*ATTRIBUTE* before the function return type!\n#if SANITIZER_WINDOWS\n#if SANITIZER_IMPORT_INTERFACE\n# define SANITIZER_INTERFACE_ATTRIBUTE __declspec(dllimport)\n#else\n# define SANITIZER_INTERFACE_ATTRIBUTE __declspec(dllexport)\n#endif\n# define SANITIZER_WEAK_ATTRIBUTE\n#elif SANITIZER_GO\n# define SANITIZER_INTERFACE_ATTRIBUTE\n# define SANITIZER_WEAK_ATTRIBUTE\n#else\n# define SANITIZER_INTERFACE_ATTRIBUTE __attribute__((visibility(\"default\")))\n# define SANITIZER_WEAK_ATTRIBUTE  __attribute__((weak))\n#endif\n\n// TLS is handled differently on different platforms\n#if SANITIZER_LINUX || SANITIZER_NETBSD || \\\n  SANITIZER_FREEBSD\n# define SANITIZER_TLS_INITIAL_EXEC_ATTRIBUTE \\\n    __attribute__((tls_model(\"initial-exec\"))) thread_local\n#else\n# define SANITIZER_TLS_INITIAL_EXEC_ATTRIBUTE\n#endif\n\n//--------------------------- WEAK FUNCTIONS ---------------------------------//\n// When working with weak functions, to simplify the code and make it more\n// portable, when possible define a default implementation using this macro:\n//\n// SANITIZER_INTERFACE_WEAK_DEF(<return_type>, <name>, 
<parameter list>)\n//\n// For example:\n//   SANITIZER_INTERFACE_WEAK_DEF(bool, compare, int a, int b) { return a > b; }\n//\n#if SANITIZER_WINDOWS\n#include \"sanitizer_win_defs.h\"\n# define SANITIZER_INTERFACE_WEAK_DEF(ReturnType, Name, ...)                   \\\n  WIN_WEAK_EXPORT_DEF(ReturnType, Name, __VA_ARGS__)\n#else\n# define SANITIZER_INTERFACE_WEAK_DEF(ReturnType, Name, ...)                   \\\n  extern \"C\" SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE            \\\n  ReturnType Name(__VA_ARGS__)\n#endif\n\n// SANITIZER_SUPPORTS_WEAK_HOOKS means that we support real weak functions that\n// will evaluate to a null pointer when not defined.\n#ifndef SANITIZER_SUPPORTS_WEAK_HOOKS\n#if (SANITIZER_LINUX || SANITIZER_SOLARIS) && !SANITIZER_GO\n# define SANITIZER_SUPPORTS_WEAK_HOOKS 1\n// Before Xcode 4.5, the Darwin linker doesn't reliably support undefined\n// weak symbols.  Mac OS X 10.9/Darwin 13 is the first release only supported\n// by Xcode >= 4.5.\n#elif SANITIZER_MAC && \\\n    __ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__ >= 1090 && !SANITIZER_GO\n# define SANITIZER_SUPPORTS_WEAK_HOOKS 1\n#else\n# define SANITIZER_SUPPORTS_WEAK_HOOKS 0\n#endif\n#endif // SANITIZER_SUPPORTS_WEAK_HOOKS\n// For some weak hooks that will be called very often and we want to avoid the\n// overhead of executing the default implementation when it is not necessary,\n// we can use the flag SANITIZER_SUPPORTS_WEAK_HOOKS to only define the default\n// implementation for platforms that doesn't support weak symbols. 
For example:\n//\n//   #if !SANITIZER_SUPPORT_WEAK_HOOKS\n//     SANITIZER_INTERFACE_WEAK_DEF(bool, compare_hook, int a, int b) {\n//       return a > b;\n//     }\n//   #endif\n//\n// And then use it as: if (compare_hook) compare_hook(a, b);\n//----------------------------------------------------------------------------//\n\n\n// We can use .preinit_array section on Linux to call sanitizer initialization\n// functions very early in the process startup (unless PIC macro is defined).\n//\n// On FreeBSD, .preinit_array functions are called with rtld_bind_lock writer\n// lock held. It will lead to dead lock if unresolved PLT functions (which helds\n// rtld_bind_lock reader lock) are called inside .preinit_array functions.\n//\n// FIXME: do we have anything like this on Mac?\n#ifndef SANITIZER_CAN_USE_PREINIT_ARRAY\n#if (SANITIZER_LINUX || SANITIZER_FUCHSIA || SANITIZER_NETBSD) && !defined(PIC)\n#define SANITIZER_CAN_USE_PREINIT_ARRAY 1\n// Before Solaris 11.4, .preinit_array is fully supported only with GNU ld.\n// FIXME: Check for those conditions.\n#elif SANITIZER_SOLARIS && !defined(PIC)\n# define SANITIZER_CAN_USE_PREINIT_ARRAY 1\n#else\n# define SANITIZER_CAN_USE_PREINIT_ARRAY 0\n#endif\n#endif  // SANITIZER_CAN_USE_PREINIT_ARRAY\n\n// GCC does not understand __has_feature\n#if !defined(__has_feature)\n# define __has_feature(x) 0\n#endif\n\n// Older GCCs do not understand __has_attribute.\n#if !defined(__has_attribute)\n# define __has_attribute(x) 0\n#endif\n\n#if !defined(__has_cpp_attribute)\n#  define __has_cpp_attribute(x) 0\n#endif\n\n// For portability reasons we do not include stddef.h, stdint.h or any other\n// system header, but we do need some basic types that are not defined\n// in a portable way by the language itself.\nnamespace __sanitizer {\n\n#if defined(_WIN64)\n// 64-bit Windows uses LLP64 data model.\ntypedef unsigned long long uptr;\ntypedef signed long long sptr;\n#else\n#  if (SANITIZER_WORDSIZE == 64) || SANITIZER_MAC || 
SANITIZER_WINDOWS\ntypedef unsigned long uptr;\ntypedef signed long sptr;\n#  else\ntypedef unsigned int uptr;\ntypedef signed int sptr;\n#  endif\n#endif  // defined(_WIN64)\n#if defined(__x86_64__)\n// Since x32 uses ILP32 data model in 64-bit hardware mode, we must use\n// 64-bit pointer to unwind stack frame.\ntypedef unsigned long long uhwptr;\n#else\ntypedef uptr uhwptr;\n#endif\ntypedef unsigned char u8;\ntypedef unsigned short u16;\ntypedef unsigned int u32;\ntypedef unsigned long long u64;\ntypedef signed char s8;\ntypedef signed short s16;\ntypedef signed int s32;\ntypedef signed long long s64;\n#if SANITIZER_WINDOWS\n// On Windows, files are HANDLE, which is a synonim of void*.\n// Use void* to avoid including <windows.h> everywhere.\ntypedef void* fd_t;\ntypedef unsigned error_t;\n#else\ntypedef int fd_t;\ntypedef int error_t;\n#endif\n#if SANITIZER_SOLARIS && !defined(_LP64)\ntypedef long pid_t;\n#else\ntypedef int pid_t;\n#endif\n\n#if SANITIZER_FREEBSD || SANITIZER_NETBSD || SANITIZER_MAC ||             \\\n    (SANITIZER_SOLARIS && (defined(_LP64) || _FILE_OFFSET_BITS == 64)) || \\\n    (SANITIZER_LINUX && (defined(__x86_64__) || defined(__hexagon__)))\ntypedef u64 OFF_T;\n#else\ntypedef uptr OFF_T;\n#endif\ntypedef u64  OFF64_T;\n\n#if (SANITIZER_WORDSIZE == 64) || SANITIZER_MAC\ntypedef uptr operator_new_size_type;\n#else\n# if defined(__s390__) && !defined(__s390x__)\n// Special case: 31-bit s390 has unsigned long as size_t.\ntypedef unsigned long operator_new_size_type;\n# else\ntypedef u32 operator_new_size_type;\n# endif\n#endif\n\ntypedef u64 tid_t;\n\n// ----------- ATTENTION -------------\n// This header should NOT include any other headers to avoid portability issues.\n\n// Common defs.\n#define INTERFACE_ATTRIBUTE SANITIZER_INTERFACE_ATTRIBUTE\n#define SANITIZER_WEAK_DEFAULT_IMPL \\\n  extern \"C\" SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE NOINLINE\n#define SANITIZER_WEAK_CXX_DEFAULT_IMPL \\\n  extern \"C++\" 
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE NOINLINE\n\n// Platform-specific defs.\n#if defined(_MSC_VER)\n# define ALWAYS_INLINE __forceinline\n// FIXME(timurrrr): do we need this on Windows?\n# define ALIAS(x)\n# define ALIGNED(x) __declspec(align(x))\n# define FORMAT(f, a)\n# define NOINLINE __declspec(noinline)\n# define NORETURN __declspec(noreturn)\n# define THREADLOCAL   __declspec(thread)\n# define LIKELY(x) (x)\n# define UNLIKELY(x) (x)\n# define PREFETCH(x) /* _mm_prefetch(x, _MM_HINT_NTA) */ (void)0\n# define WARN_UNUSED_RESULT\n#else  // _MSC_VER\n# define ALWAYS_INLINE inline __attribute__((always_inline))\n# define ALIAS(x) __attribute__((alias(x)))\n// Please only use the ALIGNED macro before the type.\n// Using ALIGNED after the variable declaration is not portable!\n# define ALIGNED(x) __attribute__((aligned(x)))\n# define FORMAT(f, a)  __attribute__((format(printf, f, a)))\n# define NOINLINE __attribute__((noinline))\n# define NORETURN  __attribute__((noreturn))\n# define THREADLOCAL   __thread\n# define LIKELY(x)     __builtin_expect(!!(x), 1)\n# define UNLIKELY(x)   __builtin_expect(!!(x), 0)\n# if defined(__i386__) || defined(__x86_64__)\n// __builtin_prefetch(x) generates prefetchnt0 on x86\n#  define PREFETCH(x) __asm__(\"prefetchnta (%0)\" : : \"r\" (x))\n# else\n#  define PREFETCH(x) __builtin_prefetch(x)\n# endif\n# define WARN_UNUSED_RESULT __attribute__((warn_unused_result))\n#endif  // _MSC_VER\n\n#if !defined(_MSC_VER) || defined(__clang__)\n# define UNUSED __attribute__((unused))\n# define USED __attribute__((used))\n#else\n# define UNUSED\n# define USED\n#endif\n\n#if !defined(_MSC_VER) || defined(__clang__) || MSC_PREREQ(1900)\n# define NOEXCEPT noexcept\n#else\n# define NOEXCEPT throw()\n#endif\n\n#if __has_cpp_attribute(clang::fallthrough)\n#  define FALLTHROUGH [[clang::fallthrough]]\n#else\n#  define FALLTHROUGH\n#endif\n\n// Unaligned versions of basic types.\ntypedef ALIGNED(1) u16 uu16;\ntypedef ALIGNED(1) u32 
uu32;\ntypedef ALIGNED(1) u64 uu64;\ntypedef ALIGNED(1) s16 us16;\ntypedef ALIGNED(1) s32 us32;\ntypedef ALIGNED(1) s64 us64;\n\n#if SANITIZER_WINDOWS\n}  // namespace __sanitizer\ntypedef unsigned long DWORD;\nnamespace __sanitizer {\ntypedef DWORD thread_return_t;\n# define THREAD_CALLING_CONV __stdcall\n#else  // _WIN32\ntypedef void* thread_return_t;\n# define THREAD_CALLING_CONV\n#endif  // _WIN32\ntypedef thread_return_t (THREAD_CALLING_CONV *thread_callback_t)(void* arg);\n\n// NOTE: Functions below must be defined in each run-time.\nvoid NORETURN Die();\n\nvoid NORETURN CheckFailed(const char *file, int line, const char *cond,\n                          u64 v1, u64 v2);\n\n// Check macro\n#define RAW_CHECK_MSG(expr, msg, ...)          \\\n  do {                                         \\\n    if (UNLIKELY(!(expr))) {                   \\\n      const char* msgs[] = {msg, __VA_ARGS__}; \\\n      for (const char* m : msgs) RawWrite(m);  \\\n      Die();                                   \\\n    }                                          \\\n  } while (0)\n\n#define RAW_CHECK(expr) RAW_CHECK_MSG(expr, #expr \"\\n\", )\n#define RAW_CHECK_VA(expr, ...) 
RAW_CHECK_MSG(expr, #expr \"\\n\", __VA_ARGS__)\n\n#define CHECK_IMPL(c1, op, c2) \\\n  do { \\\n    __sanitizer::u64 v1 = (__sanitizer::u64)(c1); \\\n    __sanitizer::u64 v2 = (__sanitizer::u64)(c2); \\\n    if (UNLIKELY(!(v1 op v2))) \\\n      __sanitizer::CheckFailed(__FILE__, __LINE__, \\\n        \"(\" #c1 \") \" #op \" (\" #c2 \")\", v1, v2); \\\n  } while (false) \\\n/**/\n\n#define CHECK(a)       CHECK_IMPL((a), !=, 0)\n#define CHECK_EQ(a, b) CHECK_IMPL((a), ==, (b))\n#define CHECK_NE(a, b) CHECK_IMPL((a), !=, (b))\n#define CHECK_LT(a, b) CHECK_IMPL((a), <,  (b))\n#define CHECK_LE(a, b) CHECK_IMPL((a), <=, (b))\n#define CHECK_GT(a, b) CHECK_IMPL((a), >,  (b))\n#define CHECK_GE(a, b) CHECK_IMPL((a), >=, (b))\n\n#if SANITIZER_DEBUG\n#define DCHECK(a)       CHECK(a)\n#define DCHECK_EQ(a, b) CHECK_EQ(a, b)\n#define DCHECK_NE(a, b) CHECK_NE(a, b)\n#define DCHECK_LT(a, b) CHECK_LT(a, b)\n#define DCHECK_LE(a, b) CHECK_LE(a, b)\n#define DCHECK_GT(a, b) CHECK_GT(a, b)\n#define DCHECK_GE(a, b) CHECK_GE(a, b)\n#else\n#define DCHECK(a)\n#define DCHECK_EQ(a, b)\n#define DCHECK_NE(a, b)\n#define DCHECK_LT(a, b)\n#define DCHECK_LE(a, b)\n#define DCHECK_GT(a, b)\n#define DCHECK_GE(a, b)\n#endif\n\n#define UNREACHABLE(msg) do { \\\n  CHECK(0 && msg); \\\n  Die(); \\\n} while (0)\n\n#define UNIMPLEMENTED() UNREACHABLE(\"unimplemented\")\n\n#define COMPILER_CHECK(pred) static_assert(pred, \"\")\n\n#define ARRAY_SIZE(a) (sizeof(a)/sizeof((a)[0]))\n\n// Limits for integral types. 
We have to redefine it in case we don't\n// have stdint.h (like in Visual Studio 9).\n#undef __INT64_C\n#undef __UINT64_C\n#if SANITIZER_WORDSIZE == 64\n# define __INT64_C(c)  c ## L\n# define __UINT64_C(c) c ## UL\n#else\n# define __INT64_C(c)  c ## LL\n# define __UINT64_C(c) c ## ULL\n#endif  // SANITIZER_WORDSIZE == 64\n#undef INT32_MIN\n#define INT32_MIN              (-2147483647-1)\n#undef INT32_MAX\n#define INT32_MAX              (2147483647)\n#undef UINT32_MAX\n#define UINT32_MAX             (4294967295U)\n#undef INT64_MIN\n#define INT64_MIN              (-__INT64_C(9223372036854775807)-1)\n#undef INT64_MAX\n#define INT64_MAX              (__INT64_C(9223372036854775807))\n#undef UINT64_MAX\n#define UINT64_MAX             (__UINT64_C(18446744073709551615))\n#undef UINTPTR_MAX\n#if SANITIZER_WORDSIZE == 64\n# define UINTPTR_MAX           (18446744073709551615UL)\n#else\n# define UINTPTR_MAX           (4294967295U)\n#endif  // SANITIZER_WORDSIZE == 64\n\nenum LinkerInitialized { LINKER_INITIALIZED = 0 };\n\n#if !defined(_MSC_VER) || defined(__clang__)\n#if SANITIZER_S390_31\n#define GET_CALLER_PC() \\\n  (__sanitizer::uptr) __builtin_extract_return_addr(__builtin_return_address(0))\n#else\n#define GET_CALLER_PC() (__sanitizer::uptr) __builtin_return_address(0)\n#endif\n#define GET_CURRENT_FRAME() (__sanitizer::uptr) __builtin_frame_address(0)\ninline void Trap() {\n  __builtin_trap();\n}\n#else\nextern \"C\" void* _ReturnAddress(void);\nextern \"C\" void* _AddressOfReturnAddress(void);\n# pragma intrinsic(_ReturnAddress)\n# pragma intrinsic(_AddressOfReturnAddress)\n#define GET_CALLER_PC() (__sanitizer::uptr) _ReturnAddress()\n// CaptureStackBackTrace doesn't need to know BP on Windows.\n#define GET_CURRENT_FRAME() \\\n  (((__sanitizer::uptr)_AddressOfReturnAddress()) + sizeof(__sanitizer::uptr))\n\nextern \"C\" void __ud2(void);\n# pragma intrinsic(__ud2)\ninline void Trap() {\n  __ud2();\n}\n#endif\n\n#define HANDLE_EINTR(res, f)                               
        \\\n  {                                                                \\\n    int rverrno;                                                   \\\n    do {                                                           \\\n      res = (f);                                                   \\\n    } while (internal_iserror(res, &rverrno) && rverrno == EINTR); \\\n  }\n\n// Forces the compiler to generate a frame pointer in the function.\n#define ENABLE_FRAME_POINTER              \\\n  do {                                    \\\n    volatile __sanitizer::uptr enable_fp; \\\n    enable_fp = GET_CURRENT_FRAME();      \\\n    (void)enable_fp;                      \\\n  } while (0)\n\n// Internal thread identifier allocated by ThreadRegistry.\ntypedef u32 Tid;\nconstexpr Tid kInvalidTid = -1;\nconstexpr Tid kMainTid = 0;\n\n// Stack depot stack identifier.\ntypedef u32 StackID;\nconst StackID kInvalidStackID = 0;\n\n}  // namespace __sanitizer\n\nnamespace __asan {\nusing namespace __sanitizer;\n}\nnamespace __dsan {\nusing namespace __sanitizer;\n}\nnamespace __dfsan {\nusing namespace __sanitizer;\n}\nnamespace __lsan {\nusing namespace __sanitizer;\n}\nnamespace __msan {\nusing namespace __sanitizer;\n}\nnamespace __hwasan {\nusing namespace __sanitizer;\n}\nnamespace __tsan {\nusing namespace __sanitizer;\n}\nnamespace __scudo {\nusing namespace __sanitizer;\n}\nnamespace __ubsan {\nusing namespace __sanitizer;\n}\nnamespace __xray {\nusing namespace __sanitizer;\n}\nnamespace __interception {\nusing namespace __sanitizer;\n}\nnamespace __hwasan {\nusing namespace __sanitizer;\n}\nnamespace __memprof {\nusing namespace __sanitizer;\n}\n\n#endif  // SANITIZER_DEFS_H\n"
  },
  {
    "path": "runtime/sanitizer_common/sanitizer_leb128.h",
    "content": "//===-- sanitizer_leb128.h --------------------------------------*- C++ -*-===//\n//\n// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n// See https://llvm.org/LICENSE.txt for license information.\n// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n//\n//===----------------------------------------------------------------------===//\n\n#ifndef SANITIZER_LEB128_H\n#define SANITIZER_LEB128_H\n\n#include \"sanitizer_common.h\"\n#include \"sanitizer_internal_defs.h\"\n\nnamespace __sanitizer {\n\ntemplate <typename T, typename It>\nIt EncodeSLEB128(T value, It begin, It end) {\n  bool more;\n  do {\n    u8 byte = value & 0x7f;\n    // NOTE: this assumes that this signed shift is an arithmetic right shift.\n    value >>= 7;\n    more = !((((value == 0) && ((byte & 0x40) == 0)) ||\n              ((value == -1) && ((byte & 0x40) != 0))));\n    if (more)\n      byte |= 0x80;\n    if (UNLIKELY(begin == end))\n      break;\n    *(begin++) = byte;\n  } while (more);\n  return begin;\n}\n\ntemplate <typename T, typename It>\nIt DecodeSLEB128(It begin, It end, T* v) {\n  T value = 0;\n  unsigned shift = 0;\n  u8 byte;\n  do {\n    if (UNLIKELY(begin == end))\n      return begin;\n    byte = *(begin++);\n    T slice = byte & 0x7f;\n    value |= slice << shift;\n    shift += 7;\n  } while (byte >= 128);\n  if (shift < 64 && (byte & 0x40))\n    value |= (-1ULL) << shift;\n  *v = value;\n  return begin;\n}\n\ntemplate <typename T, typename It>\nIt EncodeULEB128(T value, It begin, It end) {\n  do {\n    u8 byte = value & 0x7f;\n    value >>= 7;\n    if (value)\n      byte |= 0x80;\n    if (UNLIKELY(begin == end))\n      break;\n    *(begin++) = byte;\n  } while (value);\n  return begin;\n}\n\ntemplate <typename T, typename It>\nIt DecodeULEB128(It begin, It end, T* v) {\n  T value = 0;\n  unsigned shift = 0;\n  u8 byte;\n  do {\n    if (UNLIKELY(begin == end))\n      return begin;\n    byte = *(begin++);\n    T slice = byte & 
0x7f;\n    value += slice << shift;\n    shift += 7;\n  } while (byte >= 128);\n  *v = value;\n  return begin;\n}\n\n}  // namespace __sanitizer\n\n#endif  // SANITIZER_LEB128_H\n"
  },
  {
    "path": "runtime/sanitizer_common/sanitizer_lfstack.h",
    "content": "//===-- sanitizer_lfstack.h -=-----------------------------------*- C++ -*-===//\n//\n// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n// See https://llvm.org/LICENSE.txt for license information.\n// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n//\n//===----------------------------------------------------------------------===//\n//\n// Lock-free stack.\n// Uses 32/17 bits as ABA-counter on 32/64-bit platforms.\n// The memory passed to Push() must not be ever munmap'ed.\n// The type T must contain T *next field.\n//\n//===----------------------------------------------------------------------===//\n\n#ifndef SANITIZER_LFSTACK_H\n#define SANITIZER_LFSTACK_H\n\n#include \"sanitizer_internal_defs.h\"\n#include \"sanitizer_common.h\"\n#include \"sanitizer_atomic.h\"\n\nnamespace __sanitizer {\n\ntemplate<typename T>\nstruct LFStack {\n  void Clear() {\n    atomic_store(&head_, 0, memory_order_relaxed);\n  }\n\n  bool Empty() const {\n    return (atomic_load(&head_, memory_order_relaxed) & kPtrMask) == 0;\n  }\n\n  void Push(T *p) {\n    u64 cmp = atomic_load(&head_, memory_order_relaxed);\n    for (;;) {\n      u64 cnt = (cmp & kCounterMask) + kCounterInc;\n      u64 xch = (u64)(uptr)p | cnt;\n      p->next = (T*)(uptr)(cmp & kPtrMask);\n      if (atomic_compare_exchange_weak(&head_, &cmp, xch,\n                                       memory_order_release))\n        break;\n    }\n  }\n\n  T *Pop() {\n    u64 cmp = atomic_load(&head_, memory_order_acquire);\n    for (;;) {\n      T *cur = (T*)(uptr)(cmp & kPtrMask);\n      if (!cur)\n        return nullptr;\n      T *nxt = cur->next;\n      u64 cnt = (cmp & kCounterMask);\n      u64 xch = (u64)(uptr)nxt | cnt;\n      if (atomic_compare_exchange_weak(&head_, &cmp, xch,\n                                       memory_order_acquire))\n        return cur;\n    }\n  }\n\n  // private:\n  static const int kCounterBits = FIRST_32_SECOND_64(32, 17);\n  static const u64 
kPtrMask = ((u64)-1) >> kCounterBits;\n  static const u64 kCounterMask = ~kPtrMask;\n  static const u64 kCounterInc = kPtrMask + 1;\n\n  atomic_uint64_t head_;\n};\n} // namespace __sanitizer\n\n#endif // SANITIZER_LFSTACK_H\n"
  },
  {
    "path": "runtime/sanitizer_common/sanitizer_libc.cpp",
    "content": "//===-- sanitizer_libc.cpp ------------------------------------------------===//\n//\n// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n// See https://llvm.org/LICENSE.txt for license information.\n// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n//\n//===----------------------------------------------------------------------===//\n//\n// This file is shared between AddressSanitizer and ThreadSanitizer\n// run-time libraries. See sanitizer_libc.h for details.\n//===----------------------------------------------------------------------===//\n\n#include \"sanitizer_allocator_internal.h\"\n#include \"sanitizer_common.h\"\n#include \"sanitizer_libc.h\"\n\nnamespace __sanitizer {\n\ns64 internal_atoll(const char *nptr) {\n  return internal_simple_strtoll(nptr, nullptr, 10);\n}\n\nvoid *internal_memchr(const void *s, int c, uptr n) {\n  const char *t = (const char *)s;\n  for (uptr i = 0; i < n; ++i, ++t)\n    if (*t == c)\n      return reinterpret_cast<void *>(const_cast<char *>(t));\n  return nullptr;\n}\n\nvoid *internal_memrchr(const void *s, int c, uptr n) {\n  const char *t = (const char *)s;\n  void *res = nullptr;\n  for (uptr i = 0; i < n; ++i, ++t) {\n    if (*t == c) res = reinterpret_cast<void *>(const_cast<char *>(t));\n  }\n  return res;\n}\n\nint internal_memcmp(const void* s1, const void* s2, uptr n) {\n  const char *t1 = (const char *)s1;\n  const char *t2 = (const char *)s2;\n  for (uptr i = 0; i < n; ++i, ++t1, ++t2)\n    if (*t1 != *t2)\n      return *t1 < *t2 ? 
-1 : 1;\n  return 0;\n}\n\nvoid *internal_memcpy(void *dest, const void *src, uptr n) {\n  char *d = (char*)dest;\n  const char *s = (const char *)src;\n  for (uptr i = 0; i < n; ++i)\n    d[i] = s[i];\n  return dest;\n}\n\nvoid *internal_memmove(void *dest, const void *src, uptr n) {\n  char *d = (char*)dest;\n  const char *s = (const char *)src;\n  sptr i, signed_n = (sptr)n;\n  CHECK_GE(signed_n, 0);\n  if (d < s) {\n    for (i = 0; i < signed_n; ++i)\n      d[i] = s[i];\n  } else {\n    if (d > s && signed_n > 0) {\n      for (i = signed_n - 1; i >= 0; --i) {\n        d[i] = s[i];\n      }\n    }\n  }\n  return dest;\n}\n\nvoid *internal_memset(void* s, int c, uptr n) {\n  // Optimize for the most performance-critical case:\n  if ((reinterpret_cast<uptr>(s) % 16) == 0 && (n % 16) == 0) {\n    u64 *p = reinterpret_cast<u64*>(s);\n    u64 *e = p + n / 8;\n    u64 v = c;\n    v |= v << 8;\n    v |= v << 16;\n    v |= v << 32;\n    for (; p < e; p += 2)\n      p[0] = p[1] = v;\n    return s;\n  }\n  // The next line prevents Clang from making a call to memset() instead of the\n  // loop below.\n  // FIXME: building the runtime with -ffreestanding is a better idea. However\n  // there currently are linktime problems due to PR12396.\n  char volatile *t = (char*)s;\n  for (uptr i = 0; i < n; ++i, ++t) {\n    *t = c;\n  }\n  return s;\n}\n\nuptr internal_strcspn(const char *s, const char *reject) {\n  uptr i;\n  for (i = 0; s[i]; i++) {\n    if (internal_strchr(reject, s[i]))\n      return i;\n  }\n  return i;\n}\n\nchar* internal_strdup(const char *s) {\n  uptr len = internal_strlen(s);\n  char *s2 = (char*)InternalAlloc(len + 1);\n  internal_memcpy(s2, s, len);\n  s2[len] = 0;\n  return s2;\n}\n\nint internal_strcmp(const char *s1, const char *s2) {\n  while (true) {\n    unsigned c1 = *s1;\n    unsigned c2 = *s2;\n    if (c1 != c2) return (c1 < c2) ? 
-1 : 1;\n    if (c1 == 0) break;\n    s1++;\n    s2++;\n  }\n  return 0;\n}\n\nint internal_strncmp(const char *s1, const char *s2, uptr n) {\n  for (uptr i = 0; i < n; i++) {\n    unsigned c1 = *s1;\n    unsigned c2 = *s2;\n    if (c1 != c2) return (c1 < c2) ? -1 : 1;\n    if (c1 == 0) break;\n    s1++;\n    s2++;\n  }\n  return 0;\n}\n\nchar* internal_strchr(const char *s, int c) {\n  while (true) {\n    if (*s == (char)c)\n      return const_cast<char *>(s);\n    if (*s == 0)\n      return nullptr;\n    s++;\n  }\n}\n\nchar *internal_strchrnul(const char *s, int c) {\n  char *res = internal_strchr(s, c);\n  if (!res)\n    res = const_cast<char *>(s) + internal_strlen(s);\n  return res;\n}\n\nchar *internal_strrchr(const char *s, int c) {\n  const char *res = nullptr;\n  for (uptr i = 0; s[i]; i++) {\n    if (s[i] == c) res = s + i;\n  }\n  return const_cast<char *>(res);\n}\n\nuptr internal_strlen(const char *s) {\n  uptr i = 0;\n  while (s[i]) i++;\n  return i;\n}\n\nuptr internal_strlcat(char *dst, const char *src, uptr maxlen) {\n  const uptr srclen = internal_strlen(src);\n  const uptr dstlen = internal_strnlen(dst, maxlen);\n  if (dstlen == maxlen) return maxlen + srclen;\n  if (srclen < maxlen - dstlen) {\n    internal_memmove(dst + dstlen, src, srclen + 1);\n  } else {\n    internal_memmove(dst + dstlen, src, maxlen - dstlen - 1);\n    dst[maxlen - 1] = '\\0';\n  }\n  return dstlen + srclen;\n}\n\nchar *internal_strncat(char *dst, const char *src, uptr n) {\n  uptr len = internal_strlen(dst);\n  uptr i;\n  for (i = 0; i < n && src[i]; i++)\n    dst[len + i] = src[i];\n  dst[len + i] = 0;\n  return dst;\n}\n\nuptr internal_strlcpy(char *dst, const char *src, uptr maxlen) {\n  const uptr srclen = internal_strlen(src);\n  if (srclen < maxlen) {\n    internal_memmove(dst, src, srclen + 1);\n  } else if (maxlen != 0) {\n    internal_memmove(dst, src, maxlen - 1);\n    dst[maxlen - 1] = '\\0';\n  }\n  return srclen;\n}\n\nchar *internal_strncpy(char *dst, const 
char *src, uptr n) {\n  uptr i;\n  for (i = 0; i < n && src[i]; i++)\n    dst[i] = src[i];\n  internal_memset(dst + i, '\\0', n - i);\n  return dst;\n}\n\nuptr internal_strnlen(const char *s, uptr maxlen) {\n  uptr i = 0;\n  while (i < maxlen && s[i]) i++;\n  return i;\n}\n\nchar *internal_strstr(const char *haystack, const char *needle) {\n  // This is O(N^2), but we are not using it in hot places.\n  uptr len1 = internal_strlen(haystack);\n  uptr len2 = internal_strlen(needle);\n  if (len1 < len2) return nullptr;\n  for (uptr pos = 0; pos <= len1 - len2; pos++) {\n    if (internal_memcmp(haystack + pos, needle, len2) == 0)\n      return const_cast<char *>(haystack) + pos;\n  }\n  return nullptr;\n}\n\ns64 internal_simple_strtoll(const char *nptr, const char **endptr, int base) {\n  CHECK_EQ(base, 10);\n  while (IsSpace(*nptr)) nptr++;\n  int sgn = 1;\n  u64 res = 0;\n  bool have_digits = false;\n  char *old_nptr = const_cast<char *>(nptr);\n  if (*nptr == '+') {\n    sgn = 1;\n    nptr++;\n  } else if (*nptr == '-') {\n    sgn = -1;\n    nptr++;\n  }\n  while (IsDigit(*nptr)) {\n    res = (res <= UINT64_MAX / 10) ? res * 10 : UINT64_MAX;\n    int digit = ((*nptr) - '0');\n    res = (res <= UINT64_MAX - digit) ? res + digit : UINT64_MAX;\n    have_digits = true;\n    nptr++;\n  }\n  if (endptr) {\n    *endptr = (have_digits) ? const_cast<char *>(nptr) : old_nptr;\n  }\n  if (sgn > 0) {\n    return (s64)(Min((u64)INT64_MAX, res));\n  } else {\n    return (res > INT64_MAX) ? 
INT64_MIN : ((s64)res * -1);\n  }\n}\n\nuptr internal_wcslen(const wchar_t *s) {\n  uptr i = 0;\n  while (s[i]) i++;\n  return i;\n}\n\nuptr internal_wcsnlen(const wchar_t *s, uptr maxlen) {\n  uptr i = 0;\n  while (i < maxlen && s[i]) i++;\n  return i;\n}\n\nbool mem_is_zero(const char *beg, uptr size) {\n  CHECK_LE(size, 1ULL << FIRST_32_SECOND_64(30, 40));  // Sanity check.\n  const char *end = beg + size;\n  uptr *aligned_beg = (uptr *)RoundUpTo((uptr)beg, sizeof(uptr));\n  uptr *aligned_end = (uptr *)RoundDownTo((uptr)end, sizeof(uptr));\n  uptr all = 0;\n  // Prologue.\n  for (const char *mem = beg; mem < (char*)aligned_beg && mem < end; mem++)\n    all |= *mem;\n  // Aligned loop.\n  for (; aligned_beg < aligned_end; aligned_beg++)\n    all |= *aligned_beg;\n  // Epilogue.\n  if ((char *)aligned_end >= beg) {\n    for (const char *mem = (char *)aligned_end; mem < end; mem++) all |= *mem;\n  }\n  return all == 0;\n}\n\n} // namespace __sanitizer\n"
  },
  {
    "path": "runtime/sanitizer_common/sanitizer_libc.h",
    "content": "//===-- sanitizer_libc.h ----------------------------------------*- C++ -*-===//\n//\n// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n// See https://llvm.org/LICENSE.txt for license information.\n// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n//\n//===----------------------------------------------------------------------===//\n//\n// This file is shared between AddressSanitizer and ThreadSanitizer\n// run-time libraries.\n// These tools can not use some of the libc functions directly because those\n// functions are intercepted. Instead, we implement a tiny subset of libc here.\n// FIXME: Some of functions declared in this file are in fact POSIX, not libc.\n//===----------------------------------------------------------------------===//\n\n#ifndef SANITIZER_LIBC_H\n#define SANITIZER_LIBC_H\n\n// ----------- ATTENTION -------------\n// This header should NOT include any other headers from sanitizer runtime.\n#include \"sanitizer_internal_defs.h\"\n\nnamespace __sanitizer {\n\n// internal_X() is a custom implementation of X() for use in RTL.\n\n// String functions\ns64 internal_atoll(const char *nptr);\nvoid *internal_memchr(const void *s, int c, uptr n);\nvoid *internal_memrchr(const void *s, int c, uptr n);\nint internal_memcmp(const void* s1, const void* s2, uptr n);\nvoid *internal_memcpy(void *dest, const void *src, uptr n);\nvoid *internal_memmove(void *dest, const void *src, uptr n);\n// Should not be used in performance-critical places.\nvoid *internal_memset(void *s, int c, uptr n);\nchar* internal_strchr(const char *s, int c);\nchar *internal_strchrnul(const char *s, int c);\nint internal_strcmp(const char *s1, const char *s2);\nuptr internal_strcspn(const char *s, const char *reject);\nchar *internal_strdup(const char *s);\nuptr internal_strlen(const char *s);\nuptr internal_strlcat(char *dst, const char *src, uptr maxlen);\nchar *internal_strncat(char *dst, const char *src, uptr n);\nint 
internal_strncmp(const char *s1, const char *s2, uptr n);\nuptr internal_strlcpy(char *dst, const char *src, uptr maxlen);\nchar *internal_strncpy(char *dst, const char *src, uptr n);\nuptr internal_strnlen(const char *s, uptr maxlen);\nchar *internal_strrchr(const char *s, int c);\nchar *internal_strstr(const char *haystack, const char *needle);\n// Works only for base=10 and doesn't set errno.\ns64 internal_simple_strtoll(const char *nptr, const char **endptr, int base);\nint internal_snprintf(char *buffer, uptr length, const char *format, ...)\n    FORMAT(3, 4);\nuptr internal_wcslen(const wchar_t *s);\nuptr internal_wcsnlen(const wchar_t *s, uptr maxlen);\n\n// Return true if all bytes in [mem, mem+size) are zero.\n// Optimized for the case when the result is true.\nbool mem_is_zero(const char *mem, uptr size);\n\n// I/O\n// Define these as macros so we can use them in linker initialized global\n// structs without dynamic initialization.\n#define kInvalidFd ((fd_t)-1)\n#define kStdinFd ((fd_t)0)\n#define kStdoutFd ((fd_t)1)\n#define kStderrFd ((fd_t)2)\n\nuptr internal_ftruncate(fd_t fd, uptr size);\n\n// OS\nvoid NORETURN internal__exit(int exitcode);\nvoid internal_sleep(unsigned seconds);\nvoid internal_usleep(u64 useconds);\n\nuptr internal_getpid();\nuptr internal_getppid();\n\nint internal_dlinfo(void *handle, int request, void *p);\n\n// Threading\nuptr internal_sched_yield();\n\n// Error handling\nbool internal_iserror(uptr retval, int *rverrno = nullptr);\n\n} // namespace __sanitizer\n\n#endif // SANITIZER_LIBC_H\n"
  },
  {
    "path": "runtime/sanitizer_common/sanitizer_libignore.cpp",
    "content": "//===-- sanitizer_libignore.cpp -------------------------------------------===//\n//\n// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n// See https://llvm.org/LICENSE.txt for license information.\n// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n//\n//===----------------------------------------------------------------------===//\n\n#include \"sanitizer_platform.h\"\n\n#if SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_MAC || \\\n    SANITIZER_NETBSD\n\n#include \"sanitizer_libignore.h\"\n#include \"sanitizer_flags.h\"\n#include \"sanitizer_posix.h\"\n#include \"sanitizer_procmaps.h\"\n\nnamespace __sanitizer {\n\nLibIgnore::LibIgnore(LinkerInitialized) {\n}\n\nvoid LibIgnore::AddIgnoredLibrary(const char *name_templ) {\n  Lock lock(&mutex_);\n  if (count_ >= kMaxLibs) {\n    Report(\"%s: too many ignored libraries (max: %zu)\\n\", SanitizerToolName,\n           kMaxLibs);\n    Die();\n  }\n  Lib *lib = &libs_[count_++];\n  lib->templ = internal_strdup(name_templ);\n  lib->name = nullptr;\n  lib->real_name = nullptr;\n  lib->loaded = false;\n}\n\nvoid LibIgnore::OnLibraryLoaded(const char *name) {\n  Lock lock(&mutex_);\n  // Try to match suppressions with symlink target.\n  InternalMmapVector<char> buf(kMaxPathLength);\n  if (name && internal_readlink(name, buf.data(), buf.size() - 1) > 0 &&\n      buf[0]) {\n    for (uptr i = 0; i < count_; i++) {\n      Lib *lib = &libs_[i];\n      if (!lib->loaded && (!lib->real_name) &&\n          TemplateMatch(lib->templ, name))\n        lib->real_name = internal_strdup(buf.data());\n    }\n  }\n\n  // Scan suppressions list and find newly loaded and unloaded libraries.\n  ListOfModules modules;\n  modules.init();\n  for (uptr i = 0; i < count_; i++) {\n    Lib *lib = &libs_[i];\n    bool loaded = false;\n    for (const auto &mod : modules) {\n      for (const auto &range : mod.ranges()) {\n        if (!range.executable)\n          continue;\n        if 
(!TemplateMatch(lib->templ, mod.full_name()) &&\n            !(lib->real_name &&\n            internal_strcmp(lib->real_name, mod.full_name()) == 0))\n          continue;\n        if (loaded) {\n          Report(\"%s: called_from_lib suppression '%s' is matched against\"\n                 \" 2 libraries: '%s' and '%s'\\n\",\n                 SanitizerToolName, lib->templ, lib->name, mod.full_name());\n          Die();\n        }\n        loaded = true;\n        if (lib->loaded)\n          continue;\n        VReport(1,\n                \"Matched called_from_lib suppression '%s' against library\"\n                \" '%s'\\n\",\n                lib->templ, mod.full_name());\n        lib->loaded = true;\n        lib->name = internal_strdup(mod.full_name());\n        const uptr idx =\n            atomic_load(&ignored_ranges_count_, memory_order_relaxed);\n        CHECK_LT(idx, ARRAY_SIZE(ignored_code_ranges_));\n        ignored_code_ranges_[idx].begin = range.beg;\n        ignored_code_ranges_[idx].end = range.end;\n        atomic_store(&ignored_ranges_count_, idx + 1, memory_order_release);\n        break;\n      }\n    }\n    if (lib->loaded && !loaded) {\n      Report(\"%s: library '%s' that was matched against called_from_lib\"\n             \" suppression '%s' is unloaded\\n\",\n             SanitizerToolName, lib->name, lib->templ);\n      Die();\n    }\n  }\n\n  // Track instrumented ranges.\n  if (track_instrumented_libs_) {\n    for (const auto &mod : modules) {\n      if (!mod.instrumented())\n        continue;\n      for (const auto &range : mod.ranges()) {\n        if (!range.executable)\n          continue;\n        if (IsPcInstrumented(range.beg) && IsPcInstrumented(range.end - 1))\n          continue;\n        VReport(1, \"Adding instrumented range 0x%zx-0x%zx from library '%s'\\n\",\n                range.beg, range.end, mod.full_name());\n        const uptr idx =\n            atomic_load(&instrumented_ranges_count_, memory_order_relaxed);\n        
CHECK_LT(idx, ARRAY_SIZE(instrumented_code_ranges_));\n        instrumented_code_ranges_[idx].begin = range.beg;\n        instrumented_code_ranges_[idx].end = range.end;\n        atomic_store(&instrumented_ranges_count_, idx + 1,\n                     memory_order_release);\n      }\n    }\n  }\n}\n\nvoid LibIgnore::OnLibraryUnloaded() {\n  OnLibraryLoaded(nullptr);\n}\n\n} // namespace __sanitizer\n\n#endif  // SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_MAC ||\n        // SANITIZER_NETBSD\n"
  },
  {
    "path": "runtime/sanitizer_common/sanitizer_libignore.h",
    "content": "//===-- sanitizer_libignore.h -----------------------------------*- C++ -*-===//\n//\n// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n// See https://llvm.org/LICENSE.txt for license information.\n// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n//\n//===----------------------------------------------------------------------===//\n//\n// LibIgnore allows to ignore all interceptors called from a particular set\n// of dynamic libraries. LibIgnore can be initialized with several templates\n// of names of libraries to be ignored. It finds code ranges for the libraries;\n// and checks whether the provided PC value belongs to the code ranges.\n//\n//===----------------------------------------------------------------------===//\n\n#ifndef SANITIZER_LIBIGNORE_H\n#define SANITIZER_LIBIGNORE_H\n\n#include \"sanitizer_internal_defs.h\"\n#include \"sanitizer_common.h\"\n#include \"sanitizer_atomic.h\"\n#include \"sanitizer_mutex.h\"\n\nnamespace __sanitizer {\n\nclass LibIgnore {\n public:\n  explicit LibIgnore(LinkerInitialized);\n\n  // Must be called during initialization.\n  void AddIgnoredLibrary(const char *name_templ);\n  void IgnoreNoninstrumentedModules(bool enable) {\n    track_instrumented_libs_ = enable;\n  }\n\n  // Must be called after a new dynamic library is loaded.\n  void OnLibraryLoaded(const char *name);\n\n  // Must be called after a dynamic library is unloaded.\n  void OnLibraryUnloaded();\n\n  // Checks whether the provided PC belongs to one of the ignored libraries or\n  // the PC should be ignored because it belongs to an non-instrumented module\n  // (when ignore_noninstrumented_modules=1). 
Also returns true via\n  // \"pc_in_ignored_lib\" if the PC is in an ignored library, false otherwise.\n  bool IsIgnored(uptr pc, bool *pc_in_ignored_lib) const;\n\n  // Checks whether the provided PC belongs to an instrumented module.\n  bool IsPcInstrumented(uptr pc) const;\n\n private:\n  struct Lib {\n    char *templ;\n    char *name;\n    char *real_name;  // target of symlink\n    bool loaded;\n  };\n\n  struct LibCodeRange {\n    uptr begin;\n    uptr end;\n  };\n\n  inline bool IsInRange(uptr pc, const LibCodeRange &range) const {\n    return (pc >= range.begin && pc < range.end);\n  }\n\n  static const uptr kMaxIgnoredRanges = 128;\n  static const uptr kMaxInstrumentedRanges = 1024;\n  static const uptr kMaxLibs = 1024;\n\n  // Hot part:\n  atomic_uintptr_t ignored_ranges_count_;\n  LibCodeRange ignored_code_ranges_[kMaxIgnoredRanges];\n\n  atomic_uintptr_t instrumented_ranges_count_;\n  LibCodeRange instrumented_code_ranges_[kMaxInstrumentedRanges];\n\n  // Cold part:\n  Mutex mutex_;\n  uptr count_;\n  Lib libs_[kMaxLibs];\n  bool track_instrumented_libs_;\n\n  // Disallow copying of LibIgnore objects.\n  LibIgnore(const LibIgnore&);  // not implemented\n  void operator = (const LibIgnore&);  // not implemented\n};\n\ninline bool LibIgnore::IsIgnored(uptr pc, bool *pc_in_ignored_lib) const {\n  const uptr n = atomic_load(&ignored_ranges_count_, memory_order_acquire);\n  for (uptr i = 0; i < n; i++) {\n    if (IsInRange(pc, ignored_code_ranges_[i])) {\n      *pc_in_ignored_lib = true;\n      return true;\n    }\n  }\n  *pc_in_ignored_lib = false;\n  if (track_instrumented_libs_ && !IsPcInstrumented(pc))\n    return true;\n  return false;\n}\n\ninline bool LibIgnore::IsPcInstrumented(uptr pc) const {\n  const uptr n = atomic_load(&instrumented_ranges_count_, memory_order_acquire);\n  for (uptr i = 0; i < n; i++) {\n    if (IsInRange(pc, instrumented_code_ranges_[i]))\n      return true;\n  }\n  return false;\n}\n\n}  // namespace __sanitizer\n\n#endif  // 
SANITIZER_LIBIGNORE_H\n"
  },
  {
    "path": "runtime/sanitizer_common/sanitizer_linux.cpp",
    "content": "//===-- sanitizer_linux.cpp -----------------------------------------------===//\n//\n// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n// See https://llvm.org/LICENSE.txt for license information.\n// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n//\n//===----------------------------------------------------------------------===//\n//\n// This file is shared between AddressSanitizer and ThreadSanitizer\n// run-time libraries and implements linux-specific functions from\n// sanitizer_libc.h.\n//===----------------------------------------------------------------------===//\n\n#include \"sanitizer_platform.h\"\n\n#if SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_NETBSD || \\\n    SANITIZER_SOLARIS\n\n#include \"sanitizer_common.h\"\n#include \"sanitizer_flags.h\"\n#include \"sanitizer_getauxval.h\"\n#include \"sanitizer_internal_defs.h\"\n#include \"sanitizer_libc.h\"\n#include \"sanitizer_linux.h\"\n#include \"sanitizer_mutex.h\"\n#include \"sanitizer_placement_new.h\"\n#include \"sanitizer_procmaps.h\"\n\n#if SANITIZER_LINUX && !SANITIZER_GO\n#include <asm/param.h>\n#endif\n\n// For mips64, syscall(__NR_stat) fills the buffer in the 'struct kernel_stat'\n// format. Struct kernel_stat is defined as 'struct stat' in asm/stat.h. 
To\n// access stat from asm/stat.h, without conflicting with definition in\n// sys/stat.h, we use this trick.\n#if defined(__mips64)\n#include <asm/unistd.h>\n#include <sys/types.h>\n#define stat kernel_stat\n#if SANITIZER_GO\n#undef st_atime\n#undef st_mtime\n#undef st_ctime\n#define st_atime st_atim\n#define st_mtime st_mtim\n#define st_ctime st_ctim\n#endif\n#include <asm/stat.h>\n#undef stat\n#endif\n\n#include <dlfcn.h>\n#include <errno.h>\n#include <fcntl.h>\n#include <link.h>\n#include <pthread.h>\n#include <sched.h>\n#include <signal.h>\n#include <sys/mman.h>\n#include <sys/param.h>\n#if !SANITIZER_SOLARIS\n#include <sys/ptrace.h>\n#endif\n#include <sys/resource.h>\n#include <sys/stat.h>\n#include <sys/syscall.h>\n#include <sys/time.h>\n#include <sys/types.h>\n#include <ucontext.h>\n#include <unistd.h>\n\n#if SANITIZER_LINUX\n#include <sys/utsname.h>\n#endif\n\n#if SANITIZER_LINUX && !SANITIZER_ANDROID\n#include <sys/personality.h>\n#endif\n\n#if SANITIZER_FREEBSD\n#include <sys/exec.h>\n#include <sys/procctl.h>\n#include <sys/sysctl.h>\n#include <machine/atomic.h>\nextern \"C\" {\n// <sys/umtx.h> must be included after <errno.h> and <sys/types.h> on\n// FreeBSD 9.2 and 10.0.\n#include <sys/umtx.h>\n}\n#include <sys/thr.h>\n#endif  // SANITIZER_FREEBSD\n\n#if SANITIZER_NETBSD\n#include <limits.h>  // For NAME_MAX\n#include <sys/sysctl.h>\n#include <sys/exec.h>\nextern struct ps_strings *__ps_strings;\n#endif  // SANITIZER_NETBSD\n\n#if SANITIZER_SOLARIS\n#include <stdlib.h>\n#include <thread.h>\n#define environ _environ\n#endif\n\nextern char **environ;\n\n#if SANITIZER_LINUX\n// <linux/time.h>\nstruct kernel_timeval {\n  long tv_sec;\n  long tv_usec;\n};\n\n// <linux/futex.h> is broken on some linux distributions.\nconst int FUTEX_WAIT = 0;\nconst int FUTEX_WAKE = 1;\nconst int FUTEX_PRIVATE_FLAG = 128;\nconst int FUTEX_WAIT_PRIVATE = FUTEX_WAIT | FUTEX_PRIVATE_FLAG;\nconst int FUTEX_WAKE_PRIVATE = FUTEX_WAKE | FUTEX_PRIVATE_FLAG;\n#endif  // 
SANITIZER_LINUX\n\n// Are we using 32-bit or 64-bit Linux syscalls?\n// x32 (which defines __x86_64__) has SANITIZER_WORDSIZE == 32\n// but it still needs to use 64-bit syscalls.\n#if SANITIZER_LINUX && (defined(__x86_64__) || defined(__powerpc64__) ||       \\\n                        SANITIZER_WORDSIZE == 64)\n# define SANITIZER_LINUX_USES_64BIT_SYSCALLS 1\n#else\n# define SANITIZER_LINUX_USES_64BIT_SYSCALLS 0\n#endif\n\n// Note : FreeBSD had implemented both\n// Linux apis, available from\n// future 12.x version most likely\n#if SANITIZER_LINUX && defined(__NR_getrandom)\n# if !defined(GRND_NONBLOCK)\n#  define GRND_NONBLOCK 1\n# endif\n# define SANITIZER_USE_GETRANDOM 1\n#else\n# define SANITIZER_USE_GETRANDOM 0\n#endif  // SANITIZER_LINUX && defined(__NR_getrandom)\n\n#if SANITIZER_FREEBSD && __FreeBSD_version >= 1200000\n#  define SANITIZER_USE_GETENTROPY 1\n#else\n#  define SANITIZER_USE_GETENTROPY 0\n#endif\n\nnamespace __sanitizer {\n\nvoid SetSigProcMask(__sanitizer_sigset_t *set, __sanitizer_sigset_t *old) {\n  CHECK_EQ(0, internal_sigprocmask(SIG_SETMASK, set, old));\n}\n\nScopedBlockSignals::ScopedBlockSignals(__sanitizer_sigset_t *copy) {\n  __sanitizer_sigset_t set;\n  internal_sigfillset(&set);\n#  if SANITIZER_LINUX && !SANITIZER_ANDROID\n  // Glibc uses SIGSETXID signal during setuid call. 
If this signal is blocked\n  // on any thread, setuid call hangs.\n  // See test/sanitizer_common/TestCases/Linux/setuid.c.\n  internal_sigdelset(&set, 33);\n#  endif\n#  if SANITIZER_LINUX\n  // Seccomp-BPF-sandboxed processes rely on SIGSYS to handle trapped syscalls.\n  // If this signal is blocked, such calls cannot be handled and the process may\n  // hang.\n  internal_sigdelset(&set, 31);\n#  endif\n  SetSigProcMask(&set, &saved_);\n  if (copy)\n    internal_memcpy(copy, &saved_, sizeof(saved_));\n}\n\nScopedBlockSignals::~ScopedBlockSignals() { SetSigProcMask(&saved_, nullptr); }\n\n#  if SANITIZER_LINUX && defined(__x86_64__)\n#    include \"sanitizer_syscall_linux_x86_64.inc\"\n#  elif SANITIZER_LINUX && SANITIZER_RISCV64\n#    include \"sanitizer_syscall_linux_riscv64.inc\"\n#  elif SANITIZER_LINUX && defined(__aarch64__)\n#    include \"sanitizer_syscall_linux_aarch64.inc\"\n#  elif SANITIZER_LINUX && defined(__arm__)\n#    include \"sanitizer_syscall_linux_arm.inc\"\n#  elif SANITIZER_LINUX && defined(__hexagon__)\n#    include \"sanitizer_syscall_linux_hexagon.inc\"\n#  else\n#    include \"sanitizer_syscall_generic.inc\"\n#  endif\n\n// --------------- sanitizer_libc.h\n#if !SANITIZER_SOLARIS && !SANITIZER_NETBSD\n#if !SANITIZER_S390\nuptr internal_mmap(void *addr, uptr length, int prot, int flags, int fd,\n                   u64 offset) {\n#if SANITIZER_FREEBSD || SANITIZER_LINUX_USES_64BIT_SYSCALLS\n  return internal_syscall(SYSCALL(mmap), (uptr)addr, length, prot, flags, fd,\n                          offset);\n#else\n  // mmap2 specifies file offset in 4096-byte units.\n  CHECK(IsAligned(offset, 4096));\n  return internal_syscall(SYSCALL(mmap2), addr, length, prot, flags, fd,\n                          offset / 4096);\n#endif\n}\n#endif // !SANITIZER_S390\n\nuptr internal_munmap(void *addr, uptr length) {\n  return internal_syscall(SYSCALL(munmap), (uptr)addr, length);\n}\n\n#if SANITIZER_LINUX\nuptr internal_mremap(void *old_address, uptr 
old_size, uptr new_size, int flags,\n                     void *new_address) {\n  return internal_syscall(SYSCALL(mremap), (uptr)old_address, old_size,\n                          new_size, flags, (uptr)new_address);\n}\n#endif\n\nint internal_mprotect(void *addr, uptr length, int prot) {\n  return internal_syscall(SYSCALL(mprotect), (uptr)addr, length, prot);\n}\n\nint internal_madvise(uptr addr, uptr length, int advice) {\n  return internal_syscall(SYSCALL(madvise), addr, length, advice);\n}\n\nuptr internal_close(fd_t fd) {\n  return internal_syscall(SYSCALL(close), fd);\n}\n\nuptr internal_open(const char *filename, int flags) {\n#if SANITIZER_USES_CANONICAL_LINUX_SYSCALLS\n  return internal_syscall(SYSCALL(openat), AT_FDCWD, (uptr)filename, flags);\n#else\n  return internal_syscall(SYSCALL(open), (uptr)filename, flags);\n#endif\n}\n\nuptr internal_open(const char *filename, int flags, u32 mode) {\n#if SANITIZER_USES_CANONICAL_LINUX_SYSCALLS\n  return internal_syscall(SYSCALL(openat), AT_FDCWD, (uptr)filename, flags,\n                          mode);\n#else\n  return internal_syscall(SYSCALL(open), (uptr)filename, flags, mode);\n#endif\n}\n\nuptr internal_read(fd_t fd, void *buf, uptr count) {\n  sptr res;\n  HANDLE_EINTR(res,\n               (sptr)internal_syscall(SYSCALL(read), fd, (uptr)buf, count));\n  return res;\n}\n\nuptr internal_write(fd_t fd, const void *buf, uptr count) {\n  sptr res;\n  HANDLE_EINTR(res,\n               (sptr)internal_syscall(SYSCALL(write), fd, (uptr)buf, count));\n  return res;\n}\n\nuptr internal_ftruncate(fd_t fd, uptr size) {\n  sptr res;\n  HANDLE_EINTR(res, (sptr)internal_syscall(SYSCALL(ftruncate), fd,\n               (OFF_T)size));\n  return res;\n}\n\n#if !SANITIZER_LINUX_USES_64BIT_SYSCALLS && SANITIZER_LINUX\nstatic void stat64_to_stat(struct stat64 *in, struct stat *out) {\n  internal_memset(out, 0, sizeof(*out));\n  out->st_dev = in->st_dev;\n  out->st_ino = in->st_ino;\n  out->st_mode = in->st_mode;\n  out->st_nlink = 
in->st_nlink;\n  out->st_uid = in->st_uid;\n  out->st_gid = in->st_gid;\n  out->st_rdev = in->st_rdev;\n  out->st_size = in->st_size;\n  out->st_blksize = in->st_blksize;\n  out->st_blocks = in->st_blocks;\n  out->st_atime = in->st_atime;\n  out->st_mtime = in->st_mtime;\n  out->st_ctime = in->st_ctime;\n}\n#endif\n\n#if defined(__mips64)\n// Undefine compatibility macros from <sys/stat.h>\n// so that they would not clash with the kernel_stat\n// st_[a|m|c]time fields\n#if !SANITIZER_GO\n#undef st_atime\n#undef st_mtime\n#undef st_ctime\n#endif\n#if defined(SANITIZER_ANDROID)\n// Bionic sys/stat.h defines additional macros\n// for compatibility with the old NDKs and\n// they clash with the kernel_stat structure\n// st_[a|m|c]time_nsec fields.\n#undef st_atime_nsec\n#undef st_mtime_nsec\n#undef st_ctime_nsec\n#endif\nstatic void kernel_stat_to_stat(struct kernel_stat *in, struct stat *out) {\n  internal_memset(out, 0, sizeof(*out));\n  out->st_dev = in->st_dev;\n  out->st_ino = in->st_ino;\n  out->st_mode = in->st_mode;\n  out->st_nlink = in->st_nlink;\n  out->st_uid = in->st_uid;\n  out->st_gid = in->st_gid;\n  out->st_rdev = in->st_rdev;\n  out->st_size = in->st_size;\n  out->st_blksize = in->st_blksize;\n  out->st_blocks = in->st_blocks;\n#if defined(__USE_MISC)     || \\\n    defined(__USE_XOPEN2K8) || \\\n    defined(SANITIZER_ANDROID)\n  out->st_atim.tv_sec = in->st_atime;\n  out->st_atim.tv_nsec = in->st_atime_nsec;\n  out->st_mtim.tv_sec = in->st_mtime;\n  out->st_mtim.tv_nsec = in->st_mtime_nsec;\n  out->st_ctim.tv_sec = in->st_ctime;\n  out->st_ctim.tv_nsec = in->st_ctime_nsec;\n#else\n  out->st_atime = in->st_atime;\n  out->st_atimensec = in->st_atime_nsec;\n  out->st_mtime = in->st_mtime;\n  out->st_mtimensec = in->st_mtime_nsec;\n  out->st_ctime = in->st_ctime;\n  out->st_ctimensec = in->st_ctime_nsec;\n#endif\n}\n#endif\n\nuptr internal_stat(const char *path, void *buf) {\n#if SANITIZER_FREEBSD\n  return internal_syscall(SYSCALL(fstatat), AT_FDCWD, 
(uptr)path, (uptr)buf, 0);\n#elif SANITIZER_USES_CANONICAL_LINUX_SYSCALLS\n  return internal_syscall(SYSCALL(newfstatat), AT_FDCWD, (uptr)path, (uptr)buf,\n                          0);\n#elif SANITIZER_LINUX_USES_64BIT_SYSCALLS\n# if defined(__mips64)\n  // For mips64, stat syscall fills buffer in the format of kernel_stat\n  struct kernel_stat kbuf;\n  int res = internal_syscall(SYSCALL(stat), path, &kbuf);\n  kernel_stat_to_stat(&kbuf, (struct stat *)buf);\n  return res;\n# else\n  return internal_syscall(SYSCALL(stat), (uptr)path, (uptr)buf);\n# endif\n#else\n  struct stat64 buf64;\n  int res = internal_syscall(SYSCALL(stat64), path, &buf64);\n  stat64_to_stat(&buf64, (struct stat *)buf);\n  return res;\n#endif\n}\n\nuptr internal_lstat(const char *path, void *buf) {\n#if SANITIZER_FREEBSD\n  return internal_syscall(SYSCALL(fstatat), AT_FDCWD, (uptr)path, (uptr)buf,\n                          AT_SYMLINK_NOFOLLOW);\n#elif SANITIZER_USES_CANONICAL_LINUX_SYSCALLS\n  return internal_syscall(SYSCALL(newfstatat), AT_FDCWD, (uptr)path, (uptr)buf,\n                          AT_SYMLINK_NOFOLLOW);\n#elif SANITIZER_LINUX_USES_64BIT_SYSCALLS\n# if SANITIZER_MIPS64\n  // For mips64, lstat syscall fills buffer in the format of kernel_stat\n  struct kernel_stat kbuf;\n  int res = internal_syscall(SYSCALL(lstat), path, &kbuf);\n  kernel_stat_to_stat(&kbuf, (struct stat *)buf);\n  return res;\n# else\n  return internal_syscall(SYSCALL(lstat), (uptr)path, (uptr)buf);\n# endif\n#else\n  struct stat64 buf64;\n  int res = internal_syscall(SYSCALL(lstat64), path, &buf64);\n  stat64_to_stat(&buf64, (struct stat *)buf);\n  return res;\n#endif\n}\n\nuptr internal_fstat(fd_t fd, void *buf) {\n#if SANITIZER_FREEBSD || SANITIZER_LINUX_USES_64BIT_SYSCALLS\n#if SANITIZER_MIPS64\n  // For mips64, fstat syscall fills buffer in the format of kernel_stat\n  struct kernel_stat kbuf;\n  int res = internal_syscall(SYSCALL(fstat), fd, &kbuf);\n  kernel_stat_to_stat(&kbuf, (struct stat *)buf);\n  
return res;\n# else\n  return internal_syscall(SYSCALL(fstat), fd, (uptr)buf);\n# endif\n#else\n  struct stat64 buf64;\n  int res = internal_syscall(SYSCALL(fstat64), fd, &buf64);\n  stat64_to_stat(&buf64, (struct stat *)buf);\n  return res;\n#endif\n}\n\nuptr internal_filesize(fd_t fd) {\n  struct stat st;\n  if (internal_fstat(fd, &st))\n    return -1;\n  return (uptr)st.st_size;\n}\n\nuptr internal_dup(int oldfd) {\n  return internal_syscall(SYSCALL(dup), oldfd);\n}\n\nuptr internal_dup2(int oldfd, int newfd) {\n#if SANITIZER_USES_CANONICAL_LINUX_SYSCALLS\n  return internal_syscall(SYSCALL(dup3), oldfd, newfd, 0);\n#else\n  return internal_syscall(SYSCALL(dup2), oldfd, newfd);\n#endif\n}\n\nuptr internal_readlink(const char *path, char *buf, uptr bufsize) {\n#if SANITIZER_USES_CANONICAL_LINUX_SYSCALLS\n  return internal_syscall(SYSCALL(readlinkat), AT_FDCWD, (uptr)path, (uptr)buf,\n                          bufsize);\n#else\n  return internal_syscall(SYSCALL(readlink), (uptr)path, (uptr)buf, bufsize);\n#endif\n}\n\nuptr internal_unlink(const char *path) {\n#if SANITIZER_USES_CANONICAL_LINUX_SYSCALLS\n  return internal_syscall(SYSCALL(unlinkat), AT_FDCWD, (uptr)path, 0);\n#else\n  return internal_syscall(SYSCALL(unlink), (uptr)path);\n#endif\n}\n\nuptr internal_rename(const char *oldpath, const char *newpath) {\n#if defined(__riscv) && defined(__linux__)\n  return internal_syscall(SYSCALL(renameat2), AT_FDCWD, (uptr)oldpath, AT_FDCWD,\n                          (uptr)newpath, 0);\n#elif SANITIZER_USES_CANONICAL_LINUX_SYSCALLS\n  return internal_syscall(SYSCALL(renameat), AT_FDCWD, (uptr)oldpath, AT_FDCWD,\n                          (uptr)newpath);\n#else\n  return internal_syscall(SYSCALL(rename), (uptr)oldpath, (uptr)newpath);\n#endif\n}\n\nuptr internal_sched_yield() {\n  return internal_syscall(SYSCALL(sched_yield));\n}\n\nvoid internal_usleep(u64 useconds) {\n  struct timespec ts;\n  ts.tv_sec = useconds / 1000000;\n  ts.tv_nsec = (useconds % 1000000) * 
1000;\n  internal_syscall(SYSCALL(nanosleep), &ts, &ts);\n}\n\nuptr internal_execve(const char *filename, char *const argv[],\n                     char *const envp[]) {\n  return internal_syscall(SYSCALL(execve), (uptr)filename, (uptr)argv,\n                          (uptr)envp);\n}\n#endif  // !SANITIZER_SOLARIS && !SANITIZER_NETBSD\n\n#if !SANITIZER_NETBSD\nvoid internal__exit(int exitcode) {\n#if SANITIZER_FREEBSD || SANITIZER_SOLARIS\n  internal_syscall(SYSCALL(exit), exitcode);\n#else\n  internal_syscall(SYSCALL(exit_group), exitcode);\n#endif\n  Die();  // Unreachable.\n}\n#endif  // !SANITIZER_NETBSD\n\n// ----------------- sanitizer_common.h\nbool FileExists(const char *filename) {\n  if (ShouldMockFailureToOpen(filename))\n    return false;\n  struct stat st;\n#if SANITIZER_USES_CANONICAL_LINUX_SYSCALLS\n  if (internal_syscall(SYSCALL(newfstatat), AT_FDCWD, filename, &st, 0))\n#else\n  if (internal_stat(filename, &st))\n#endif\n    return false;\n  // Sanity check: filename is a regular file.\n  return S_ISREG(st.st_mode);\n}\n\n#if !SANITIZER_NETBSD\ntid_t GetTid() {\n#if SANITIZER_FREEBSD\n  long Tid;\n  thr_self(&Tid);\n  return Tid;\n#elif SANITIZER_SOLARIS\n  return thr_self();\n#else\n  return internal_syscall(SYSCALL(gettid));\n#endif\n}\n\nint TgKill(pid_t pid, tid_t tid, int sig) {\n#if SANITIZER_LINUX\n  return internal_syscall(SYSCALL(tgkill), pid, tid, sig);\n#elif SANITIZER_FREEBSD\n  return internal_syscall(SYSCALL(thr_kill2), pid, tid, sig);\n#elif SANITIZER_SOLARIS\n  (void)pid;\n  return thr_kill(tid, sig);\n#endif\n}\n#endif\n\n#if SANITIZER_GLIBC\nu64 NanoTime() {\n  kernel_timeval tv;\n  internal_memset(&tv, 0, sizeof(tv));\n  internal_syscall(SYSCALL(gettimeofday), &tv, 0);\n  return (u64)tv.tv_sec * 1000 * 1000 * 1000 + tv.tv_usec * 1000;\n}\n// Used by real_clock_gettime.\nuptr internal_clock_gettime(__sanitizer_clockid_t clk_id, void *tp) {\n  return internal_syscall(SYSCALL(clock_gettime), clk_id, tp);\n}\n#elif !SANITIZER_SOLARIS 
&& !SANITIZER_NETBSD\nu64 NanoTime() {\n  struct timespec ts;\n  clock_gettime(CLOCK_REALTIME, &ts);\n  return (u64)ts.tv_sec * 1000 * 1000 * 1000 + ts.tv_nsec;\n}\n#endif\n\n// Like getenv, but reads env directly from /proc (on Linux) or parses the\n// 'environ' array (on some others) and does not use libc. This function\n// should be called first inside __asan_init.\nconst char *GetEnv(const char *name) {\n#if SANITIZER_FREEBSD || SANITIZER_NETBSD || SANITIZER_SOLARIS\n  if (::environ != 0) {\n    uptr NameLen = internal_strlen(name);\n    for (char **Env = ::environ; *Env != 0; Env++) {\n      if (internal_strncmp(*Env, name, NameLen) == 0 && (*Env)[NameLen] == '=')\n        return (*Env) + NameLen + 1;\n    }\n  }\n  return 0;  // Not found.\n#elif SANITIZER_LINUX\n  static char *environ;\n  static uptr len;\n  static bool inited;\n  if (!inited) {\n    inited = true;\n    uptr environ_size;\n    if (!ReadFileToBuffer(\"/proc/self/environ\", &environ, &environ_size, &len))\n      environ = nullptr;\n  }\n  if (!environ || len == 0) return nullptr;\n  uptr namelen = internal_strlen(name);\n  const char *p = environ;\n  while (*p != '\\0') {  // will happen at the \\0\\0 that terminates the buffer\n    // proc file has the format NAME=value\\0NAME=value\\0NAME=value\\0...\n    const char* endp =\n        (char*)internal_memchr(p, '\\0', len - (p - environ));\n    if (!endp)  // this entry isn't NUL terminated\n      return nullptr;\n    else if (!internal_memcmp(p, name, namelen) && p[namelen] == '=')  // Match.\n      return p + namelen + 1;  // point after =\n    p = endp + 1;\n  }\n  return nullptr;  // Not found.\n#else\n#error \"Unsupported platform\"\n#endif\n}\n\n#if !SANITIZER_FREEBSD && !SANITIZER_NETBSD && !SANITIZER_GO\nextern \"C\" {\nSANITIZER_WEAK_ATTRIBUTE extern void *__libc_stack_end;\n}\n#endif\n\n#if !SANITIZER_FREEBSD && !SANITIZER_NETBSD\nstatic void ReadNullSepFileToArray(const char *path, char ***arr,\n                                   int 
arr_size) {\n  char *buff;\n  uptr buff_size;\n  uptr buff_len;\n  *arr = (char **)MmapOrDie(arr_size * sizeof(char *), \"NullSepFileArray\");\n  if (!ReadFileToBuffer(path, &buff, &buff_size, &buff_len, 1024 * 1024)) {\n    (*arr)[0] = nullptr;\n    return;\n  }\n  (*arr)[0] = buff;\n  int count, i;\n  for (count = 1, i = 1; ; i++) {\n    if (buff[i] == 0) {\n      if (buff[i+1] == 0) break;\n      (*arr)[count] = &buff[i+1];\n      CHECK_LE(count, arr_size - 1);  // FIXME: make this more flexible.\n      count++;\n    }\n  }\n  (*arr)[count] = nullptr;\n}\n#endif\n\nstatic void GetArgsAndEnv(char ***argv, char ***envp) {\n#if SANITIZER_FREEBSD\n  // On FreeBSD, retrieving the argument and environment arrays is done via the\n  // kern.ps_strings sysctl, which returns a pointer to a structure containing\n  // this information. See also <sys/exec.h>.\n  ps_strings *pss;\n  uptr sz = sizeof(pss);\n  if (internal_sysctlbyname(\"kern.ps_strings\", &pss, &sz, NULL, 0) == -1) {\n    Printf(\"sysctl kern.ps_strings failed\\n\");\n    Die();\n  }\n  *argv = pss->ps_argvstr;\n  *envp = pss->ps_envstr;\n#elif SANITIZER_NETBSD\n  *argv = __ps_strings->ps_argvstr;\n  *envp = __ps_strings->ps_envstr;\n#else // SANITIZER_FREEBSD\n#if !SANITIZER_GO\n  if (&__libc_stack_end) {\n    uptr* stack_end = (uptr*)__libc_stack_end;\n    // Normally argc can be obtained from *stack_end, however, on ARM glibc's\n    // _start clobbers it:\n    // https://sourceware.org/git/?p=glibc.git;a=blob;f=sysdeps/arm/start.S;hb=refs/heads/release/2.31/master#l75\n    // Do not special-case ARM and infer argc from argv everywhere.\n    int argc = 0;\n    while (stack_end[argc + 1]) argc++;\n    *argv = (char**)(stack_end + 1);\n    *envp = (char**)(stack_end + argc + 2);\n  } else {\n#endif // !SANITIZER_GO\n    static const int kMaxArgv = 2000, kMaxEnvp = 2000;\n    ReadNullSepFileToArray(\"/proc/self/cmdline\", argv, kMaxArgv);\n    ReadNullSepFileToArray(\"/proc/self/environ\", envp, kMaxEnvp);\n#if 
!SANITIZER_GO\n  }\n#endif // !SANITIZER_GO\n#endif // SANITIZER_FREEBSD\n}\n\nchar **GetArgv() {\n  char **argv, **envp;\n  GetArgsAndEnv(&argv, &envp);\n  return argv;\n}\n\nchar **GetEnviron() {\n  char **argv, **envp;\n  GetArgsAndEnv(&argv, &envp);\n  return envp;\n}\n\n#if !SANITIZER_SOLARIS\nvoid FutexWait(atomic_uint32_t *p, u32 cmp) {\n#    if SANITIZER_FREEBSD\n  _umtx_op(p, UMTX_OP_WAIT_UINT, cmp, 0, 0);\n#    elif SANITIZER_NETBSD\n  sched_yield();   /* No userspace futex-like synchronization */\n#    else\n  internal_syscall(SYSCALL(futex), (uptr)p, FUTEX_WAIT_PRIVATE, cmp, 0, 0, 0);\n#    endif\n}\n\nvoid FutexWake(atomic_uint32_t *p, u32 count) {\n#    if SANITIZER_FREEBSD\n  _umtx_op(p, UMTX_OP_WAKE, count, 0, 0);\n#    elif SANITIZER_NETBSD\n                   /* No userspace futex-like synchronization */\n#    else\n  internal_syscall(SYSCALL(futex), (uptr)p, FUTEX_WAKE_PRIVATE, count, 0, 0, 0);\n#    endif\n}\n\n#  endif  // !SANITIZER_SOLARIS\n\n// ----------------- sanitizer_linux.h\n// The actual size of this structure is specified by d_reclen.\n// Note that getdents64 uses a different structure format. 
We only provide the\n// 32-bit syscall here.\n#if SANITIZER_NETBSD\n// Not used\n#else\nstruct linux_dirent {\n#if SANITIZER_X32 || defined(__aarch64__) || SANITIZER_RISCV64\n  u64 d_ino;\n  u64 d_off;\n#else\n  unsigned long      d_ino;\n  unsigned long      d_off;\n#endif\n  unsigned short     d_reclen;\n#if defined(__aarch64__) || SANITIZER_RISCV64\n  unsigned char      d_type;\n#endif\n  char               d_name[256];\n};\n#endif\n\n#if !SANITIZER_SOLARIS && !SANITIZER_NETBSD\n// Syscall wrappers.\nuptr internal_ptrace(int request, int pid, void *addr, void *data) {\n  return internal_syscall(SYSCALL(ptrace), request, pid, (uptr)addr,\n                          (uptr)data);\n}\n\nuptr internal_waitpid(int pid, int *status, int options) {\n  return internal_syscall(SYSCALL(wait4), pid, (uptr)status, options,\n                          0 /* rusage */);\n}\n\nuptr internal_getpid() {\n  return internal_syscall(SYSCALL(getpid));\n}\n\nuptr internal_getppid() {\n  return internal_syscall(SYSCALL(getppid));\n}\n\nint internal_dlinfo(void *handle, int request, void *p) {\n#if SANITIZER_FREEBSD\n  return dlinfo(handle, request, p);\n#else\n  UNIMPLEMENTED();\n#endif\n}\n\nuptr internal_getdents(fd_t fd, struct linux_dirent *dirp, unsigned int count) {\n#if SANITIZER_FREEBSD\n  return internal_syscall(SYSCALL(getdirentries), fd, (uptr)dirp, count, NULL);\n#elif SANITIZER_USES_CANONICAL_LINUX_SYSCALLS\n  return internal_syscall(SYSCALL(getdents64), fd, (uptr)dirp, count);\n#else\n  return internal_syscall(SYSCALL(getdents), fd, (uptr)dirp, count);\n#endif\n}\n\nuptr internal_lseek(fd_t fd, OFF_T offset, int whence) {\n  return internal_syscall(SYSCALL(lseek), fd, offset, whence);\n}\n\n#if SANITIZER_LINUX\nuptr internal_prctl(int option, uptr arg2, uptr arg3, uptr arg4, uptr arg5) {\n  return internal_syscall(SYSCALL(prctl), option, arg2, arg3, arg4, arg5);\n}\n#endif\n\nuptr internal_sigaltstack(const void *ss, void *oss) {\n  return 
internal_syscall(SYSCALL(sigaltstack), (uptr)ss, (uptr)oss);\n}\n\nint internal_fork() {\n#if SANITIZER_USES_CANONICAL_LINUX_SYSCALLS\n  return internal_syscall(SYSCALL(clone), SIGCHLD, 0);\n#else\n  return internal_syscall(SYSCALL(fork));\n#endif\n}\n\n#if SANITIZER_FREEBSD\nint internal_sysctl(const int *name, unsigned int namelen, void *oldp,\n                    uptr *oldlenp, const void *newp, uptr newlen) {\n  return internal_syscall(SYSCALL(__sysctl), name, namelen, oldp,\n                          (size_t *)oldlenp, newp, (size_t)newlen);\n}\n\nint internal_sysctlbyname(const char *sname, void *oldp, uptr *oldlenp,\n                          const void *newp, uptr newlen) {\n  // Note: this function can be called during startup, so we need to avoid\n  // calling any interceptable functions. On FreeBSD >= 1300045 sysctlbyname()\n  // is a real syscall, but for older versions it calls sysctlnametomib()\n  // followed by sysctl(). To avoid calling the intercepted version and\n  // asserting if this happens during startup, call the real sysctlnametomib()\n  // followed by internal_sysctl() if the syscall is not available.\n#ifdef SYS___sysctlbyname\n  return internal_syscall(SYSCALL(__sysctlbyname), sname,\n                          internal_strlen(sname), oldp, (size_t *)oldlenp, newp,\n                          (size_t)newlen);\n#else\n  static decltype(sysctlnametomib) *real_sysctlnametomib = nullptr;\n  if (!real_sysctlnametomib)\n    real_sysctlnametomib =\n        (decltype(sysctlnametomib) *)dlsym(RTLD_NEXT, \"sysctlnametomib\");\n  CHECK(real_sysctlnametomib);\n\n  int oid[CTL_MAXNAME];\n  size_t len = CTL_MAXNAME;\n  if (real_sysctlnametomib(sname, oid, &len) == -1)\n    return (-1);\n  return internal_sysctl(oid, len, oldp, oldlenp, newp, newlen);\n#endif\n}\n#endif\n\n#if SANITIZER_LINUX\n#define SA_RESTORER 0x04000000\n// Doesn't set sa_restorer if the caller did not set it, so use with caution\n//(see below).\nint internal_sigaction_norestorer(int 
signum, const void *act, void *oldact) {\n  __sanitizer_kernel_sigaction_t k_act, k_oldact;\n  internal_memset(&k_act, 0, sizeof(__sanitizer_kernel_sigaction_t));\n  internal_memset(&k_oldact, 0, sizeof(__sanitizer_kernel_sigaction_t));\n  const __sanitizer_sigaction *u_act = (const __sanitizer_sigaction *)act;\n  __sanitizer_sigaction *u_oldact = (__sanitizer_sigaction *)oldact;\n  if (u_act) {\n    k_act.handler = u_act->handler;\n    k_act.sigaction = u_act->sigaction;\n    internal_memcpy(&k_act.sa_mask, &u_act->sa_mask,\n                    sizeof(__sanitizer_kernel_sigset_t));\n    // Without SA_RESTORER kernel ignores the calls (probably returns EINVAL).\n    k_act.sa_flags = u_act->sa_flags | SA_RESTORER;\n    // FIXME: most often sa_restorer is unset, however the kernel requires it\n    // to point to a valid signal restorer that calls the rt_sigreturn syscall.\n    // If sa_restorer passed to the kernel is NULL, the program may crash upon\n    // signal delivery or fail to unwind the stack in the signal handler.\n    // libc implementation of sigaction() passes its own restorer to\n    // rt_sigaction, so we need to do the same (we'll need to reimplement the\n    // restorers; for x86_64 the restorer address can be obtained from\n    // oldact->sa_restorer upon a call to sigaction(xxx, NULL, oldact).\n#if !SANITIZER_ANDROID || !SANITIZER_MIPS32\n    k_act.sa_restorer = u_act->sa_restorer;\n#endif\n  }\n\n  uptr result = internal_syscall(SYSCALL(rt_sigaction), (uptr)signum,\n      (uptr)(u_act ? &k_act : nullptr),\n      (uptr)(u_oldact ? 
&k_oldact : nullptr),\n      (uptr)sizeof(__sanitizer_kernel_sigset_t));\n\n  if ((result == 0) && u_oldact) {\n    u_oldact->handler = k_oldact.handler;\n    u_oldact->sigaction = k_oldact.sigaction;\n    internal_memcpy(&u_oldact->sa_mask, &k_oldact.sa_mask,\n                    sizeof(__sanitizer_kernel_sigset_t));\n    u_oldact->sa_flags = k_oldact.sa_flags;\n#if !SANITIZER_ANDROID || !SANITIZER_MIPS32\n    u_oldact->sa_restorer = k_oldact.sa_restorer;\n#endif\n  }\n  return result;\n}\n#endif  // SANITIZER_LINUX\n\nuptr internal_sigprocmask(int how, __sanitizer_sigset_t *set,\n                          __sanitizer_sigset_t *oldset) {\n#if SANITIZER_FREEBSD\n  return internal_syscall(SYSCALL(sigprocmask), how, set, oldset);\n#else\n  __sanitizer_kernel_sigset_t *k_set = (__sanitizer_kernel_sigset_t *)set;\n  __sanitizer_kernel_sigset_t *k_oldset = (__sanitizer_kernel_sigset_t *)oldset;\n  return internal_syscall(SYSCALL(rt_sigprocmask), (uptr)how, (uptr)k_set,\n                          (uptr)k_oldset, sizeof(__sanitizer_kernel_sigset_t));\n#endif\n}\n\nvoid internal_sigfillset(__sanitizer_sigset_t *set) {\n  internal_memset(set, 0xff, sizeof(*set));\n}\n\nvoid internal_sigemptyset(__sanitizer_sigset_t *set) {\n  internal_memset(set, 0, sizeof(*set));\n}\n\n#if SANITIZER_LINUX\nvoid internal_sigdelset(__sanitizer_sigset_t *set, int signum) {\n  signum -= 1;\n  CHECK_GE(signum, 0);\n  CHECK_LT(signum, sizeof(*set) * 8);\n  __sanitizer_kernel_sigset_t *k_set = (__sanitizer_kernel_sigset_t *)set;\n  const uptr idx = signum / (sizeof(k_set->sig[0]) * 8);\n  const uptr bit = signum % (sizeof(k_set->sig[0]) * 8);\n  k_set->sig[idx] &= ~((uptr)1 << bit);\n}\n\nbool internal_sigismember(__sanitizer_sigset_t *set, int signum) {\n  signum -= 1;\n  CHECK_GE(signum, 0);\n  CHECK_LT(signum, sizeof(*set) * 8);\n  __sanitizer_kernel_sigset_t *k_set = (__sanitizer_kernel_sigset_t *)set;\n  const uptr idx = signum / (sizeof(k_set->sig[0]) * 8);\n  const uptr bit = signum % 
(sizeof(k_set->sig[0]) * 8);\n  return k_set->sig[idx] & ((uptr)1 << bit);\n}\n#elif SANITIZER_FREEBSD\nvoid internal_sigdelset(__sanitizer_sigset_t *set, int signum) {\n  sigset_t *rset = reinterpret_cast<sigset_t *>(set);\n  sigdelset(rset, signum);\n}\n\nbool internal_sigismember(__sanitizer_sigset_t *set, int signum) {\n  sigset_t *rset = reinterpret_cast<sigset_t *>(set);\n  return sigismember(rset, signum);\n}\n#endif\n#endif // !SANITIZER_SOLARIS\n\n#if !SANITIZER_NETBSD\n// ThreadLister implementation.\nThreadLister::ThreadLister(pid_t pid) : pid_(pid), buffer_(4096) {\n  char task_directory_path[80];\n  internal_snprintf(task_directory_path, sizeof(task_directory_path),\n                    \"/proc/%d/task/\", pid);\n  descriptor_ = internal_open(task_directory_path, O_RDONLY | O_DIRECTORY);\n  if (internal_iserror(descriptor_)) {\n    Report(\"Can't open /proc/%d/task for reading.\\n\", pid);\n  }\n}\n\nThreadLister::Result ThreadLister::ListThreads(\n    InternalMmapVector<tid_t> *threads) {\n  if (internal_iserror(descriptor_))\n    return Error;\n  internal_lseek(descriptor_, 0, SEEK_SET);\n  threads->clear();\n\n  Result result = Ok;\n  for (bool first_read = true;; first_read = false) {\n    // Resize to max capacity if it was downsized by IsAlive.\n    buffer_.resize(buffer_.capacity());\n    CHECK_GE(buffer_.size(), 4096);\n    uptr read = internal_getdents(\n        descriptor_, (struct linux_dirent *)buffer_.data(), buffer_.size());\n    if (!read)\n      return result;\n    if (internal_iserror(read)) {\n      Report(\"Can't read directory entries from /proc/%d/task.\\n\", pid_);\n      return Error;\n    }\n\n    for (uptr begin = (uptr)buffer_.data(), end = begin + read; begin < end;) {\n      struct linux_dirent *entry = (struct linux_dirent *)begin;\n      begin += entry->d_reclen;\n      if (entry->d_ino == 1) {\n        // Inode 1 is for bad blocks and also can be a reason for early return.\n        // Should be emitted if kernel tried to 
output terminating thread.\n        // See proc_task_readdir implementation in Linux.\n        result = Incomplete;\n      }\n      if (entry->d_ino && *entry->d_name >= '0' && *entry->d_name <= '9')\n        threads->push_back(internal_atoll(entry->d_name));\n    }\n\n    // Now we are going to detect short-read or early EOF. In such cases Linux\n    // can return inconsistent list with missing alive threads.\n    // Code will just remember that the list can be incomplete but it will\n    // continue reads to return as much as possible.\n    if (!first_read) {\n      // The first one was a short-read by definition.\n      result = Incomplete;\n    } else if (read > buffer_.size() - 1024) {\n      // Read was close to the buffer size. So double the size and assume the\n      // worst.\n      buffer_.resize(buffer_.size() * 2);\n      result = Incomplete;\n    } else if (!threads->empty() && !IsAlive(threads->back())) {\n      // Maybe Linux early returned from read on terminated thread (!pid_alive)\n      // and failed to restore read position.\n      // See next_tid and proc_task_instantiate in Linux.\n      result = Incomplete;\n    }\n  }\n}\n\nbool ThreadLister::IsAlive(int tid) {\n  // /proc/%d/task/%d/status uses same call to detect alive threads as\n  // proc_task_readdir. 
See task_state implementation in Linux.\n  char path[80];\n  internal_snprintf(path, sizeof(path), \"/proc/%d/task/%d/status\", pid_, tid);\n  if (!ReadFileToVector(path, &buffer_) || buffer_.empty())\n    return false;\n  buffer_.push_back(0);\n  static const char kPrefix[] = \"\\nPPid:\";\n  const char *field = internal_strstr(buffer_.data(), kPrefix);\n  if (!field)\n    return false;\n  field += internal_strlen(kPrefix);\n  return (int)internal_atoll(field) != 0;\n}\n\nThreadLister::~ThreadLister() {\n  if (!internal_iserror(descriptor_))\n    internal_close(descriptor_);\n}\n#endif\n\n#if SANITIZER_WORDSIZE == 32\n// Take care of unusable kernel area in top gigabyte.\nstatic uptr GetKernelAreaSize() {\n#if SANITIZER_LINUX && !SANITIZER_X32\n  const uptr gbyte = 1UL << 30;\n\n  // Firstly check if there are writable segments\n  // mapped to top gigabyte (e.g. stack).\n  MemoryMappingLayout proc_maps(/*cache_enabled*/true);\n  if (proc_maps.Error())\n    return 0;\n  MemoryMappedSegment segment;\n  while (proc_maps.Next(&segment)) {\n    if ((segment.end >= 3 * gbyte) && segment.IsWritable()) return 0;\n  }\n\n#if !SANITIZER_ANDROID\n  // Even if nothing is mapped, top Gb may still be accessible\n  // if we are running on 64-bit kernel.\n  // Uname may report misleading results if personality type\n  // is modified (e.g. 
under schroot) so check this as well.\n  struct utsname uname_info;\n  int pers = personality(0xffffffffUL);\n  if (!(pers & PER_MASK) && internal_uname(&uname_info) == 0 &&\n      internal_strstr(uname_info.machine, \"64\"))\n    return 0;\n#endif  // SANITIZER_ANDROID\n\n  // Top gigabyte is reserved for kernel.\n  return gbyte;\n#else\n  return 0;\n#endif  // SANITIZER_LINUX && !SANITIZER_X32\n}\n#endif  // SANITIZER_WORDSIZE == 32\n\nuptr GetMaxVirtualAddress() {\n#if SANITIZER_NETBSD && defined(__x86_64__)\n  return 0x7f7ffffff000ULL;  // (0x00007f8000000000 - PAGE_SIZE)\n#elif SANITIZER_WORDSIZE == 64\n# if defined(__powerpc64__) || defined(__aarch64__)\n  // On PowerPC64 we have two different address space layouts: 44- and 46-bit.\n  // We somehow need to figure out which one we are using now and choose\n  // one of 0x00000fffffffffffUL and 0x00003fffffffffffUL.\n  // Note that with 'ulimit -s unlimited' the stack is moved away from the top\n  // of the address space, so simply checking the stack address is not enough.\n  // This should (does) work for both PowerPC64 Endian modes.\n  // Similarly, aarch64 has multiple address space layouts: 39, 42 and 47-bit.\n  return (1ULL << (MostSignificantSetBitIndex(GET_CURRENT_FRAME()) + 1)) - 1;\n#elif SANITIZER_RISCV64\n  return (1ULL << 38) - 1;\n# elif defined(__mips64)\n  return (1ULL << 40) - 1;  // 0x000000ffffffffffUL;\n# elif defined(__s390x__)\n  return (1ULL << 53) - 1;  // 0x001fffffffffffffUL;\n#elif defined(__sparc__)\n  return ~(uptr)0;\n# else\n  return (1ULL << 47) - 1;  // 0x00007fffffffffffUL;\n# endif\n#else  // SANITIZER_WORDSIZE == 32\n# if defined(__s390__)\n  return (1ULL << 31) - 1;  // 0x7fffffff;\n# else\n  return (1ULL << 32) - 1;  // 0xffffffff;\n# endif\n#endif  // SANITIZER_WORDSIZE\n}\n\nuptr GetMaxUserVirtualAddress() {\n  uptr addr = GetMaxVirtualAddress();\n#if SANITIZER_WORDSIZE == 32 && !defined(__s390__)\n  if (!common_flags()->full_address_space)\n    addr -= 
GetKernelAreaSize();\n  CHECK_LT(reinterpret_cast<uptr>(&addr), addr);\n#endif\n  return addr;\n}\n\n#if !SANITIZER_ANDROID\nuptr GetPageSize() {\n#if SANITIZER_LINUX && (defined(__x86_64__) || defined(__i386__)) && \\\n    defined(EXEC_PAGESIZE)\n  return EXEC_PAGESIZE;\n#elif SANITIZER_FREEBSD || SANITIZER_NETBSD\n// Use sysctl as sysconf can trigger interceptors internally.\n  int pz = 0;\n  uptr pzl = sizeof(pz);\n  int mib[2] = {CTL_HW, HW_PAGESIZE};\n  int rv = internal_sysctl(mib, 2, &pz, &pzl, nullptr, 0);\n  CHECK_EQ(rv, 0);\n  return (uptr)pz;\n#elif SANITIZER_USE_GETAUXVAL\n  return getauxval(AT_PAGESZ);\n#else\n  return sysconf(_SC_PAGESIZE);  // EXEC_PAGESIZE may not be trustworthy.\n#endif\n}\n#endif // !SANITIZER_ANDROID\n\nuptr ReadBinaryName(/*out*/char *buf, uptr buf_len) {\n#if SANITIZER_SOLARIS\n  const char *default_module_name = getexecname();\n  CHECK_NE(default_module_name, NULL);\n  return internal_snprintf(buf, buf_len, \"%s\", default_module_name);\n#else\n#if SANITIZER_FREEBSD || SANITIZER_NETBSD\n#if SANITIZER_FREEBSD\n  const int Mib[4] = {CTL_KERN, KERN_PROC, KERN_PROC_PATHNAME, -1};\n#else\n  const int Mib[4] = {CTL_KERN, KERN_PROC_ARGS, -1, KERN_PROC_PATHNAME};\n#endif\n  const char *default_module_name = \"kern.proc.pathname\";\n  uptr Size = buf_len;\n  bool IsErr =\n      (internal_sysctl(Mib, ARRAY_SIZE(Mib), buf, &Size, NULL, 0) != 0);\n  int readlink_error = IsErr ? 
errno : 0;\n  uptr module_name_len = Size;\n#else\n  const char *default_module_name = \"/proc/self/exe\";\n  uptr module_name_len = internal_readlink(\n      default_module_name, buf, buf_len);\n  int readlink_error;\n  bool IsErr = internal_iserror(module_name_len, &readlink_error);\n#endif  // SANITIZER_SOLARIS\n  if (IsErr) {\n    // We can't read binary name for some reason, assume it's unknown.\n    Report(\"WARNING: reading executable name failed with errno %d, \"\n           \"some stack frames may not be symbolized\\n\", readlink_error);\n    module_name_len = internal_snprintf(buf, buf_len, \"%s\",\n                                        default_module_name);\n    CHECK_LT(module_name_len, buf_len);\n  }\n  return module_name_len;\n#endif\n}\n\nuptr ReadLongProcessName(/*out*/ char *buf, uptr buf_len) {\n#if SANITIZER_LINUX\n  char *tmpbuf;\n  uptr tmpsize;\n  uptr tmplen;\n  if (ReadFileToBuffer(\"/proc/self/cmdline\", &tmpbuf, &tmpsize, &tmplen,\n                       1024 * 1024)) {\n    internal_strncpy(buf, tmpbuf, buf_len);\n    UnmapOrDie(tmpbuf, tmpsize);\n    return internal_strlen(buf);\n  }\n#endif\n  return ReadBinaryName(buf, buf_len);\n}\n\n// Match full names of the form /path/to/base_name{-,.}*\nbool LibraryNameIs(const char *full_name, const char *base_name) {\n  const char *name = full_name;\n  // Strip path.\n  while (*name != '\\0') name++;\n  while (name > full_name && *name != '/') name--;\n  if (*name == '/') name++;\n  uptr base_name_length = internal_strlen(base_name);\n  if (internal_strncmp(name, base_name, base_name_length)) return false;\n  return (name[base_name_length] == '-' || name[base_name_length] == '.');\n}\n\n#if !SANITIZER_ANDROID\n// Call cb for each region mapped by map.\nvoid ForEachMappedRegion(link_map *map, void (*cb)(const void *, uptr)) {\n  CHECK_NE(map, nullptr);\n#if !SANITIZER_FREEBSD\n  typedef ElfW(Phdr) Elf_Phdr;\n  typedef ElfW(Ehdr) Elf_Ehdr;\n#endif // !SANITIZER_FREEBSD\n  char *base = (char 
*)map->l_addr;\n  Elf_Ehdr *ehdr = (Elf_Ehdr *)base;\n  char *phdrs = base + ehdr->e_phoff;\n  char *phdrs_end = phdrs + ehdr->e_phnum * ehdr->e_phentsize;\n\n  // Find the segment with the minimum base so we can \"relocate\" the p_vaddr\n  // fields.  Typically ET_DYN objects (DSOs) have base of zero and ET_EXEC\n  // objects have a non-zero base.\n  uptr preferred_base = (uptr)-1;\n  for (char *iter = phdrs; iter != phdrs_end; iter += ehdr->e_phentsize) {\n    Elf_Phdr *phdr = (Elf_Phdr *)iter;\n    if (phdr->p_type == PT_LOAD && preferred_base > (uptr)phdr->p_vaddr)\n      preferred_base = (uptr)phdr->p_vaddr;\n  }\n\n  // Compute the delta from the real base to get a relocation delta.\n  sptr delta = (uptr)base - preferred_base;\n  // Now we can figure out what the loader really mapped.\n  for (char *iter = phdrs; iter != phdrs_end; iter += ehdr->e_phentsize) {\n    Elf_Phdr *phdr = (Elf_Phdr *)iter;\n    if (phdr->p_type == PT_LOAD) {\n      uptr seg_start = phdr->p_vaddr + delta;\n      uptr seg_end = seg_start + phdr->p_memsz;\n      // None of these values are aligned.  We consider the ragged edges of the\n      // load command as defined, since they are mapped from the file.\n      seg_start = RoundDownTo(seg_start, GetPageSizeCached());\n      seg_end = RoundUpTo(seg_end, GetPageSizeCached());\n      cb((void *)seg_start, seg_end - seg_start);\n    }\n  }\n}\n#endif\n\n#if SANITIZER_LINUX\n#if defined(__x86_64__)\n// We cannot use glibc's clone wrapper, because it messes with the child\n// task's TLS. It writes the PID and TID of the child task to its thread\n// descriptor, but in our case the child task shares the thread descriptor with\n// the parent (because we don't know how to allocate a new thread\n// descriptor to keep glibc happy). 
So the stock version of clone(), when\n// used with CLONE_VM, would end up corrupting the parent's thread descriptor.\nuptr internal_clone(int (*fn)(void *), void *child_stack, int flags, void *arg,\n                    int *parent_tidptr, void *newtls, int *child_tidptr) {\n  long long res;\n  if (!fn || !child_stack)\n    return -EINVAL;\n  CHECK_EQ(0, (uptr)child_stack % 16);\n  child_stack = (char *)child_stack - 2 * sizeof(unsigned long long);\n  ((unsigned long long *)child_stack)[0] = (uptr)fn;\n  ((unsigned long long *)child_stack)[1] = (uptr)arg;\n  register void *r8 __asm__(\"r8\") = newtls;\n  register int *r10 __asm__(\"r10\") = child_tidptr;\n  __asm__ __volatile__(\n                       /* %rax = syscall(%rax = SYSCALL(clone),\n                        *                %rdi = flags,\n                        *                %rsi = child_stack,\n                        *                %rdx = parent_tidptr,\n                        *                %r8  = new_tls,\n                        *                %r10 = child_tidptr)\n                        */\n                       \"syscall\\n\"\n\n                       /* if (%rax != 0)\n                        *   return;\n                        */\n                       \"testq  %%rax,%%rax\\n\"\n                       \"jnz    1f\\n\"\n\n                       /* In the child. Terminate unwind chain. */\n                       // XXX: We should also terminate the CFI unwind chain\n                       // here. Unfortunately clang 3.2 doesn't support the\n                       // necessary CFI directives, so we skip that part.\n                       \"xorq   %%rbp,%%rbp\\n\"\n\n                       /* Call \"fn(arg)\". */\n                       \"popq   %%rax\\n\"\n                       \"popq   %%rdi\\n\"\n                       \"call   *%%rax\\n\"\n\n                       /* Call _exit(%rax). 
*/\n                       \"movq   %%rax,%%rdi\\n\"\n                       \"movq   %2,%%rax\\n\"\n                       \"syscall\\n\"\n\n                       /* Return to parent. */\n                     \"1:\\n\"\n                       : \"=a\" (res)\n                       : \"a\"(SYSCALL(clone)), \"i\"(SYSCALL(exit)),\n                         \"S\"(child_stack),\n                         \"D\"(flags),\n                         \"d\"(parent_tidptr),\n                         \"r\"(r8),\n                         \"r\"(r10)\n                       : \"memory\", \"r11\", \"rcx\");\n  return res;\n}\n#elif defined(__mips__)\nuptr internal_clone(int (*fn)(void *), void *child_stack, int flags, void *arg,\n                    int *parent_tidptr, void *newtls, int *child_tidptr) {\n  long long res;\n  if (!fn || !child_stack)\n    return -EINVAL;\n  CHECK_EQ(0, (uptr)child_stack % 16);\n  child_stack = (char *)child_stack - 2 * sizeof(unsigned long long);\n  ((unsigned long long *)child_stack)[0] = (uptr)fn;\n  ((unsigned long long *)child_stack)[1] = (uptr)arg;\n  register void *a3 __asm__(\"$7\") = newtls;\n  register int *a4 __asm__(\"$8\") = child_tidptr;\n  // We don't have proper CFI directives here because it requires alot of code\n  // for very marginal benefits.\n  __asm__ __volatile__(\n                       /* $v0 = syscall($v0 = __NR_clone,\n                        * $a0 = flags,\n                        * $a1 = child_stack,\n                        * $a2 = parent_tidptr,\n                        * $a3 = new_tls,\n                        * $a4 = child_tidptr)\n                        */\n                       \".cprestore 16;\\n\"\n                       \"move $4,%1;\\n\"\n                       \"move $5,%2;\\n\"\n                       \"move $6,%3;\\n\"\n                       \"move $7,%4;\\n\"\n                       /* Store the fifth argument on stack\n                        * if we are using 32-bit abi.\n                        */\n#if 
SANITIZER_WORDSIZE == 32\n                       \"lw %5,16($29);\\n\"\n#else\n                       \"move $8,%5;\\n\"\n#endif\n                       \"li $2,%6;\\n\"\n                       \"syscall;\\n\"\n\n                       /* if ($v0 != 0)\n                        * return;\n                        */\n                       \"bnez $2,1f;\\n\"\n\n                       /* Call \"fn(arg)\". */\n#if SANITIZER_WORDSIZE == 32\n#ifdef __BIG_ENDIAN__\n                       \"lw $25,4($29);\\n\"\n                       \"lw $4,12($29);\\n\"\n#else\n                       \"lw $25,0($29);\\n\"\n                       \"lw $4,8($29);\\n\"\n#endif\n#else\n                       \"ld $25,0($29);\\n\"\n                       \"ld $4,8($29);\\n\"\n#endif\n                       \"jal $25;\\n\"\n\n                       /* Call _exit($v0). */\n                       \"move $4,$2;\\n\"\n                       \"li $2,%7;\\n\"\n                       \"syscall;\\n\"\n\n                       /* Return to parent. 
*/\n                     \"1:\\n\"\n                       : \"=r\" (res)\n                       : \"r\"(flags),\n                         \"r\"(child_stack),\n                         \"r\"(parent_tidptr),\n                         \"r\"(a3),\n                         \"r\"(a4),\n                         \"i\"(__NR_clone),\n                         \"i\"(__NR_exit)\n                       : \"memory\", \"$29\" );\n  return res;\n}\n#elif SANITIZER_RISCV64\nuptr internal_clone(int (*fn)(void *), void *child_stack, int flags, void *arg,\n                    int *parent_tidptr, void *newtls, int *child_tidptr) {\n  if (!fn || !child_stack)\n    return -EINVAL;\n\n  CHECK_EQ(0, (uptr)child_stack % 16);\n\n  register int res __asm__(\"a0\");\n  register int __flags __asm__(\"a0\") = flags;\n  register void *__stack __asm__(\"a1\") = child_stack;\n  register int *__ptid __asm__(\"a2\") = parent_tidptr;\n  register void *__tls __asm__(\"a3\") = newtls;\n  register int *__ctid __asm__(\"a4\") = child_tidptr;\n  register int (*__fn)(void *) __asm__(\"a5\") = fn;\n  register void *__arg __asm__(\"a6\") = arg;\n  register int nr_clone __asm__(\"a7\") = __NR_clone;\n\n  __asm__ __volatile__(\n      \"ecall\\n\"\n\n      /* if (a0 != 0)\n       *   return a0;\n       */\n      \"bnez a0, 1f\\n\"\n\n      // In the child, now. 
Call \"fn(arg)\".\n      \"mv a0, a6\\n\"\n      \"jalr a5\\n\"\n\n      // Call _exit(a0).\n      \"addi a7, zero, %9\\n\"\n      \"ecall\\n\"\n      \"1:\\n\"\n\n      : \"=r\"(res)\n      : \"0\"(__flags), \"r\"(__stack), \"r\"(__ptid), \"r\"(__tls), \"r\"(__ctid),\n        \"r\"(__fn), \"r\"(__arg), \"r\"(nr_clone), \"i\"(__NR_exit)\n      : \"memory\");\n  return res;\n}\n#elif defined(__aarch64__)\nuptr internal_clone(int (*fn)(void *), void *child_stack, int flags, void *arg,\n                    int *parent_tidptr, void *newtls, int *child_tidptr) {\n  register long long res __asm__(\"x0\");\n  if (!fn || !child_stack)\n    return -EINVAL;\n  CHECK_EQ(0, (uptr)child_stack % 16);\n  child_stack = (char *)child_stack - 2 * sizeof(unsigned long long);\n  ((unsigned long long *)child_stack)[0] = (uptr)fn;\n  ((unsigned long long *)child_stack)[1] = (uptr)arg;\n\n  register int (*__fn)(void *)  __asm__(\"x0\") = fn;\n  register void *__stack __asm__(\"x1\") = child_stack;\n  register int   __flags __asm__(\"x2\") = flags;\n  register void *__arg   __asm__(\"x3\") = arg;\n  register int  *__ptid  __asm__(\"x4\") = parent_tidptr;\n  register void *__tls   __asm__(\"x5\") = newtls;\n  register int  *__ctid  __asm__(\"x6\") = child_tidptr;\n\n  __asm__ __volatile__(\n                       \"mov x0,x2\\n\" /* flags  */\n                       \"mov x2,x4\\n\" /* ptid  */\n                       \"mov x3,x5\\n\" /* tls  */\n                       \"mov x4,x6\\n\" /* ctid  */\n                       \"mov x8,%9\\n\" /* clone  */\n\n                       \"svc 0x0\\n\"\n\n                       /* if (%r0 != 0)\n                        *   return %r0;\n                        */\n                       \"cmp x0, #0\\n\"\n                       \"bne 1f\\n\"\n\n                       /* In the child, now. Call \"fn(arg)\". */\n                       \"ldp x1, x0, [sp], #16\\n\"\n                       \"blr x1\\n\"\n\n                       /* Call _exit(%r0).  
*/\n                       \"mov x8, %10\\n\"\n                       \"svc 0x0\\n\"\n                     \"1:\\n\"\n\n                       : \"=r\" (res)\n                       : \"i\"(-EINVAL),\n                         \"r\"(__fn), \"r\"(__stack), \"r\"(__flags), \"r\"(__arg),\n                         \"r\"(__ptid), \"r\"(__tls), \"r\"(__ctid),\n                         \"i\"(__NR_clone), \"i\"(__NR_exit)\n                       : \"x30\", \"memory\");\n  return res;\n}\n#elif defined(__powerpc64__)\nuptr internal_clone(int (*fn)(void *), void *child_stack, int flags, void *arg,\n                   int *parent_tidptr, void *newtls, int *child_tidptr) {\n  long long res;\n// Stack frame structure.\n#if SANITIZER_PPC64V1\n//   Back chain == 0        (SP + 112)\n// Frame (112 bytes):\n//   Parameter save area    (SP + 48), 8 doublewords\n//   TOC save area          (SP + 40)\n//   Link editor doubleword (SP + 32)\n//   Compiler doubleword    (SP + 24)\n//   LR save area           (SP + 16)\n//   CR save area           (SP + 8)\n//   Back chain             (SP + 0)\n# define FRAME_SIZE 112\n# define FRAME_TOC_SAVE_OFFSET 40\n#elif SANITIZER_PPC64V2\n//   Back chain == 0        (SP + 32)\n// Frame (32 bytes):\n//   TOC save area          (SP + 24)\n//   LR save area           (SP + 16)\n//   CR save area           (SP + 8)\n//   Back chain             (SP + 0)\n# define FRAME_SIZE 32\n# define FRAME_TOC_SAVE_OFFSET 24\n#else\n# error \"Unsupported PPC64 ABI\"\n#endif\n  if (!fn || !child_stack)\n    return -EINVAL;\n  CHECK_EQ(0, (uptr)child_stack % 16);\n\n  register int (*__fn)(void *) __asm__(\"r3\") = fn;\n  register void *__cstack      __asm__(\"r4\") = child_stack;\n  register int __flags         __asm__(\"r5\") = flags;\n  register void *__arg         __asm__(\"r6\") = arg;\n  register int *__ptidptr      __asm__(\"r7\") = parent_tidptr;\n  register void *__newtls      __asm__(\"r8\") = newtls;\n  register int *__ctidptr      __asm__(\"r9\") = 
child_tidptr;\n\n __asm__ __volatile__(\n           /* fn and arg are saved across the syscall */\n           \"mr 28, %5\\n\\t\"\n           \"mr 27, %8\\n\\t\"\n\n           /* syscall\n             r0 == __NR_clone\n             r3 == flags\n             r4 == child_stack\n             r5 == parent_tidptr\n             r6 == newtls\n             r7 == child_tidptr */\n           \"mr 3, %7\\n\\t\"\n           \"mr 5, %9\\n\\t\"\n           \"mr 6, %10\\n\\t\"\n           \"mr 7, %11\\n\\t\"\n           \"li 0, %3\\n\\t\"\n           \"sc\\n\\t\"\n\n           /* Test if syscall was successful */\n           \"cmpdi  cr1, 3, 0\\n\\t\"\n           \"crandc cr1*4+eq, cr1*4+eq, cr0*4+so\\n\\t\"\n           \"bne-   cr1, 1f\\n\\t\"\n\n           /* Set up stack frame */\n           \"li    29, 0\\n\\t\"\n           \"stdu  29, -8(1)\\n\\t\"\n           \"stdu  1, -%12(1)\\n\\t\"\n           /* Do the function call */\n           \"std   2, %13(1)\\n\\t\"\n#if SANITIZER_PPC64V1\n           \"ld    0, 0(28)\\n\\t\"\n           \"ld    2, 8(28)\\n\\t\"\n           \"mtctr 0\\n\\t\"\n#elif SANITIZER_PPC64V2\n           \"mr    12, 28\\n\\t\"\n           \"mtctr 12\\n\\t\"\n#else\n# error \"Unsupported PPC64 ABI\"\n#endif\n           \"mr    3, 27\\n\\t\"\n           \"bctrl\\n\\t\"\n           \"ld    2, %13(1)\\n\\t\"\n\n           /* Call _exit(r3) */\n           \"li 0, %4\\n\\t\"\n           \"sc\\n\\t\"\n\n           /* Return to parent */\n           \"1:\\n\\t\"\n           \"mr %0, 3\\n\\t\"\n             : \"=r\" (res)\n             : \"0\" (-1),\n               \"i\" (EINVAL),\n               \"i\" (__NR_clone),\n               \"i\" (__NR_exit),\n               \"r\" (__fn),\n               \"r\" (__cstack),\n               \"r\" (__flags),\n               \"r\" (__arg),\n               \"r\" (__ptidptr),\n               \"r\" (__newtls),\n               \"r\" (__ctidptr),\n               \"i\" (FRAME_SIZE),\n               \"i\" (FRAME_TOC_SAVE_OFFSET)\n      
       : \"cr0\", \"cr1\", \"memory\", \"ctr\", \"r0\", \"r27\", \"r28\", \"r29\");\n  return res;\n}\n#elif defined(__i386__)\nuptr internal_clone(int (*fn)(void *), void *child_stack, int flags, void *arg,\n                    int *parent_tidptr, void *newtls, int *child_tidptr) {\n  int res;\n  if (!fn || !child_stack)\n    return -EINVAL;\n  CHECK_EQ(0, (uptr)child_stack % 16);\n  child_stack = (char *)child_stack - 7 * sizeof(unsigned int);\n  ((unsigned int *)child_stack)[0] = (uptr)flags;\n  ((unsigned int *)child_stack)[1] = (uptr)0;\n  ((unsigned int *)child_stack)[2] = (uptr)fn;\n  ((unsigned int *)child_stack)[3] = (uptr)arg;\n  __asm__ __volatile__(\n                       /* %eax = syscall(%eax = SYSCALL(clone),\n                        *                %ebx = flags,\n                        *                %ecx = child_stack,\n                        *                %edx = parent_tidptr,\n                        *                %esi  = new_tls,\n                        *                %edi = child_tidptr)\n                        */\n\n                        /* Obtain flags */\n                        \"movl    (%%ecx), %%ebx\\n\"\n                        /* Do the system call */\n                        \"pushl   %%ebx\\n\"\n                        \"pushl   %%esi\\n\"\n                        \"pushl   %%edi\\n\"\n                        /* Remember the flag value.  
*/\n                        \"movl    %%ebx, (%%ecx)\\n\"\n                        \"int     $0x80\\n\"\n                        \"popl    %%edi\\n\"\n                        \"popl    %%esi\\n\"\n                        \"popl    %%ebx\\n\"\n\n                        /* if (%eax != 0)\n                         *   return;\n                         */\n\n                        \"test    %%eax,%%eax\\n\"\n                        \"jnz    1f\\n\"\n\n                        /* terminate the stack frame */\n                        \"xorl   %%ebp,%%ebp\\n\"\n                        /* Call FN. */\n                        \"call    *%%ebx\\n\"\n#ifdef PIC\n                        \"call    here\\n\"\n                        \"here:\\n\"\n                        \"popl    %%ebx\\n\"\n                        \"addl    $_GLOBAL_OFFSET_TABLE_+[.-here], %%ebx\\n\"\n#endif\n                        /* Call exit */\n                        \"movl    %%eax, %%ebx\\n\"\n                        \"movl    %2, %%eax\\n\"\n                        \"int     $0x80\\n\"\n                        \"1:\\n\"\n                       : \"=a\" (res)\n                       : \"a\"(SYSCALL(clone)), \"i\"(SYSCALL(exit)),\n                         \"c\"(child_stack),\n                         \"d\"(parent_tidptr),\n                         \"S\"(newtls),\n                         \"D\"(child_tidptr)\n                       : \"memory\");\n  return res;\n}\n#elif defined(__arm__)\nuptr internal_clone(int (*fn)(void *), void *child_stack, int flags, void *arg,\n                    int *parent_tidptr, void *newtls, int *child_tidptr) {\n  unsigned int res;\n  if (!fn || !child_stack)\n    return -EINVAL;\n  child_stack = (char *)child_stack - 2 * sizeof(unsigned int);\n  ((unsigned int *)child_stack)[0] = (uptr)fn;\n  ((unsigned int *)child_stack)[1] = (uptr)arg;\n  register int r0 __asm__(\"r0\") = flags;\n  register void *r1 __asm__(\"r1\") = child_stack;\n  register int *r2 __asm__(\"r2\") = 
parent_tidptr;\n  register void *r3 __asm__(\"r3\") = newtls;\n  register int *r4 __asm__(\"r4\") = child_tidptr;\n  register int r7 __asm__(\"r7\") = __NR_clone;\n\n#if __ARM_ARCH > 4 || defined (__ARM_ARCH_4T__)\n# define ARCH_HAS_BX\n#endif\n#if __ARM_ARCH > 4\n# define ARCH_HAS_BLX\n#endif\n\n#ifdef ARCH_HAS_BX\n# ifdef ARCH_HAS_BLX\n#  define BLX(R) \"blx \"  #R \"\\n\"\n# else\n#  define BLX(R) \"mov lr, pc; bx \" #R \"\\n\"\n# endif\n#else\n# define BLX(R)  \"mov lr, pc; mov pc,\" #R \"\\n\"\n#endif\n\n  __asm__ __volatile__(\n                       /* %r0 = syscall(%r7 = SYSCALL(clone),\n                        *               %r0 = flags,\n                        *               %r1 = child_stack,\n                        *               %r2 = parent_tidptr,\n                        *               %r3  = new_tls,\n                        *               %r4 = child_tidptr)\n                        */\n\n                       /* Do the system call */\n                       \"swi 0x0\\n\"\n\n                       /* if (%r0 != 0)\n                        *   return %r0;\n                        */\n                       \"cmp r0, #0\\n\"\n                       \"bne 1f\\n\"\n\n                       /* In the child, now. Call \"fn(arg)\". */\n                       \"ldr r0, [sp, #4]\\n\"\n                       \"ldr ip, [sp], #8\\n\"\n                       BLX(ip)\n                       /* Call _exit(%r0). 
*/\n                       \"mov r7, %7\\n\"\n                       \"swi 0x0\\n\"\n                       \"1:\\n\"\n                       \"mov %0, r0\\n\"\n                       : \"=r\"(res)\n                       : \"r\"(r0), \"r\"(r1), \"r\"(r2), \"r\"(r3), \"r\"(r4), \"r\"(r7),\n                         \"i\"(__NR_exit)\n                       : \"memory\");\n  return res;\n}\n#endif\n#endif  // SANITIZER_LINUX\n\n#if SANITIZER_LINUX\nint internal_uname(struct utsname *buf) {\n  return internal_syscall(SYSCALL(uname), buf);\n}\n#endif\n\n#if SANITIZER_ANDROID\n#if __ANDROID_API__ < 21\nextern \"C\" __attribute__((weak)) int dl_iterate_phdr(\n    int (*)(struct dl_phdr_info *, size_t, void *), void *);\n#endif\n\nstatic int dl_iterate_phdr_test_cb(struct dl_phdr_info *info, size_t size,\n                                   void *data) {\n  // Any name starting with \"lib\" indicates a bug in L where library base names\n  // are returned instead of paths.\n  if (info->dlpi_name && info->dlpi_name[0] == 'l' &&\n      info->dlpi_name[1] == 'i' && info->dlpi_name[2] == 'b') {\n    *(bool *)data = true;\n    return 1;\n  }\n  return 0;\n}\n\nstatic atomic_uint32_t android_api_level;\n\nstatic AndroidApiLevel AndroidDetectApiLevelStatic() {\n#if __ANDROID_API__ <= 19\n  return ANDROID_KITKAT;\n#elif __ANDROID_API__ <= 22\n  return ANDROID_LOLLIPOP_MR1;\n#else\n  return ANDROID_POST_LOLLIPOP;\n#endif\n}\n\nstatic AndroidApiLevel AndroidDetectApiLevel() {\n  if (!&dl_iterate_phdr)\n    return ANDROID_KITKAT; // K or lower\n  bool base_name_seen = false;\n  dl_iterate_phdr(dl_iterate_phdr_test_cb, &base_name_seen);\n  if (base_name_seen)\n    return ANDROID_LOLLIPOP_MR1; // L MR1\n  return ANDROID_POST_LOLLIPOP;   // post-L\n  // Plain L (API level 21) is completely broken wrt ASan and not very\n  // interesting to detect.\n}\n\nextern \"C\" __attribute__((weak)) void* _DYNAMIC;\n\nAndroidApiLevel AndroidGetApiLevel() {\n  AndroidApiLevel level =\n      
(AndroidApiLevel)atomic_load(&android_api_level, memory_order_relaxed);\n  if (level) return level;\n  level = &_DYNAMIC == nullptr ? AndroidDetectApiLevelStatic()\n                               : AndroidDetectApiLevel();\n  atomic_store(&android_api_level, level, memory_order_relaxed);\n  return level;\n}\n\n#endif\n\nstatic HandleSignalMode GetHandleSignalModeImpl(int signum) {\n  switch (signum) {\n    case SIGABRT:\n      return common_flags()->handle_abort;\n    case SIGILL:\n      return common_flags()->handle_sigill;\n    case SIGTRAP:\n      return common_flags()->handle_sigtrap;\n    case SIGFPE:\n      return common_flags()->handle_sigfpe;\n    case SIGSEGV:\n      return common_flags()->handle_segv;\n    case SIGBUS:\n      return common_flags()->handle_sigbus;\n  }\n  return kHandleSignalNo;\n}\n\nHandleSignalMode GetHandleSignalMode(int signum) {\n  HandleSignalMode result = GetHandleSignalModeImpl(signum);\n  if (result == kHandleSignalYes && !common_flags()->allow_user_segv_handler)\n    return kHandleSignalExclusive;\n  return result;\n}\n\n#if !SANITIZER_GO\nvoid *internal_start_thread(void *(*func)(void *arg), void *arg) {\n  if (&real_pthread_create == 0)\n    return nullptr;\n  // Start the thread with signals blocked, otherwise it can steal user signals.\n  ScopedBlockSignals block(nullptr);\n  void *th;\n  real_pthread_create(&th, nullptr, func, arg);\n  return th;\n}\n\nvoid internal_join_thread(void *th) {\n  if (&real_pthread_join)\n    real_pthread_join(th, nullptr);\n}\n#else\nvoid *internal_start_thread(void *(*func)(void *), void *arg) { return 0; }\n\nvoid internal_join_thread(void *th) {}\n#endif\n\n#if defined(__aarch64__)\n// Android headers in the older NDK releases miss this definition.\nstruct __sanitizer_esr_context {\n  struct _aarch64_ctx head;\n  uint64_t esr;\n};\n\nstatic bool Aarch64GetESR(ucontext_t *ucontext, u64 *esr) {\n  static const u32 kEsrMagic = 0x45535201;\n  u8 *aux = reinterpret_cast<u8 
*>(ucontext->uc_mcontext.__reserved);\n  while (true) {\n    _aarch64_ctx *ctx = (_aarch64_ctx *)aux;\n    if (ctx->size == 0) break;\n    if (ctx->magic == kEsrMagic) {\n      *esr = ((__sanitizer_esr_context *)ctx)->esr;\n      return true;\n    }\n    aux += ctx->size;\n  }\n  return false;\n}\n#endif\n\nusing Context = ucontext_t;\n\nSignalContext::WriteFlag SignalContext::GetWriteFlag() const {\n  Context *ucontext = (Context *)context;\n#if defined(__x86_64__) || defined(__i386__)\n  static const uptr PF_WRITE = 1U << 1;\n#if SANITIZER_FREEBSD\n  uptr err = ucontext->uc_mcontext.mc_err;\n#elif SANITIZER_NETBSD\n  uptr err = ucontext->uc_mcontext.__gregs[_REG_ERR];\n#elif SANITIZER_SOLARIS && defined(__i386__)\n  const int Err = 13;\n  uptr err = ucontext->uc_mcontext.gregs[Err];\n#else\n  uptr err = ucontext->uc_mcontext.gregs[REG_ERR];\n#endif // SANITIZER_FREEBSD\n  return err & PF_WRITE ? Write : Read;\n#elif defined(__mips__)\n  uint32_t *exception_source;\n  uint32_t faulty_instruction;\n  uint32_t op_code;\n\n  exception_source = (uint32_t *)ucontext->uc_mcontext.pc;\n  faulty_instruction = (uint32_t)(*exception_source);\n\n  op_code = (faulty_instruction >> 26) & 0x3f;\n\n  // FIXME: Add support for FPU, microMIPS, DSP, MSA memory instructions.\n  switch (op_code) {\n    case 0x28:  // sb\n    case 0x29:  // sh\n    case 0x2b:  // sw\n    case 0x3f:  // sd\n#if __mips_isa_rev < 6\n    case 0x2c:  // sdl\n    case 0x2d:  // sdr\n    case 0x2a:  // swl\n    case 0x2e:  // swr\n#endif\n      return SignalContext::Write;\n\n    case 0x20:  // lb\n    case 0x24:  // lbu\n    case 0x21:  // lh\n    case 0x25:  // lhu\n    case 0x23:  // lw\n    case 0x27:  // lwu\n    case 0x37:  // ld\n#if __mips_isa_rev < 6\n    case 0x1a:  // ldl\n    case 0x1b:  // ldr\n    case 0x22:  // lwl\n    case 0x26:  // lwr\n#endif\n      return SignalContext::Read;\n#if __mips_isa_rev == 6\n    case 0x3b:  // pcrel\n      op_code = (faulty_instruction >> 19) & 0x3;\n      
switch (op_code) {\n        case 0x1:  // lwpc\n        case 0x2:  // lwupc\n          return SignalContext::Read;\n      }\n#endif\n  }\n  return SignalContext::Unknown;\n#elif defined(__arm__)\n  static const uptr FSR_WRITE = 1U << 11;\n  uptr fsr = ucontext->uc_mcontext.error_code;\n  return fsr & FSR_WRITE ? Write : Read;\n#elif defined(__aarch64__)\n  static const u64 ESR_ELx_WNR = 1U << 6;\n  u64 esr;\n  if (!Aarch64GetESR(ucontext, &esr)) return Unknown;\n  return esr & ESR_ELx_WNR ? Write : Read;\n#elif defined(__sparc__)\n  // Decode the instruction to determine the access type.\n  // From OpenSolaris $SRC/uts/sun4/os/trap.c (get_accesstype).\n#if SANITIZER_SOLARIS\n  uptr pc = ucontext->uc_mcontext.gregs[REG_PC];\n#else\n  // Historical BSDism here.\n  struct sigcontext *scontext = (struct sigcontext *)context;\n#if defined(__arch64__)\n  uptr pc = scontext->sigc_regs.tpc;\n#else\n  uptr pc = scontext->si_regs.pc;\n#endif\n#endif\n  u32 instr = *(u32 *)pc;\n  return (instr >> 21) & 1 ? Write: Read;\n#elif defined(__riscv)\n#if SANITIZER_FREEBSD\n  unsigned long pc = ucontext->uc_mcontext.mc_gpregs.gp_sepc;\n#else\n  unsigned long pc = ucontext->uc_mcontext.__gregs[REG_PC];\n#endif\n  unsigned faulty_instruction = *(uint16_t *)pc;\n\n#if defined(__riscv_compressed)\n  if ((faulty_instruction & 0x3) != 0x3) {  // it's a compressed instruction\n    // set op_bits to the instruction bits [1, 0, 15, 14, 13]\n    unsigned op_bits =\n        ((faulty_instruction & 0x3) << 3) | (faulty_instruction >> 13);\n    unsigned rd = faulty_instruction & 0xF80;  // bits 7-11, inclusive\n    switch (op_bits) {\n      case 0b10'010:  // c.lwsp (rd != x0)\n#if __riscv_xlen == 64\n      case 0b10'011:  // c.ldsp (rd != x0)\n#endif\n        return rd ? 
SignalContext::Read : SignalContext::Unknown;\n      case 0b00'010:  // c.lw\n#if __riscv_flen >= 32 && __riscv_xlen == 32\n      case 0b10'011:  // c.flwsp\n#endif\n#if __riscv_flen >= 32 || __riscv_xlen == 64\n      case 0b00'011:  // c.flw / c.ld\n#endif\n#if __riscv_flen == 64\n      case 0b00'001:  // c.fld\n      case 0b10'001:  // c.fldsp\n#endif\n        return SignalContext::Read;\n      case 0b00'110:  // c.sw\n      case 0b10'110:  // c.swsp\n#if __riscv_flen >= 32 || __riscv_xlen == 64\n      case 0b00'111:  // c.fsw / c.sd\n      case 0b10'111:  // c.fswsp / c.sdsp\n#endif\n#if __riscv_flen == 64\n      case 0b00'101:  // c.fsd\n      case 0b10'101:  // c.fsdsp\n#endif\n        return SignalContext::Write;\n      default:\n        return SignalContext::Unknown;\n    }\n  }\n#endif\n\n  unsigned opcode = faulty_instruction & 0x7f;         // lower 7 bits\n  unsigned funct3 = (faulty_instruction >> 12) & 0x7;  // bits 12-14, inclusive\n  switch (opcode) {\n    case 0b0000011:  // loads\n      switch (funct3) {\n        case 0b000:  // lb\n        case 0b001:  // lh\n        case 0b010:  // lw\n#if __riscv_xlen == 64\n        case 0b011:  // ld\n#endif\n        case 0b100:  // lbu\n        case 0b101:  // lhu\n          return SignalContext::Read;\n        default:\n          return SignalContext::Unknown;\n      }\n    case 0b0100011:  // stores\n      switch (funct3) {\n        case 0b000:  // sb\n        case 0b001:  // sh\n        case 0b010:  // sw\n#if __riscv_xlen == 64\n        case 0b011:  // sd\n#endif\n          return SignalContext::Write;\n        default:\n          return SignalContext::Unknown;\n      }\n#if __riscv_flen >= 32\n    case 0b0000111:  // floating-point loads\n      switch (funct3) {\n        case 0b010:  // flw\n#if __riscv_flen == 64\n        case 0b011:  // fld\n#endif\n          return SignalContext::Read;\n        default:\n          return SignalContext::Unknown;\n      }\n    case 0b0100111:  // floating-point stores\n  
    switch (funct3) {\n        case 0b010:  // fsw\n#if __riscv_flen == 64\n        case 0b011:  // fsd\n#endif\n          return SignalContext::Write;\n        default:\n          return SignalContext::Unknown;\n      }\n#endif\n    default:\n      return SignalContext::Unknown;\n  }\n#else\n  (void)ucontext;\n  return Unknown;  // FIXME: Implement.\n#endif\n}\n\nbool SignalContext::IsTrueFaultingAddress() const {\n  auto si = static_cast<const siginfo_t *>(siginfo);\n  // SIGSEGV signals without a true fault address have si_code set to 128.\n  return si->si_signo == SIGSEGV && si->si_code != 128;\n}\n\nvoid SignalContext::DumpAllRegisters(void *context) {\n  // FIXME: Implement this.\n}\n\nstatic void GetPcSpBp(void *context, uptr *pc, uptr *sp, uptr *bp) {\n#if SANITIZER_NETBSD\n  // This covers all NetBSD architectures\n  ucontext_t *ucontext = (ucontext_t *)context;\n  *pc = _UC_MACHINE_PC(ucontext);\n  *bp = _UC_MACHINE_FP(ucontext);\n  *sp = _UC_MACHINE_SP(ucontext);\n#elif defined(__arm__)\n  ucontext_t *ucontext = (ucontext_t*)context;\n  *pc = ucontext->uc_mcontext.arm_pc;\n  *bp = ucontext->uc_mcontext.arm_fp;\n  *sp = ucontext->uc_mcontext.arm_sp;\n#elif defined(__aarch64__)\n  ucontext_t *ucontext = (ucontext_t*)context;\n  *pc = ucontext->uc_mcontext.pc;\n  *bp = ucontext->uc_mcontext.regs[29];\n  *sp = ucontext->uc_mcontext.sp;\n#elif defined(__hppa__)\n  ucontext_t *ucontext = (ucontext_t*)context;\n  *pc = ucontext->uc_mcontext.sc_iaoq[0];\n  /* GCC uses %r3 whenever a frame pointer is needed.  
*/\n  *bp = ucontext->uc_mcontext.sc_gr[3];\n  *sp = ucontext->uc_mcontext.sc_gr[30];\n#elif defined(__x86_64__)\n# if SANITIZER_FREEBSD\n  ucontext_t *ucontext = (ucontext_t*)context;\n  *pc = ucontext->uc_mcontext.mc_rip;\n  *bp = ucontext->uc_mcontext.mc_rbp;\n  *sp = ucontext->uc_mcontext.mc_rsp;\n# else\n  ucontext_t *ucontext = (ucontext_t*)context;\n  *pc = ucontext->uc_mcontext.gregs[REG_RIP];\n  *bp = ucontext->uc_mcontext.gregs[REG_RBP];\n  *sp = ucontext->uc_mcontext.gregs[REG_RSP];\n# endif\n#elif defined(__i386__)\n# if SANITIZER_FREEBSD\n  ucontext_t *ucontext = (ucontext_t*)context;\n  *pc = ucontext->uc_mcontext.mc_eip;\n  *bp = ucontext->uc_mcontext.mc_ebp;\n  *sp = ucontext->uc_mcontext.mc_esp;\n# else\n  ucontext_t *ucontext = (ucontext_t*)context;\n# if SANITIZER_SOLARIS\n  /* Use the numeric values: the symbolic ones are undefined by llvm\n     include/llvm/Support/Solaris.h.  */\n# ifndef REG_EIP\n#  define REG_EIP 14 // REG_PC\n# endif\n# ifndef REG_EBP\n#  define REG_EBP  6 // REG_FP\n# endif\n# ifndef REG_UESP\n#  define REG_UESP 17 // REG_SP\n# endif\n# endif\n  *pc = ucontext->uc_mcontext.gregs[REG_EIP];\n  *bp = ucontext->uc_mcontext.gregs[REG_EBP];\n  *sp = ucontext->uc_mcontext.gregs[REG_UESP];\n# endif\n#elif defined(__powerpc__) || defined(__powerpc64__)\n  ucontext_t *ucontext = (ucontext_t*)context;\n  *pc = ucontext->uc_mcontext.regs->nip;\n  *sp = ucontext->uc_mcontext.regs->gpr[PT_R1];\n  // The powerpc{,64}-linux ABIs do not specify r31 as the frame\n  // pointer, but GCC always uses r31 when we need a frame pointer.\n  *bp = ucontext->uc_mcontext.regs->gpr[PT_R31];\n#elif defined(__sparc__)\n#if defined(__arch64__) || defined(__sparcv9)\n#define STACK_BIAS 2047\n#else\n#define STACK_BIAS 0\n# endif\n# if SANITIZER_SOLARIS\n  ucontext_t *ucontext = (ucontext_t *)context;\n  *pc = ucontext->uc_mcontext.gregs[REG_PC];\n  *sp = ucontext->uc_mcontext.gregs[REG_O6] + STACK_BIAS;\n#else\n  // Historical BSDism here.\n  struct 
sigcontext *scontext = (struct sigcontext *)context;\n#if defined(__arch64__)\n  *pc = scontext->sigc_regs.tpc;\n  *sp = scontext->sigc_regs.u_regs[14] + STACK_BIAS;\n#else\n  *pc = scontext->si_regs.pc;\n  *sp = scontext->si_regs.u_regs[14];\n#endif\n# endif\n  *bp = (uptr)((uhwptr *)*sp)[14] + STACK_BIAS;\n#elif defined(__mips__)\n  ucontext_t *ucontext = (ucontext_t*)context;\n  *pc = ucontext->uc_mcontext.pc;\n  *bp = ucontext->uc_mcontext.gregs[30];\n  *sp = ucontext->uc_mcontext.gregs[29];\n#elif defined(__s390__)\n  ucontext_t *ucontext = (ucontext_t*)context;\n# if defined(__s390x__)\n  *pc = ucontext->uc_mcontext.psw.addr;\n# else\n  *pc = ucontext->uc_mcontext.psw.addr & 0x7fffffff;\n# endif\n  *bp = ucontext->uc_mcontext.gregs[11];\n  *sp = ucontext->uc_mcontext.gregs[15];\n#elif defined(__riscv)\n  ucontext_t *ucontext = (ucontext_t*)context;\n#    if SANITIZER_FREEBSD\n  *pc = ucontext->uc_mcontext.mc_gpregs.gp_sepc;\n  *bp = ucontext->uc_mcontext.mc_gpregs.gp_s[0];\n  *sp = ucontext->uc_mcontext.mc_gpregs.gp_sp;\n#    else\n  *pc = ucontext->uc_mcontext.__gregs[REG_PC];\n  *bp = ucontext->uc_mcontext.__gregs[REG_S0];\n  *sp = ucontext->uc_mcontext.__gregs[REG_SP];\n#    endif\n#  elif defined(__hexagon__)\n  ucontext_t *ucontext = (ucontext_t *)context;\n  *pc = ucontext->uc_mcontext.pc;\n  *bp = ucontext->uc_mcontext.r30;\n  *sp = ucontext->uc_mcontext.r29;\n#  else\n#    error \"Unsupported arch\"\n#  endif\n}\n\nvoid SignalContext::InitPcSpBp() { GetPcSpBp(context, &pc, &sp, &bp); }\n\nvoid InitializePlatformEarly() {\n  // Do nothing.\n}\n\nvoid MaybeReexec() {\n  // No need to re-exec on Linux.\n}\n\nvoid CheckASLR() {\n#if SANITIZER_NETBSD\n  int mib[3];\n  int paxflags;\n  uptr len = sizeof(paxflags);\n\n  mib[0] = CTL_PROC;\n  mib[1] = internal_getpid();\n  mib[2] = PROC_PID_PAXFLAGS;\n\n  if (UNLIKELY(internal_sysctl(mib, 3, &paxflags, &len, NULL, 0) == -1)) {\n    Printf(\"sysctl failed\\n\");\n    Die();\n  }\n\n  if (UNLIKELY(paxflags & 
CTL_PROC_PAXFLAGS_ASLR)) {\n    Printf(\"This sanitizer is not compatible with enabled ASLR.\\n\"\n           \"To disable ASLR, please run \\\"paxctl +a %s\\\" and try again.\\n\",\n           GetArgv()[0]);\n    Die();\n  }\n#elif SANITIZER_PPC64V2\n  // Disable ASLR for Linux PPC64LE.\n  int old_personality = personality(0xffffffff);\n  if (old_personality != -1 && (old_personality & ADDR_NO_RANDOMIZE) == 0) {\n    VReport(1, \"WARNING: Program is being run with address space layout \"\n               \"randomization (ASLR) enabled which prevents the thread and \"\n               \"memory sanitizers from working on powerpc64le.\\n\"\n               \"ASLR will be disabled and the program re-executed.\\n\");\n    CHECK_NE(personality(old_personality | ADDR_NO_RANDOMIZE), -1);\n    ReExec();\n  }\n#elif SANITIZER_FREEBSD\n  int aslr_status;\n  if (UNLIKELY(procctl(P_PID, 0, PROC_ASLR_STATUS, &aslr_status) == -1)) {\n    // We're making things less 'dramatic' here since\n    // the cmd is not necessarily guaranteed to be here\n    // just yet regarding FreeBSD release\n    return;\n  }\n  if ((aslr_status & PROC_ASLR_ACTIVE) != 0) {\n    Printf(\"This sanitizer is not compatible with enabled ASLR \"\n           \"and binaries compiled with PIE\\n\");\n    Die();\n  }\n#else\n  // Do nothing\n#endif\n}\n\nvoid CheckMPROTECT() {\n#if SANITIZER_NETBSD\n  int mib[3];\n  int paxflags;\n  uptr len = sizeof(paxflags);\n\n  mib[0] = CTL_PROC;\n  mib[1] = internal_getpid();\n  mib[2] = PROC_PID_PAXFLAGS;\n\n  if (UNLIKELY(internal_sysctl(mib, 3, &paxflags, &len, NULL, 0) == -1)) {\n    Printf(\"sysctl failed\\n\");\n    Die();\n  }\n\n  if (UNLIKELY(paxflags & CTL_PROC_PAXFLAGS_MPROTECT)) {\n    Printf(\"This sanitizer is not compatible with enabled MPROTECT\\n\");\n    Die();\n  }\n#else\n  // Do nothing\n#endif\n}\n\nvoid CheckNoDeepBind(const char *filename, int flag) {\n#ifdef RTLD_DEEPBIND\n  if (flag & RTLD_DEEPBIND) {\n    Report(\n        \"You are trying to dlopen 
a %s shared library with RTLD_DEEPBIND flag\"\n        \" which is incompatible with sanitizer runtime \"\n        \"(see https://github.com/google/sanitizers/issues/611 for details\"\n        \"). If you want to run %s library under sanitizers please remove \"\n        \"RTLD_DEEPBIND from dlopen flags.\\n\",\n        filename, filename);\n    Die();\n  }\n#endif\n}\n\nuptr FindAvailableMemoryRange(uptr size, uptr alignment, uptr left_padding,\n                              uptr *largest_gap_found,\n                              uptr *max_occupied_addr) {\n  UNREACHABLE(\"FindAvailableMemoryRange is not available\");\n  return 0;\n}\n\nbool GetRandom(void *buffer, uptr length, bool blocking) {\n  if (!buffer || !length || length > 256)\n    return false;\n#if SANITIZER_USE_GETENTROPY\n  uptr rnd = getentropy(buffer, length);\n  int rverrno = 0;\n  if (internal_iserror(rnd, &rverrno) && rverrno == EFAULT)\n    return false;\n  else if (rnd == 0)\n    return true;\n#endif // SANITIZER_USE_GETENTROPY\n\n#if SANITIZER_USE_GETRANDOM\n  static atomic_uint8_t skip_getrandom_syscall;\n  if (!atomic_load_relaxed(&skip_getrandom_syscall)) {\n    // Up to 256 bytes, getrandom will not be interrupted.\n    uptr res = internal_syscall(SYSCALL(getrandom), buffer, length,\n                                blocking ? 
0 : GRND_NONBLOCK);\n    int rverrno = 0;\n    if (internal_iserror(res, &rverrno) && rverrno == ENOSYS)\n      atomic_store_relaxed(&skip_getrandom_syscall, 1);\n    else if (res == length)\n      return true;\n  }\n#endif // SANITIZER_USE_GETRANDOM\n  // Up to 256 bytes, a read off /dev/urandom will not be interrupted.\n  // blocking is moot here, O_NONBLOCK has no effect when opening /dev/urandom.\n  uptr fd = internal_open(\"/dev/urandom\", O_RDONLY);\n  if (internal_iserror(fd))\n    return false;\n  uptr res = internal_read(fd, buffer, length);\n  if (internal_iserror(res))\n    return false;\n  internal_close(fd);\n  return true;\n}\n\n} // namespace __sanitizer\n\n#endif\n"
  },
  {
    "path": "runtime/sanitizer_common/sanitizer_linux.h",
    "content": "//===-- sanitizer_linux.h ---------------------------------------*- C++ -*-===//\n//\n// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n// See https://llvm.org/LICENSE.txt for license information.\n// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n//\n//===----------------------------------------------------------------------===//\n//\n// Linux-specific syscall wrappers and classes.\n//\n//===----------------------------------------------------------------------===//\n#ifndef SANITIZER_LINUX_H\n#define SANITIZER_LINUX_H\n\n#include \"sanitizer_platform.h\"\n#if SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_NETBSD ||                \\\n    SANITIZER_SOLARIS\n#include \"sanitizer_common.h\"\n#include \"sanitizer_internal_defs.h\"\n#include \"sanitizer_platform_limits_freebsd.h\"\n#include \"sanitizer_platform_limits_netbsd.h\"\n#include \"sanitizer_platform_limits_posix.h\"\n#include \"sanitizer_platform_limits_solaris.h\"\n#include \"sanitizer_posix.h\"\n\nstruct link_map;  // Opaque type returned by dlopen().\nstruct utsname;\n\nnamespace __sanitizer {\n// Dirent structure for getdents(). 
Note that this structure is different from\n// the one in <dirent.h>, which is used by readdir().\nstruct linux_dirent;\n\nstruct ProcSelfMapsBuff {\n  char *data;\n  uptr mmaped_size;\n  uptr len;\n};\n\nstruct MemoryMappingLayoutData {\n  ProcSelfMapsBuff proc_self_maps;\n  const char *current;\n};\n\nvoid ReadProcMaps(ProcSelfMapsBuff *proc_maps);\n\n// Syscall wrappers.\nuptr internal_getdents(fd_t fd, struct linux_dirent *dirp, unsigned int count);\nuptr internal_sigaltstack(const void* ss, void* oss);\nuptr internal_sigprocmask(int how, __sanitizer_sigset_t *set,\n    __sanitizer_sigset_t *oldset);\n\nvoid SetSigProcMask(__sanitizer_sigset_t *set, __sanitizer_sigset_t *oldset);\nstruct ScopedBlockSignals {\n  explicit ScopedBlockSignals(__sanitizer_sigset_t *copy);\n  ~ScopedBlockSignals();\n\n  ScopedBlockSignals &operator=(const ScopedBlockSignals &) = delete;\n  ScopedBlockSignals(const ScopedBlockSignals &) = delete;\n\n private:\n  __sanitizer_sigset_t saved_;\n};\n\n#  if SANITIZER_GLIBC\nuptr internal_clock_gettime(__sanitizer_clockid_t clk_id, void *tp);\n#endif\n\n// Linux-only syscalls.\n#if SANITIZER_LINUX\nuptr internal_prctl(int option, uptr arg2, uptr arg3, uptr arg4, uptr arg5);\n// Used only by sanitizer_stoptheworld. 
Signal handlers that are actually used\n// (like the process-wide error reporting SEGV handler) must use\n// internal_sigaction instead.\nint internal_sigaction_norestorer(int signum, const void *act, void *oldact);\nvoid internal_sigdelset(__sanitizer_sigset_t *set, int signum);\n#if defined(__x86_64__) || defined(__mips__) || defined(__aarch64__) || \\\n    defined(__powerpc64__) || defined(__s390__) || defined(__i386__) || \\\n    defined(__arm__) || SANITIZER_RISCV64\nuptr internal_clone(int (*fn)(void *), void *child_stack, int flags, void *arg,\n                    int *parent_tidptr, void *newtls, int *child_tidptr);\n#endif\nint internal_uname(struct utsname *buf);\n#elif SANITIZER_FREEBSD\nvoid internal_sigdelset(__sanitizer_sigset_t *set, int signum);\n#elif SANITIZER_NETBSD\nvoid internal_sigdelset(__sanitizer_sigset_t *set, int signum);\nuptr internal_clone(int (*fn)(void *), void *child_stack, int flags, void *arg);\n#endif  // SANITIZER_LINUX\n\n// This class reads thread IDs from /proc/<pid>/task using only syscalls.\nclass ThreadLister {\n public:\n  explicit ThreadLister(pid_t pid);\n  ~ThreadLister();\n  enum Result {\n    Error,\n    Incomplete,\n    Ok,\n  };\n  Result ListThreads(InternalMmapVector<tid_t> *threads);\n\n private:\n  bool IsAlive(int tid);\n\n  pid_t pid_;\n  int descriptor_ = -1;\n  InternalMmapVector<char> buffer_;\n};\n\n// Exposed for testing.\nuptr ThreadDescriptorSize();\nuptr ThreadSelf();\n\n// Matches a library's file name against a base name (stripping path and version\n// information).\nbool LibraryNameIs(const char *full_name, const char *base_name);\n\n// Call cb for each region mapped by map.\nvoid ForEachMappedRegion(link_map *map, void (*cb)(const void *, uptr));\n\n// Releases memory pages entirely within the [beg, end] address range.\n// The pages no longer count toward RSS; reads are guaranteed to return 0.\n// Requires (but does not verify!) 
that pages are MAP_PRIVATE.\ninline void ReleaseMemoryPagesToOSAndZeroFill(uptr beg, uptr end) {\n  // man madvise on Linux promises zero-fill for anonymous private pages.\n  // Testing shows the same behaviour for private (but not anonymous) mappings\n  // of shm_open() files, as long as the underlying file is untouched.\n  CHECK(SANITIZER_LINUX);\n  ReleaseMemoryPagesToOS(beg, end);\n}\n\n#if SANITIZER_ANDROID\n\n#if defined(__aarch64__)\n# define __get_tls() \\\n    ({ void** __v; __asm__(\"mrs %0, tpidr_el0\" : \"=r\"(__v)); __v; })\n#elif defined(__arm__)\n# define __get_tls() \\\n    ({ void** __v; __asm__(\"mrc p15, 0, %0, c13, c0, 3\" : \"=r\"(__v)); __v; })\n#elif defined(__mips__)\n// On mips32r1, this goes via a kernel illegal instruction trap that's\n// optimized for v1.\n# define __get_tls() \\\n    ({ register void** __v asm(\"v1\"); \\\n       __asm__(\".set    push\\n\" \\\n               \".set    mips32r2\\n\" \\\n               \"rdhwr   %0,$29\\n\" \\\n               \".set    pop\\n\" : \"=r\"(__v)); \\\n       __v; })\n#elif defined(__i386__)\n# define __get_tls() \\\n    ({ void** __v; __asm__(\"movl %%gs:0, %0\" : \"=r\"(__v)); __v; })\n#elif defined(__x86_64__)\n# define __get_tls() \\\n    ({ void** __v; __asm__(\"mov %%fs:0, %0\" : \"=r\"(__v)); __v; })\n#else\n#error \"Unsupported architecture.\"\n#endif\n\n// The Android Bionic team has allocated a TLS slot for sanitizers starting\n// with Q, given that Android currently doesn't support ELF TLS. It is used to\n// store sanitizer thread specific data.\nstatic const int TLS_SLOT_SANITIZER = 6;\n\nALWAYS_INLINE uptr *get_android_tls_ptr() {\n  return reinterpret_cast<uptr *>(&__get_tls()[TLS_SLOT_SANITIZER]);\n}\n\n#endif  // SANITIZER_ANDROID\n\n}  // namespace __sanitizer\n\n#endif\n#endif  // SANITIZER_LINUX_H\n"
  },
  {
    "path": "runtime/sanitizer_common/sanitizer_linux_libcdep.cpp",
    "content": "//===-- sanitizer_linux_libcdep.cpp ---------------------------------------===//\n//\n// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n// See https://llvm.org/LICENSE.txt for license information.\n// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n//\n//===----------------------------------------------------------------------===//\n//\n// This file is shared between AddressSanitizer and ThreadSanitizer\n// run-time libraries and implements linux-specific functions from\n// sanitizer_libc.h.\n//===----------------------------------------------------------------------===//\n\n#include \"sanitizer_platform.h\"\n\n#if SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_NETBSD || \\\n    SANITIZER_SOLARIS\n\n#include \"sanitizer_allocator_internal.h\"\n#include \"sanitizer_atomic.h\"\n#include \"sanitizer_common.h\"\n#include \"sanitizer_file.h\"\n#include \"sanitizer_flags.h\"\n#include \"sanitizer_freebsd.h\"\n#include \"sanitizer_getauxval.h\"\n#include \"sanitizer_glibc_version.h\"\n#include \"sanitizer_linux.h\"\n#include \"sanitizer_placement_new.h\"\n#include \"sanitizer_procmaps.h\"\n\n#if SANITIZER_NETBSD\n#define _RTLD_SOURCE  // for __lwp_gettcb_fast() / __lwp_getprivate_fast()\n#endif\n\n#include <dlfcn.h>  // for dlsym()\n#include <link.h>\n#include <pthread.h>\n#include <signal.h>\n#include <sys/mman.h>\n#include <sys/resource.h>\n#include <syslog.h>\n\n#if !defined(ElfW)\n#define ElfW(type) Elf_##type\n#endif\n\n#if SANITIZER_FREEBSD\n#include <pthread_np.h>\n#include <osreldate.h>\n#include <sys/sysctl.h>\n#define pthread_getattr_np pthread_attr_get_np\n// The MAP_NORESERVE define has been removed in FreeBSD 11.x, and even before\n// that, it was never implemented. 
So just define it to zero.\n#undef MAP_NORESERVE\n#define MAP_NORESERVE 0\n#endif\n\n#if SANITIZER_NETBSD\n#include <sys/sysctl.h>\n#include <sys/tls.h>\n#include <lwp.h>\n#endif\n\n#if SANITIZER_SOLARIS\n#include <stdlib.h>\n#include <thread.h>\n#endif\n\n#if SANITIZER_ANDROID\n#include <android/api-level.h>\n#if !defined(CPU_COUNT) && !defined(__aarch64__)\n#include <dirent.h>\n#include <fcntl.h>\nstruct __sanitizer::linux_dirent {\n  long           d_ino;\n  off_t          d_off;\n  unsigned short d_reclen;\n  char           d_name[];\n};\n#endif\n#endif\n\n#if !SANITIZER_ANDROID\n#include <elf.h>\n#include <unistd.h>\n#endif\n\nnamespace __sanitizer {\n\nSANITIZER_WEAK_ATTRIBUTE int\nreal_sigaction(int signum, const void *act, void *oldact);\n\nint internal_sigaction(int signum, const void *act, void *oldact) {\n#if !SANITIZER_GO\n  if (&real_sigaction)\n    return real_sigaction(signum, act, oldact);\n#endif\n  return sigaction(signum, (const struct sigaction *)act,\n                   (struct sigaction *)oldact);\n}\n\nvoid GetThreadStackTopAndBottom(bool at_initialization, uptr *stack_top,\n                                uptr *stack_bottom) {\n  CHECK(stack_top);\n  CHECK(stack_bottom);\n  if (at_initialization) {\n    // This is the main thread. 
Libpthread may not be initialized yet.\n    struct rlimit rl;\n    CHECK_EQ(getrlimit(RLIMIT_STACK, &rl), 0);\n\n    // Find the mapping that contains a stack variable.\n    MemoryMappingLayout proc_maps(/*cache_enabled*/true);\n    if (proc_maps.Error()) {\n      *stack_top = *stack_bottom = 0;\n      return;\n    }\n    MemoryMappedSegment segment;\n    uptr prev_end = 0;\n    while (proc_maps.Next(&segment)) {\n      if ((uptr)&rl < segment.end) break;\n      prev_end = segment.end;\n    }\n    CHECK((uptr)&rl >= segment.start && (uptr)&rl < segment.end);\n\n    // Get stacksize from rlimit, but clip it so that it does not overlap\n    // with other mappings.\n    uptr stacksize = rl.rlim_cur;\n    if (stacksize > segment.end - prev_end) stacksize = segment.end - prev_end;\n    // When running with unlimited stack size, we still want to set some limit.\n    // The unlimited stack size is caused by 'ulimit -s unlimited'.\n    // Also, for some reason, GNU make spawns subprocesses with unlimited stack.\n    if (stacksize > kMaxThreadStackSize)\n      stacksize = kMaxThreadStackSize;\n    *stack_top = segment.end;\n    *stack_bottom = segment.end - stacksize;\n    return;\n  }\n  uptr stacksize = 0;\n  void *stackaddr = nullptr;\n#if SANITIZER_SOLARIS\n  stack_t ss;\n  CHECK_EQ(thr_stksegment(&ss), 0);\n  stacksize = ss.ss_size;\n  stackaddr = (char *)ss.ss_sp - stacksize;\n#else  // !SANITIZER_SOLARIS\n  pthread_attr_t attr;\n  pthread_attr_init(&attr);\n  CHECK_EQ(pthread_getattr_np(pthread_self(), &attr), 0);\n  my_pthread_attr_getstack(&attr, &stackaddr, &stacksize);\n  pthread_attr_destroy(&attr);\n#endif  // SANITIZER_SOLARIS\n\n  *stack_top = (uptr)stackaddr + stacksize;\n  *stack_bottom = (uptr)stackaddr;\n}\n\n#if !SANITIZER_GO\nbool SetEnv(const char *name, const char *value) {\n  void *f = dlsym(RTLD_NEXT, \"setenv\");\n  if (!f)\n    return false;\n  typedef int(*setenv_ft)(const char *name, const char *value, int overwrite);\n  setenv_ft setenv_f;\n  
CHECK_EQ(sizeof(setenv_f), sizeof(f));\n  internal_memcpy(&setenv_f, &f, sizeof(f));\n  return setenv_f(name, value, 1) == 0;\n}\n#endif\n\n__attribute__((unused)) static bool GetLibcVersion(int *major, int *minor,\n                                                   int *patch) {\n#ifdef _CS_GNU_LIBC_VERSION\n  char buf[64];\n  uptr len = confstr(_CS_GNU_LIBC_VERSION, buf, sizeof(buf));\n  if (len >= sizeof(buf))\n    return false;\n  buf[len] = 0;\n  static const char kGLibC[] = \"glibc \";\n  if (internal_strncmp(buf, kGLibC, sizeof(kGLibC) - 1) != 0)\n    return false;\n  const char *p = buf + sizeof(kGLibC) - 1;\n  *major = internal_simple_strtoll(p, &p, 10);\n  *minor = (*p == '.') ? internal_simple_strtoll(p + 1, &p, 10) : 0;\n  *patch = (*p == '.') ? internal_simple_strtoll(p + 1, &p, 10) : 0;\n  return true;\n#else\n  return false;\n#endif\n}\n\n// True if we can use dlpi_tls_data. glibc before 2.25 may leave NULL (BZ\n// #19826) so dlpi_tls_data cannot be used.\n//\n// musl before 1.2.3 and FreeBSD as of 12.2 incorrectly set dlpi_tls_data to\n// the TLS initialization image\n// https://bugs.freebsd.org/bugzilla/show_bug.cgi?id=254774\n__attribute__((unused)) static int g_use_dlpi_tls_data;\n\n#if SANITIZER_GLIBC && !SANITIZER_GO\n__attribute__((unused)) static size_t g_tls_size;\nvoid InitTlsSize() {\n  int major, minor, patch;\n  g_use_dlpi_tls_data =\n      GetLibcVersion(&major, &minor, &patch) && major == 2 && minor >= 25;\n\n#if defined(__aarch64__) || defined(__x86_64__) || defined(__powerpc64__)\n  void *get_tls_static_info = dlsym(RTLD_NEXT, \"_dl_get_tls_static_info\");\n  size_t tls_align;\n  ((void (*)(size_t *, size_t *))get_tls_static_info)(&g_tls_size, &tls_align);\n#endif\n}\n#else\nvoid InitTlsSize() { }\n#endif  // SANITIZER_GLIBC && !SANITIZER_GO\n\n// On glibc x86_64, ThreadDescriptorSize() needs to be precise due to the usage\n// of g_tls_size. 
On other targets, ThreadDescriptorSize() is only used by lsan\n// to get the pointer to thread-specific data keys in the thread control block.\n#if (SANITIZER_FREEBSD || SANITIZER_LINUX) && !SANITIZER_ANDROID && !SANITIZER_GO\n// sizeof(struct pthread) from glibc.\nstatic atomic_uintptr_t thread_descriptor_size;\n\nstatic uptr ThreadDescriptorSizeFallback() {\n  uptr val = 0;\n#if defined(__x86_64__) || defined(__i386__) || defined(__arm__)\n  int major;\n  int minor;\n  int patch;\n  if (GetLibcVersion(&major, &minor, &patch) && major == 2) {\n    /* sizeof(struct pthread) values from various glibc versions.  */\n    if (SANITIZER_X32)\n      val = 1728; // Assume only one particular version for x32.\n    // For ARM sizeof(struct pthread) changed in Glibc 2.23.\n    else if (SANITIZER_ARM)\n      val = minor <= 22 ? 1120 : 1216;\n    else if (minor <= 3)\n      val = FIRST_32_SECOND_64(1104, 1696);\n    else if (minor == 4)\n      val = FIRST_32_SECOND_64(1120, 1728);\n    else if (minor == 5)\n      val = FIRST_32_SECOND_64(1136, 1728);\n    else if (minor <= 9)\n      val = FIRST_32_SECOND_64(1136, 1712);\n    else if (minor == 10)\n      val = FIRST_32_SECOND_64(1168, 1776);\n    else if (minor == 11 || (minor == 12 && patch == 1))\n      val = FIRST_32_SECOND_64(1168, 2288);\n    else if (minor <= 14)\n      val = FIRST_32_SECOND_64(1168, 2304);\n    else if (minor < 32)  // Unknown version\n      val = FIRST_32_SECOND_64(1216, 2304);\n    else  // minor == 32\n      val = FIRST_32_SECOND_64(1344, 2496);\n  }\n#elif defined(__s390__) || defined(__sparc__)\n  // The size of a prefix of TCB including pthread::{specific_1stblock,specific}\n  // suffices. Just return offsetof(struct pthread, specific_used), which hasn't\n  // changed since 2007-05. 
Technically this applies to i386/x86_64 as well but\n  // we call _dl_get_tls_static_info and need the precise size of struct\n  // pthread.\n  return FIRST_32_SECOND_64(524, 1552);\n#elif defined(__mips__)\n  // TODO(sagarthakur): add more values as per different glibc versions.\n  val = FIRST_32_SECOND_64(1152, 1776);\n#elif SANITIZER_RISCV64\n  int major;\n  int minor;\n  int patch;\n  if (GetLibcVersion(&major, &minor, &patch) && major == 2) {\n    // TODO: consider adding an optional runtime check for an unknown (untested)\n    // glibc version\n    if (minor <= 28)  // WARNING: the highest tested version is 2.29\n      val = 1772;     // no guarantees for this one\n    else if (minor <= 31)\n      val = 1772;  // tested against glibc 2.29, 2.31\n    else\n      val = 1936;  // tested against glibc 2.32\n  }\n\n#elif defined(__aarch64__)\n  // The sizeof (struct pthread) is the same from GLIBC 2.17 to 2.22.\n  val = 1776;\n#elif defined(__powerpc64__)\n  val = 1776; // from glibc.ppc64le 2.20-8.fc21\n#endif\n  return val;\n}\n\nuptr ThreadDescriptorSize() {\n  uptr val = atomic_load_relaxed(&thread_descriptor_size);\n  if (val)\n    return val;\n  // _thread_db_sizeof_pthread is a GLIBC_PRIVATE symbol that is exported in\n  // glibc 2.34 and later.\n  if (unsigned *psizeof = static_cast<unsigned *>(\n          dlsym(RTLD_DEFAULT, \"_thread_db_sizeof_pthread\")))\n    val = *psizeof;\n  if (!val)\n    val = ThreadDescriptorSizeFallback();\n  atomic_store_relaxed(&thread_descriptor_size, val);\n  return val;\n}\n\n#if defined(__mips__) || defined(__powerpc64__) || SANITIZER_RISCV64\n// TlsPreTcbSize includes size of struct pthread_descr and size of tcb\n// head structure. 
It lies before the static tls blocks.\nstatic uptr TlsPreTcbSize() {\n#if defined(__mips__)\n  const uptr kTcbHead = 16; // sizeof (tcbhead_t)\n#elif defined(__powerpc64__)\n  const uptr kTcbHead = 88; // sizeof (tcbhead_t)\n#elif SANITIZER_RISCV64\n  const uptr kTcbHead = 16;  // sizeof (tcbhead_t)\n#endif\n  const uptr kTlsAlign = 16;\n  const uptr kTlsPreTcbSize =\n      RoundUpTo(ThreadDescriptorSize() + kTcbHead, kTlsAlign);\n  return kTlsPreTcbSize;\n}\n#endif\n\nnamespace {\nstruct TlsBlock {\n  uptr begin, end, align;\n  size_t tls_modid;\n  bool operator<(const TlsBlock &rhs) const { return begin < rhs.begin; }\n};\n}  // namespace\n\n#ifdef __s390__\nextern \"C\" uptr __tls_get_offset(void *arg);\n\nstatic uptr TlsGetOffset(uptr ti_module, uptr ti_offset) {\n  // The __tls_get_offset ABI requires %r12 to point to GOT and %r2 to be an\n  // offset of a struct tls_index inside GOT. We don't possess either of the\n  // two, so violate the letter of the \"ELF Handling For Thread-Local\n  // Storage\" document and assume that the implementation just dereferences\n  // %r2 + %r12.\n  uptr tls_index[2] = {ti_module, ti_offset};\n  register uptr r2 asm(\"2\") = 0;\n  register void *r12 asm(\"12\") = tls_index;\n  asm(\"basr %%r14, %[__tls_get_offset]\"\n      : \"+r\"(r2)\n      : [__tls_get_offset] \"r\"(__tls_get_offset), \"r\"(r12)\n      : \"memory\", \"cc\", \"0\", \"1\", \"3\", \"4\", \"5\", \"14\");\n  return r2;\n}\n#else\nextern \"C\" void *__tls_get_addr(size_t *);\n#endif\n\nstatic int CollectStaticTlsBlocks(struct dl_phdr_info *info, size_t size,\n                                  void *data) {\n  if (!info->dlpi_tls_modid)\n    return 0;\n  uptr begin = (uptr)info->dlpi_tls_data;\n  if (!g_use_dlpi_tls_data) {\n    // Call __tls_get_addr as a fallback. 
This forces TLS allocation on glibc\n    // and FreeBSD.\n#ifdef __s390__\n    begin = (uptr)__builtin_thread_pointer() +\n            TlsGetOffset(info->dlpi_tls_modid, 0);\n#else\n    size_t mod_and_off[2] = {info->dlpi_tls_modid, 0};\n    begin = (uptr)__tls_get_addr(mod_and_off);\n#endif\n  }\n  for (unsigned i = 0; i != info->dlpi_phnum; ++i)\n    if (info->dlpi_phdr[i].p_type == PT_TLS) {\n      static_cast<InternalMmapVector<TlsBlock> *>(data)->push_back(\n          TlsBlock{begin, begin + info->dlpi_phdr[i].p_memsz,\n                   info->dlpi_phdr[i].p_align, info->dlpi_tls_modid});\n      break;\n    }\n  return 0;\n}\n\n__attribute__((unused)) static void GetStaticTlsBoundary(uptr *addr, uptr *size,\n                                                         uptr *align) {\n  InternalMmapVector<TlsBlock> ranges;\n  dl_iterate_phdr(CollectStaticTlsBlocks, &ranges);\n  uptr len = ranges.size();\n  Sort(ranges.begin(), len);\n  // Find the range with tls_modid=1. For glibc, because libc.so uses PT_TLS,\n  // this module is guaranteed to exist and is one of the initially loaded\n  // modules.\n  uptr one = 0;\n  while (one != len && ranges[one].tls_modid != 1) ++one;\n  if (one == len) {\n    // This may happen with musl if no module uses PT_TLS.\n    *addr = 0;\n    *size = 0;\n    *align = 1;\n    return;\n  }\n  // Find the maximum consecutive ranges. We consider two modules consecutive if\n  // the gap is smaller than the alignment. The dynamic loader places static TLS\n  // blocks this way not to waste space.\n  uptr l = one;\n  *align = ranges[l].align;\n  while (l != 0 && ranges[l].begin < ranges[l - 1].end + ranges[l - 1].align)\n    *align = Max(*align, ranges[--l].align);\n  uptr r = one + 1;\n  while (r != len && ranges[r].begin < ranges[r - 1].end + ranges[r - 1].align)\n    *align = Max(*align, ranges[r++].align);\n  *addr = ranges[l].begin;\n  *size = ranges[r - 1].end - ranges[l].begin;\n}\n#endif  // (x86_64 || i386 || mips || ...) 
&& (SANITIZER_FREEBSD ||\n        // SANITIZER_LINUX) && !SANITIZER_ANDROID && !SANITIZER_GO\n\n#if SANITIZER_NETBSD\nstatic struct tls_tcb * ThreadSelfTlsTcb() {\n  struct tls_tcb *tcb = nullptr;\n#ifdef __HAVE___LWP_GETTCB_FAST\n  tcb = (struct tls_tcb *)__lwp_gettcb_fast();\n#elif defined(__HAVE___LWP_GETPRIVATE_FAST)\n  tcb = (struct tls_tcb *)__lwp_getprivate_fast();\n#endif\n  return tcb;\n}\n\nuptr ThreadSelf() {\n  return (uptr)ThreadSelfTlsTcb()->tcb_pthread;\n}\n\nint GetSizeFromHdr(struct dl_phdr_info *info, size_t size, void *data) {\n  const Elf_Phdr *hdr = info->dlpi_phdr;\n  const Elf_Phdr *last_hdr = hdr + info->dlpi_phnum;\n\n  for (; hdr != last_hdr; ++hdr) {\n    if (hdr->p_type == PT_TLS && info->dlpi_tls_modid == 1) {\n      *(uptr*)data = hdr->p_memsz;\n      break;\n    }\n  }\n  return 0;\n}\n#endif  // SANITIZER_NETBSD\n\n#if SANITIZER_ANDROID\n// Bionic provides this API since S.\nextern \"C\" SANITIZER_WEAK_ATTRIBUTE void __libc_get_static_tls_bounds(void **,\n                                                                      void **);\n#endif\n\n#if !SANITIZER_GO\nstatic void GetTls(uptr *addr, uptr *size) {\n#if SANITIZER_ANDROID\n  if (&__libc_get_static_tls_bounds) {\n    void *start_addr;\n    void *end_addr;\n    __libc_get_static_tls_bounds(&start_addr, &end_addr);\n    *addr = reinterpret_cast<uptr>(start_addr);\n    *size =\n        reinterpret_cast<uptr>(end_addr) - reinterpret_cast<uptr>(start_addr);\n  } else {\n    *addr = 0;\n    *size = 0;\n  }\n#elif SANITIZER_GLIBC && defined(__x86_64__)\n  // For aarch64 and x86-64, use an O(1) approach which requires relatively\n  // precise ThreadDescriptorSize. 
g_tls_size was initialized in InitTlsSize.\n  asm(\"mov %%fs:16,%0\" : \"=r\"(*addr));\n  *size = g_tls_size;\n  *addr -= *size;\n  *addr += ThreadDescriptorSize();\n#elif SANITIZER_GLIBC && defined(__aarch64__)\n  *addr = reinterpret_cast<uptr>(__builtin_thread_pointer()) -\n          ThreadDescriptorSize();\n  *size = g_tls_size + ThreadDescriptorSize();\n#elif SANITIZER_GLIBC && defined(__powerpc64__)\n  // Workaround for glibc<2.25(?). 2.27 is known to not need this.\n  uptr tp;\n  asm(\"addi %0,13,-0x7000\" : \"=r\"(tp));\n  const uptr pre_tcb_size = TlsPreTcbSize();\n  *addr = tp - pre_tcb_size;\n  *size = g_tls_size + pre_tcb_size;\n#elif SANITIZER_FREEBSD || SANITIZER_LINUX\n  uptr align;\n  GetStaticTlsBoundary(addr, size, &align);\n#if defined(__x86_64__) || defined(__i386__) || defined(__s390__) || \\\n    defined(__sparc__)\n  if (SANITIZER_GLIBC) {\n#if defined(__x86_64__) || defined(__i386__)\n    align = Max<uptr>(align, 64);\n#else\n    align = Max<uptr>(align, 16);\n#endif\n  }\n  const uptr tp = RoundUpTo(*addr + *size, align);\n\n  // lsan requires the range to additionally cover the static TLS surplus\n  // (elf/dl-tls.c defines 1664). Otherwise there may be false positives for\n  // allocations only referenced by tls in dynamically loaded modules.\n  if (SANITIZER_GLIBC)\n    *size += 1664;\n  else if (SANITIZER_FREEBSD)\n    *size += 128;  // RTLD_STATIC_TLS_EXTRA\n\n  // Extend the range to include the thread control block. On glibc, lsan needs\n  // the range to include pthread::{specific_1stblock,specific} so that\n  // allocations only referenced by pthread_setspecific can be scanned. 
This may\n  // underestimate by at most TLS_TCB_ALIGN-1 bytes but it should be fine\n  // because the number of bytes after pthread::specific is larger.\n  *addr = tp - RoundUpTo(*size, align);\n  *size = tp - *addr + ThreadDescriptorSize();\n#else\n  if (SANITIZER_GLIBC)\n    *size += 1664;\n  else if (SANITIZER_FREEBSD)\n    *size += 128;  // RTLD_STATIC_TLS_EXTRA\n#if defined(__mips__) || defined(__powerpc64__) || SANITIZER_RISCV64\n  const uptr pre_tcb_size = TlsPreTcbSize();\n  *addr -= pre_tcb_size;\n  *size += pre_tcb_size;\n#else\n  // arm and aarch64 reserve two words at TP, so this underestimates the range.\n  // However, this is sufficient for the purpose of finding the pointers to\n  // thread-specific data keys.\n  const uptr tcb_size = ThreadDescriptorSize();\n  *addr -= tcb_size;\n  *size += tcb_size;\n#endif\n#endif\n#elif SANITIZER_NETBSD\n  struct tls_tcb * const tcb = ThreadSelfTlsTcb();\n  *addr = 0;\n  *size = 0;\n  if (tcb != 0) {\n    // Find size (p_memsz) of dlpi_tls_modid 1 (TLS block of the main program).\n    // ld.elf_so hardcodes the index 1.\n    dl_iterate_phdr(GetSizeFromHdr, size);\n\n    if (*size != 0) {\n      // The block has been found and tcb_dtv[1] contains the base address\n      *addr = (uptr)tcb->tcb_dtv[1];\n    }\n  }\n#elif SANITIZER_SOLARIS\n  // FIXME\n  *addr = 0;\n  *size = 0;\n#else\n#error \"Unknown OS\"\n#endif\n}\n#endif\n\n#if !SANITIZER_GO\nuptr GetTlsSize() {\n#if SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_NETBSD || \\\n    SANITIZER_SOLARIS\n  uptr addr, size;\n  GetTls(&addr, &size);\n  return size;\n#else\n  return 0;\n#endif\n}\n#endif\n\nvoid GetThreadStackAndTls(bool main, uptr *stk_addr, uptr *stk_size,\n                          uptr *tls_addr, uptr *tls_size) {\n#if SANITIZER_GO\n  // Stub implementation for Go.\n  *stk_addr = *stk_size = *tls_addr = *tls_size = 0;\n#else\n  GetTls(tls_addr, tls_size);\n\n  uptr stack_top, stack_bottom;\n  GetThreadStackTopAndBottom(main, &stack_top, 
&stack_bottom);\n  *stk_addr = stack_bottom;\n  *stk_size = stack_top - stack_bottom;\n\n  if (!main) {\n    // If stack and tls intersect, make them non-intersecting.\n    if (*tls_addr > *stk_addr && *tls_addr < *stk_addr + *stk_size) {\n      if (*stk_addr + *stk_size < *tls_addr + *tls_size)\n        *tls_size = *stk_addr + *stk_size - *tls_addr;\n      *stk_size = *tls_addr - *stk_addr;\n    }\n  }\n#endif\n}\n\n#if !SANITIZER_FREEBSD\ntypedef ElfW(Phdr) Elf_Phdr;\n#elif SANITIZER_WORDSIZE == 32 && __FreeBSD_version <= 902001  // v9.2\n#define Elf_Phdr XElf32_Phdr\n#define dl_phdr_info xdl_phdr_info\n#define dl_iterate_phdr(c, b) xdl_iterate_phdr((c), (b))\n#endif  // !SANITIZER_FREEBSD\n\nstruct DlIteratePhdrData {\n  InternalMmapVectorNoCtor<LoadedModule> *modules;\n  bool first;\n};\n\nstatic int AddModuleSegments(const char *module_name, dl_phdr_info *info,\n                             InternalMmapVectorNoCtor<LoadedModule> *modules) {\n  if (module_name[0] == '\\0')\n    return 0;\n  LoadedModule cur_module;\n  cur_module.set(module_name, info->dlpi_addr);\n  for (int i = 0; i < (int)info->dlpi_phnum; i++) {\n    const Elf_Phdr *phdr = &info->dlpi_phdr[i];\n    if (phdr->p_type == PT_LOAD) {\n      uptr cur_beg = info->dlpi_addr + phdr->p_vaddr;\n      uptr cur_end = cur_beg + phdr->p_memsz;\n      bool executable = phdr->p_flags & PF_X;\n      bool writable = phdr->p_flags & PF_W;\n      cur_module.addAddressRange(cur_beg, cur_end, executable,\n                                 writable);\n    } else if (phdr->p_type == PT_NOTE) {\n#  ifdef NT_GNU_BUILD_ID\n      uptr off = 0;\n      while (off + sizeof(ElfW(Nhdr)) < phdr->p_memsz) {\n        auto *nhdr = reinterpret_cast<const ElfW(Nhdr) *>(info->dlpi_addr +\n                                                          phdr->p_vaddr + off);\n        constexpr auto kGnuNamesz = 4;  // \"GNU\" with NUL-byte.\n        static_assert(kGnuNamesz % 4 == 0, \"kGnuNameSize is aligned to 4.\");\n        if 
(nhdr->n_type == NT_GNU_BUILD_ID && nhdr->n_namesz == kGnuNamesz) {\n          if (off + sizeof(ElfW(Nhdr)) + nhdr->n_namesz + nhdr->n_descsz >\n              phdr->p_memsz) {\n            // Something is very wrong, bail out instead of reading potentially\n            // arbitrary memory.\n            break;\n          }\n          const char *name =\n              reinterpret_cast<const char *>(nhdr) + sizeof(*nhdr);\n          if (internal_memcmp(name, \"GNU\", 3) == 0) {\n            const char *value = reinterpret_cast<const char *>(nhdr) +\n                                sizeof(*nhdr) + kGnuNamesz;\n            cur_module.setUuid(value, nhdr->n_descsz);\n            break;\n          }\n        }\n        off += sizeof(*nhdr) + RoundUpTo(nhdr->n_namesz, 4) +\n               RoundUpTo(nhdr->n_descsz, 4);\n      }\n#  endif\n    }\n  }\n  modules->push_back(cur_module);\n  return 0;\n}\n\nstatic int dl_iterate_phdr_cb(dl_phdr_info *info, size_t size, void *arg) {\n  DlIteratePhdrData *data = (DlIteratePhdrData *)arg;\n  if (data->first) {\n    InternalMmapVector<char> module_name(kMaxPathLength);\n    data->first = false;\n    // First module is the binary itself.\n    ReadBinaryNameCached(module_name.data(), module_name.size());\n    return AddModuleSegments(module_name.data(), info, data->modules);\n  }\n\n  if (info->dlpi_name) {\n    InternalScopedString module_name;\n    module_name.append(\"%s\", info->dlpi_name);\n    return AddModuleSegments(module_name.data(), info, data->modules);\n  }\n\n  return 0;\n}\n\n#if SANITIZER_ANDROID && __ANDROID_API__ < 21\nextern \"C\" __attribute__((weak)) int dl_iterate_phdr(\n    int (*)(struct dl_phdr_info *, size_t, void *), void *);\n#endif\n\nstatic bool requiresProcmaps() {\n#if SANITIZER_ANDROID && __ANDROID_API__ <= 22\n  // Fall back to /proc/maps if dl_iterate_phdr is unavailable or broken.\n  // The runtime check allows the same library to work with\n  // both K and L (and future) Android releases.\n  return 
AndroidGetApiLevel() <= ANDROID_LOLLIPOP_MR1;\n#else\n  return false;\n#endif\n}\n\nstatic void procmapsInit(InternalMmapVectorNoCtor<LoadedModule> *modules) {\n  MemoryMappingLayout memory_mapping(/*cache_enabled*/true);\n  memory_mapping.DumpListOfModules(modules);\n}\n\nvoid ListOfModules::init() {\n  clearOrInit();\n  if (requiresProcmaps()) {\n    procmapsInit(&modules_);\n  } else {\n    DlIteratePhdrData data = {&modules_, true};\n    dl_iterate_phdr(dl_iterate_phdr_cb, &data);\n  }\n}\n\n// When a custom loader is used, dl_iterate_phdr may not contain the full\n// list of modules. Allow callers to fall back to using procmaps.\nvoid ListOfModules::fallbackInit() {\n  if (!requiresProcmaps()) {\n    clearOrInit();\n    procmapsInit(&modules_);\n  } else {\n    clear();\n  }\n}\n\n// getrusage does not give us the current RSS, only the max RSS.\n// Still, this is better than nothing if /proc/self/statm is not available\n// for some reason, e.g. due to a sandbox.\nstatic uptr GetRSSFromGetrusage() {\n  struct rusage usage;\n  if (getrusage(RUSAGE_SELF, &usage))  // Failed, probably due to a sandbox.\n    return 0;\n  return usage.ru_maxrss << 10;  // ru_maxrss is in Kb.\n}\n\nuptr GetRSS() {\n  if (!common_flags()->can_use_proc_maps_statm)\n    return GetRSSFromGetrusage();\n  fd_t fd = OpenFile(\"/proc/self/statm\", RdOnly);\n  if (fd == kInvalidFd)\n    return GetRSSFromGetrusage();\n  char buf[64];\n  uptr len = internal_read(fd, buf, sizeof(buf) - 1);\n  internal_close(fd);\n  if ((sptr)len <= 0)\n    return 0;\n  buf[len] = 0;\n  // The format of the file is:\n  // 1084 89 69 11 0 79 0\n  // We need the second number which is RSS in pages.\n  char *pos = buf;\n  // Skip the first number.\n  while (*pos >= '0' && *pos <= '9')\n    pos++;\n  // Skip whitespaces.\n  while (!(*pos >= '0' && *pos <= '9') && *pos != 0)\n    pos++;\n  // Read the number.\n  uptr rss = 0;\n  while (*pos >= '0' && *pos <= '9')\n    rss = rss * 10 + *pos++ - '0';\n  return rss * 
GetPageSizeCached();\n}\n\n// sysconf(_SC_NPROCESSORS_{CONF,ONLN}) cannot be used on most platforms as\n// they allocate memory.\nu32 GetNumberOfCPUs() {\n#if SANITIZER_FREEBSD || SANITIZER_NETBSD\n  u32 ncpu;\n  int req[2];\n  uptr len = sizeof(ncpu);\n  req[0] = CTL_HW;\n  req[1] = HW_NCPU;\n  CHECK_EQ(internal_sysctl(req, 2, &ncpu, &len, NULL, 0), 0);\n  return ncpu;\n#elif SANITIZER_ANDROID && !defined(CPU_COUNT) && !defined(__aarch64__)\n  // Fall back to /sys/devices/system/cpu on Android when cpu_set_t doesn't\n  // exist in sched.h. That is the case for toolchains generated with older\n  // NDKs.\n  // This code doesn't work on AArch64 because internal_getdents makes use of\n  // the 64bit getdents syscall, but cpu_set_t seems to always exist on AArch64.\n  uptr fd = internal_open(\"/sys/devices/system/cpu\", O_RDONLY | O_DIRECTORY);\n  if (internal_iserror(fd))\n    return 0;\n  InternalMmapVector<u8> buffer(4096);\n  uptr bytes_read = buffer.size();\n  uptr n_cpus = 0;\n  u8 *d_type;\n  struct linux_dirent *entry = (struct linux_dirent *)&buffer[bytes_read];\n  while (true) {\n    if ((u8 *)entry >= &buffer[bytes_read]) {\n      bytes_read = internal_getdents(fd, (struct linux_dirent *)buffer.data(),\n                                     buffer.size());\n      if (internal_iserror(bytes_read) || !bytes_read)\n        break;\n      entry = (struct linux_dirent *)buffer.data();\n    }\n    d_type = (u8 *)entry + entry->d_reclen - 1;\n    if (d_type >= &buffer[bytes_read] ||\n        (u8 *)&entry->d_name[3] >= &buffer[bytes_read])\n      break;\n    if (entry->d_ino != 0 && *d_type == DT_DIR) {\n      if (entry->d_name[0] == 'c' && entry->d_name[1] == 'p' &&\n          entry->d_name[2] == 'u' &&\n          entry->d_name[3] >= '0' && entry->d_name[3] <= '9')\n        n_cpus++;\n    }\n    entry = (struct linux_dirent *)(((u8 *)entry) + entry->d_reclen);\n  }\n  internal_close(fd);\n  return n_cpus;\n#elif SANITIZER_SOLARIS\n  return 
sysconf(_SC_NPROCESSORS_ONLN);\n#else\n  cpu_set_t CPUs;\n  CHECK_EQ(sched_getaffinity(0, sizeof(cpu_set_t), &CPUs), 0);\n  return CPU_COUNT(&CPUs);\n#endif\n}\n\n#if SANITIZER_LINUX\n\n#if SANITIZER_ANDROID\nstatic atomic_uint8_t android_log_initialized;\n\nvoid AndroidLogInit() {\n  openlog(GetProcessName(), 0, LOG_USER);\n  atomic_store(&android_log_initialized, 1, memory_order_release);\n}\n\nstatic bool ShouldLogAfterPrintf() {\n  return atomic_load(&android_log_initialized, memory_order_acquire);\n}\n\nextern \"C\" SANITIZER_WEAK_ATTRIBUTE\nint async_safe_write_log(int pri, const char* tag, const char* msg);\nextern \"C\" SANITIZER_WEAK_ATTRIBUTE\nint __android_log_write(int prio, const char* tag, const char* msg);\n\n// ANDROID_LOG_INFO is 4, but can't be resolved at runtime.\n#define SANITIZER_ANDROID_LOG_INFO 4\n\n// async_safe_write_log is a new public version of __libc_write_log that is\n// used behind syslog. It is preferable to syslog as it will not do any dynamic\n// memory allocation or formatting.\n// If the function is not available, syslog is preferred for L+ (it was broken\n// pre-L) as __android_log_write triggers a racey behavior with the strncpy\n// interceptor. 
Fallback to __android_log_write pre-L.\nvoid WriteOneLineToSyslog(const char *s) {\n  if (&async_safe_write_log) {\n    async_safe_write_log(SANITIZER_ANDROID_LOG_INFO, GetProcessName(), s);\n  } else if (AndroidGetApiLevel() > ANDROID_KITKAT) {\n    syslog(LOG_INFO, \"%s\", s);\n  } else {\n    CHECK(&__android_log_write);\n    __android_log_write(SANITIZER_ANDROID_LOG_INFO, nullptr, s);\n  }\n}\n\nextern \"C\" SANITIZER_WEAK_ATTRIBUTE\nvoid android_set_abort_message(const char *);\n\nvoid SetAbortMessage(const char *str) {\n  if (&android_set_abort_message)\n    android_set_abort_message(str);\n}\n#else\nvoid AndroidLogInit() {}\n\nstatic bool ShouldLogAfterPrintf() { return true; }\n\nvoid WriteOneLineToSyslog(const char *s) { syslog(LOG_INFO, \"%s\", s); }\n\nvoid SetAbortMessage(const char *str) {}\n#endif  // SANITIZER_ANDROID\n\nvoid LogMessageOnPrintf(const char *str) {\n  if (common_flags()->log_to_syslog && ShouldLogAfterPrintf())\n    WriteToSyslog(str);\n}\n\n#endif  // SANITIZER_LINUX\n\n#if SANITIZER_GLIBC && !SANITIZER_GO\n// glibc crashes when using clock_gettime from a preinit_array function as the\n// vDSO function pointers haven't been initialized yet. __progname is\n// initialized after the vDSO function pointers, so if it exists, is not null\n// and is not empty, we can use clock_gettime.\nextern \"C\" SANITIZER_WEAK_ATTRIBUTE char *__progname;\ninline bool CanUseVDSO() { return &__progname && __progname && *__progname; }\n\n// MonotonicNanoTime is a timing function that can leverage the vDSO by calling\n// clock_gettime. 
real_clock_gettime only exists if clock_gettime is\n// intercepted, so define it weakly and use it if available.\nextern \"C\" SANITIZER_WEAK_ATTRIBUTE\nint real_clock_gettime(u32 clk_id, void *tp);\nu64 MonotonicNanoTime() {\n  timespec ts;\n  if (CanUseVDSO()) {\n    if (&real_clock_gettime)\n      real_clock_gettime(CLOCK_MONOTONIC, &ts);\n    else\n      clock_gettime(CLOCK_MONOTONIC, &ts);\n  } else {\n    internal_clock_gettime(CLOCK_MONOTONIC, &ts);\n  }\n  return (u64)ts.tv_sec * (1000ULL * 1000 * 1000) + ts.tv_nsec;\n}\n#else\n// Non-glibc & Go always use the regular function.\nu64 MonotonicNanoTime() {\n  timespec ts;\n  clock_gettime(CLOCK_MONOTONIC, &ts);\n  return (u64)ts.tv_sec * (1000ULL * 1000 * 1000) + ts.tv_nsec;\n}\n#endif  // SANITIZER_GLIBC && !SANITIZER_GO\n\nvoid ReExec() {\n  const char *pathname = \"/proc/self/exe\";\n\n#if SANITIZER_NETBSD\n  static const int name[] = {\n      CTL_KERN,\n      KERN_PROC_ARGS,\n      -1,\n      KERN_PROC_PATHNAME,\n  };\n  char path[400];\n  uptr len;\n\n  len = sizeof(path);\n  if (internal_sysctl(name, ARRAY_SIZE(name), path, &len, NULL, 0) != -1)\n    pathname = path;\n#elif SANITIZER_SOLARIS\n  pathname = getexecname();\n  CHECK_NE(pathname, NULL);\n#elif SANITIZER_USE_GETAUXVAL\n  // Calling execve with /proc/self/exe sets that as $EXEC_ORIGIN. Binaries that\n  // rely on that will fail to load shared libraries. 
Query AT_EXECFN instead.\n  pathname = reinterpret_cast<const char *>(getauxval(AT_EXECFN));\n#endif\n\n  uptr rv = internal_execve(pathname, GetArgv(), GetEnviron());\n  int rverrno;\n  CHECK_EQ(internal_iserror(rv, &rverrno), true);\n  Printf(\"execve failed, errno %d\\n\", rverrno);\n  Die();\n}\n\nvoid UnmapFromTo(uptr from, uptr to) {\n  if (to == from)\n    return;\n  CHECK(to >= from);\n  uptr res = internal_munmap(reinterpret_cast<void *>(from), to - from);\n  if (UNLIKELY(internal_iserror(res))) {\n    Report(\"ERROR: %s failed to unmap 0x%zx (%zd) bytes at address %p\\n\",\n           SanitizerToolName, to - from, to - from, (void *)from);\n    CHECK(\"unable to unmap\" && 0);\n  }\n}\n\nuptr MapDynamicShadow(uptr shadow_size_bytes, uptr shadow_scale,\n                      uptr min_shadow_base_alignment,\n                      UNUSED uptr &high_mem_end) {\n  const uptr granularity = GetMmapGranularity();\n  const uptr alignment =\n      Max<uptr>(granularity << shadow_scale, 1ULL << min_shadow_base_alignment);\n  const uptr left_padding =\n      Max<uptr>(granularity, 1ULL << min_shadow_base_alignment);\n\n  const uptr shadow_size = RoundUpTo(shadow_size_bytes, granularity);\n  const uptr map_size = shadow_size + left_padding + alignment;\n\n  const uptr map_start = (uptr)MmapNoAccess(map_size);\n  CHECK_NE(map_start, ~(uptr)0);\n\n  const uptr shadow_start = RoundUpTo(map_start + left_padding, alignment);\n\n  UnmapFromTo(map_start, shadow_start - left_padding);\n  UnmapFromTo(shadow_start + shadow_size, map_start + map_size);\n\n  return shadow_start;\n}\n\nstatic uptr MmapSharedNoReserve(uptr addr, uptr size) {\n  return internal_mmap(\n      reinterpret_cast<void *>(addr), size, PROT_READ | PROT_WRITE,\n      MAP_FIXED | MAP_SHARED | MAP_ANONYMOUS | MAP_NORESERVE, -1, 0);\n}\n\nstatic uptr MremapCreateAlias(uptr base_addr, uptr alias_addr,\n                              uptr alias_size) {\n#if SANITIZER_LINUX\n  return 
internal_mremap(reinterpret_cast<void *>(base_addr), 0, alias_size,\n                         MREMAP_MAYMOVE | MREMAP_FIXED,\n                         reinterpret_cast<void *>(alias_addr));\n#else\n  CHECK(false && \"mremap is not supported outside of Linux\");\n  return 0;\n#endif\n}\n\nstatic void CreateAliases(uptr start_addr, uptr alias_size, uptr num_aliases) {\n  uptr total_size = alias_size * num_aliases;\n  uptr mapped = MmapSharedNoReserve(start_addr, total_size);\n  CHECK_EQ(mapped, start_addr);\n\n  for (uptr i = 1; i < num_aliases; ++i) {\n    uptr alias_addr = start_addr + i * alias_size;\n    CHECK_EQ(MremapCreateAlias(start_addr, alias_addr, alias_size), alias_addr);\n  }\n}\n\nuptr MapDynamicShadowAndAliases(uptr shadow_size, uptr alias_size,\n                                uptr num_aliases, uptr ring_buffer_size) {\n  CHECK_EQ(alias_size & (alias_size - 1), 0);\n  CHECK_EQ(num_aliases & (num_aliases - 1), 0);\n  CHECK_EQ(ring_buffer_size & (ring_buffer_size - 1), 0);\n\n  const uptr granularity = GetMmapGranularity();\n  shadow_size = RoundUpTo(shadow_size, granularity);\n  CHECK_EQ(shadow_size & (shadow_size - 1), 0);\n\n  const uptr alias_region_size = alias_size * num_aliases;\n  const uptr alignment =\n      2 * Max(Max(shadow_size, alias_region_size), ring_buffer_size);\n  const uptr left_padding = ring_buffer_size;\n\n  const uptr right_size = alignment;\n  const uptr map_size = left_padding + 2 * alignment;\n\n  const uptr map_start = reinterpret_cast<uptr>(MmapNoAccess(map_size));\n  CHECK_NE(map_start, static_cast<uptr>(-1));\n  const uptr right_start = RoundUpTo(map_start + left_padding, alignment);\n\n  UnmapFromTo(map_start, right_start - left_padding);\n  UnmapFromTo(right_start + right_size, map_start + map_size);\n\n  CreateAliases(right_start + right_size / 2, alias_size, num_aliases);\n\n  return right_start;\n}\n\nvoid InitializePlatformCommonFlags(CommonFlags *cf) {\n#if SANITIZER_ANDROID\n  if (&__libc_get_static_tls_bounds == 
nullptr)\n    cf->detect_leaks = false;\n#endif\n}\n\n} // namespace __sanitizer\n\n#endif\n"
  },
  {
    "path": "runtime/sanitizer_common/sanitizer_linux_s390.cpp",
    "content": "//===-- sanitizer_linux_s390.cpp ------------------------------------------===//\n//\n// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n// See https://llvm.org/LICENSE.txt for license information.\n// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n//\n//===----------------------------------------------------------------------===//\n//\n// This file is shared between AddressSanitizer and ThreadSanitizer\n// run-time libraries and implements s390-linux-specific functions from\n// sanitizer_libc.h.\n//===----------------------------------------------------------------------===//\n\n#include \"sanitizer_platform.h\"\n\n#if SANITIZER_LINUX && SANITIZER_S390\n\n#include <dlfcn.h>\n#include <errno.h>\n#include <sys/syscall.h>\n#include <sys/utsname.h>\n#include <unistd.h>\n\n#include \"sanitizer_libc.h\"\n#include \"sanitizer_linux.h\"\n\nnamespace __sanitizer {\n\n// --------------- sanitizer_libc.h\nuptr internal_mmap(void *addr, uptr length, int prot, int flags, int fd,\n                   u64 offset) {\n  struct s390_mmap_params {\n    unsigned long addr;\n    unsigned long length;\n    unsigned long prot;\n    unsigned long flags;\n    unsigned long fd;\n    unsigned long offset;\n  } params = {\n    (unsigned long)addr,\n    (unsigned long)length,\n    (unsigned long)prot,\n    (unsigned long)flags,\n    (unsigned long)fd,\n# ifdef __s390x__\n    (unsigned long)offset,\n# else\n    (unsigned long)(offset / 4096),\n# endif\n  };\n# ifdef __s390x__\n  return syscall(__NR_mmap, &params);\n# else\n  return syscall(__NR_mmap2, &params);\n# endif\n}\n\nuptr internal_clone(int (*fn)(void *), void *child_stack, int flags, void *arg,\n                    int *parent_tidptr, void *newtls, int *child_tidptr) {\n  if (!fn || !child_stack) {\n    errno = EINVAL;\n    return -1;\n  }\n  CHECK_EQ(0, (uptr)child_stack % 16);\n  // Minimum frame size.\n#ifdef __s390x__\n  child_stack = (char *)child_stack - 160;\n#else\n  
child_stack = (char *)child_stack - 96;\n#endif\n  // Terminate unwind chain.\n  ((unsigned long *)child_stack)[0] = 0;\n  // And pass parameters.\n  ((unsigned long *)child_stack)[1] = (uptr)fn;\n  ((unsigned long *)child_stack)[2] = (uptr)arg;\n  register uptr res __asm__(\"r2\");\n  register void *__cstack      __asm__(\"r2\") = child_stack;\n  register long __flags        __asm__(\"r3\") = flags;\n  register int * __ptidptr     __asm__(\"r4\") = parent_tidptr;\n  register int * __ctidptr     __asm__(\"r5\") = child_tidptr;\n  register void * __newtls     __asm__(\"r6\") = newtls;\n\n  __asm__ __volatile__(\n                       /* Clone. */\n                       \"svc    %1\\n\"\n\n                       /* if (%r2 != 0)\n                        *   return;\n                        */\n#ifdef __s390x__\n                       \"cghi   %%r2, 0\\n\"\n#else\n                       \"chi    %%r2, 0\\n\"\n#endif\n                       \"jne    1f\\n\"\n\n                       /* Call \"fn(arg)\". */\n#ifdef __s390x__\n                       \"lmg    %%r1, %%r2, 8(%%r15)\\n\"\n#else\n                       \"lm     %%r1, %%r2, 4(%%r15)\\n\"\n#endif\n                       \"basr   %%r14, %%r1\\n\"\n\n                       /* Call _exit(%r2). */\n                       \"svc %2\\n\"\n\n                       /* Return to parent. 
*/\n                     \"1:\\n\"\n                       : \"=r\" (res)\n                       : \"i\"(__NR_clone), \"i\"(__NR_exit),\n                         \"r\"(__cstack),\n                         \"r\"(__flags),\n                         \"r\"(__ptidptr),\n                         \"r\"(__ctidptr),\n                         \"r\"(__newtls)\n                       : \"memory\", \"cc\");\n  if (res >= (uptr)-4095) {\n    errno = -res;\n    return -1;\n  }\n  return res;\n}\n\n#if SANITIZER_S390_64\nstatic bool FixedCVE_2016_2143() {\n  // Try to determine if the running kernel has a fix for CVE-2016-2143,\n  // return false if in doubt (better safe than sorry).  Distros may want to\n  // adjust this for their own kernels.\n  struct utsname buf;\n  unsigned int major, minor, patch = 0;\n  // This should never fail, but just in case...\n  if (internal_uname(&buf))\n    return false;\n  const char *ptr = buf.release;\n  major = internal_simple_strtoll(ptr, &ptr, 10);\n  // At least first 2 should be matched.\n  if (ptr[0] != '.')\n    return false;\n  minor = internal_simple_strtoll(ptr+1, &ptr, 10);\n  // Third is optional.\n  if (ptr[0] == '.')\n    patch = internal_simple_strtoll(ptr+1, &ptr, 10);\n  if (major < 3) {\n    if (major == 2 && minor == 6 && patch == 32 && ptr[0] == '-' &&\n        internal_strstr(ptr, \".el6\")) {\n      // Check RHEL6\n      int r1 = internal_simple_strtoll(ptr+1, &ptr, 10);\n      if (r1 >= 657) // 2.6.32-657.el6 or later\n        return true;\n      if (r1 == 642 && ptr[0] == '.') {\n        int r2 = internal_simple_strtoll(ptr+1, &ptr, 10);\n        if (r2 >= 9) // 2.6.32-642.9.1.el6 or later\n          return true;\n      }\n    }\n    // <3.0 is bad.\n    return false;\n  } else if (major == 3) {\n    // 3.2.79+ is OK.\n    if (minor == 2 && patch >= 79)\n      return true;\n    // 3.12.58+ is OK.\n    if (minor == 12 && patch >= 58)\n      return true;\n    if (minor == 10 && patch == 0 && ptr[0] == '-' &&\n        
internal_strstr(ptr, \".el7\")) {\n      // Check RHEL7\n      int r1 = internal_simple_strtoll(ptr+1, &ptr, 10);\n      if (r1 >= 426) // 3.10.0-426.el7 or later\n        return true;\n      if (r1 == 327 && ptr[0] == '.') {\n        int r2 = internal_simple_strtoll(ptr+1, &ptr, 10);\n        if (r2 >= 27) // 3.10.0-327.27.1.el7 or later\n          return true;\n      }\n    }\n    // Otherwise, bad.\n    return false;\n  } else if (major == 4) {\n    // 4.1.21+ is OK.\n    if (minor == 1 && patch >= 21)\n      return true;\n    // 4.4.6+ is OK.\n    if (minor == 4 && patch >= 6)\n      return true;\n    if (minor == 4 && patch == 0 && ptr[0] == '-' &&\n        internal_strstr(buf.version, \"Ubuntu\")) {\n      // Check Ubuntu 16.04\n      int r1 = internal_simple_strtoll(ptr+1, &ptr, 10);\n      if (r1 >= 13) // 4.4.0-13 or later\n        return true;\n    }\n    // Otherwise, OK if 4.5+.\n    return minor >= 5;\n  } else {\n    // Linux 5 and up are fine.\n    return true;\n  }\n}\n\nvoid AvoidCVE_2016_2143() {\n  // Older kernels are affected by CVE-2016-2143 - they will crash hard\n  // if someone uses 4-level page tables (ie. virtual addresses >= 4TB)\n  // and fork() in the same process.  Unfortunately, sanitizers tend to\n  // require such addresses.  Since this is very likely to crash the whole\n  // machine (sanitizers themselves use fork() for llvm-symbolizer, for one),\n  // abort the process at initialization instead.\n  if (FixedCVE_2016_2143())\n    return;\n  if (GetEnv(\"SANITIZER_IGNORE_CVE_2016_2143\"))\n    return;\n  Report(\n    \"ERROR: Your kernel seems to be vulnerable to CVE-2016-2143.  
Using ASan,\\n\"\n    \"MSan, TSan, DFSan or LSan with such kernel can and will crash your\\n\"\n    \"machine, or worse.\\n\"\n    \"\\n\"\n    \"If you are certain your kernel is not vulnerable (you have compiled it\\n\"\n    \"yourself, or are using an unrecognized distribution kernel), you can\\n\"\n    \"override this safety check by exporting SANITIZER_IGNORE_CVE_2016_2143\\n\"\n    \"with any value.\\n\");\n  Die();\n}\n#endif\n\n} // namespace __sanitizer\n\n#endif // SANITIZER_LINUX && SANITIZER_S390\n"
  },
  {
    "path": "runtime/sanitizer_common/sanitizer_list.h",
    "content": "//===-- sanitizer_list.h ----------------------------------------*- C++ -*-===//\n//\n// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n// See https://llvm.org/LICENSE.txt for license information.\n// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n//\n//===----------------------------------------------------------------------===//\n//\n// This file contains implementation of a list class to be used by\n// ThreadSanitizer, etc run-times.\n//\n//===----------------------------------------------------------------------===//\n\n#ifndef SANITIZER_LIST_H\n#define SANITIZER_LIST_H\n\n#include \"sanitizer_internal_defs.h\"\n\nnamespace __sanitizer {\n\n// Intrusive singly-linked list with size(), push_back(), push_front()\n// pop_front(), append_front() and append_back().\n// This class should be a POD (so that it can be put into TLS)\n// and an object with all zero fields should represent a valid empty list.\n// This class does not have a CTOR, so clear() should be called on all\n// non-zero-initialized objects before using.\ntemplate<class Item>\nstruct IntrusiveList {\n  friend class Iterator;\n\n  void clear() {\n    first_ = last_ = nullptr;\n    size_ = 0;\n  }\n\n  bool empty() const { return size_ == 0; }\n  uptr size() const { return size_; }\n\n  void push_back(Item *x) {\n    if (empty()) {\n      x->next = nullptr;\n      first_ = last_ = x;\n      size_ = 1;\n    } else {\n      x->next = nullptr;\n      last_->next = x;\n      last_ = x;\n      size_++;\n    }\n  }\n\n  void push_front(Item *x) {\n    if (empty()) {\n      x->next = nullptr;\n      first_ = last_ = x;\n      size_ = 1;\n    } else {\n      x->next = first_;\n      first_ = x;\n      size_++;\n    }\n  }\n\n  void pop_front() {\n    CHECK(!empty());\n    first_ = first_->next;\n    if (!first_)\n      last_ = nullptr;\n    size_--;\n  }\n\n  void extract(Item *prev, Item *x) {\n    CHECK(!empty());\n    CHECK_NE(prev, nullptr);\n    
CHECK_NE(x, nullptr);\n    CHECK_EQ(prev->next, x);\n    prev->next = x->next;\n    if (last_ == x)\n      last_ = prev;\n    size_--;\n  }\n\n  Item *front() { return first_; }\n  const Item *front() const { return first_; }\n  Item *back() { return last_; }\n  const Item *back() const { return last_; }\n\n  void append_front(IntrusiveList<Item> *l) {\n    CHECK_NE(this, l);\n    if (l->empty())\n      return;\n    if (empty()) {\n      *this = *l;\n    } else if (!l->empty()) {\n      l->last_->next = first_;\n      first_ = l->first_;\n      size_ += l->size();\n    }\n    l->clear();\n  }\n\n  void append_back(IntrusiveList<Item> *l) {\n    CHECK_NE(this, l);\n    if (l->empty())\n      return;\n    if (empty()) {\n      *this = *l;\n    } else {\n      last_->next = l->first_;\n      last_ = l->last_;\n      size_ += l->size();\n    }\n    l->clear();\n  }\n\n  void CheckConsistency() {\n    if (size_ == 0) {\n      CHECK_EQ(first_, 0);\n      CHECK_EQ(last_, 0);\n    } else {\n      uptr count = 0;\n      for (Item *i = first_; ; i = i->next) {\n        count++;\n        if (i == last_) break;\n      }\n      CHECK_EQ(size(), count);\n      CHECK_EQ(last_->next, 0);\n    }\n  }\n\n  template<class ItemTy>\n  class IteratorBase {\n   public:\n    explicit IteratorBase(ItemTy *current) : current_(current) {}\n    IteratorBase &operator++() {\n      current_ = current_->next;\n      return *this;\n    }\n    bool operator!=(IteratorBase other) const {\n      return current_ != other.current_;\n    }\n    ItemTy &operator*() {\n      return *current_;\n    }\n   private:\n    ItemTy *current_;\n  };\n\n  typedef IteratorBase<Item> Iterator;\n  typedef IteratorBase<const Item> ConstIterator;\n\n  Iterator begin() { return Iterator(first_); }\n  Iterator end() { return Iterator(0); }\n\n  ConstIterator begin() const { return ConstIterator(first_); }\n  ConstIterator end() const { return ConstIterator(0); }\n\n// private, don't use directly.\n  uptr size_;\n  Item 
*first_;\n  Item *last_;\n};\n\n} // namespace __sanitizer\n\n#endif // SANITIZER_LIST_H\n"
  },
  {
    "path": "runtime/sanitizer_common/sanitizer_local_address_space_view.h",
    "content": "//===-- sanitizer_local_address_space_view.h --------------------*- C++ -*-===//\n//\n// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n// See https://llvm.org/LICENSE.txt for license information.\n// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n//\n//===----------------------------------------------------------------------===//\n//\n// `LocalAddressSpaceView` provides the local (i.e. target and current address\n// space are the same) implementation of the `AddressSpaceView` interface which\n// provides a simple interface to load memory from another process (i.e.\n// out-of-process)\n//\n// The `AddressSpaceView` interface requires that the type can be used as a\n// template parameter to objects that wish to be able to operate in an\n// out-of-process manner. In normal usage, objects are in-process and are thus\n// instantiated with the `LocalAddressSpaceView` type. This type is used to\n// load any pointers in instance methods. This implementation is effectively\n// a no-op. When an object is to be used in an out-of-process manner it is\n// instantiated with the `RemoteAddressSpaceView` type.\n//\n// By making `AddressSpaceView` a template parameter of an object, it can\n// change its implementation at compile time which has no run time overhead.\n// This also allows unifying in-process and out-of-process code which avoids\n// code duplication.\n//\n//===----------------------------------------------------------------------===//\n#ifndef SANITIZER_LOCAL_ADDRES_SPACE_VIEW_H\n#define SANITIZER_LOCAL_ADDRES_SPACE_VIEW_H\n\nnamespace __sanitizer {\nstruct LocalAddressSpaceView {\n  // Load memory `sizeof(T) * num_elements` bytes of memory from the target\n  // process (always local for this implementation) starting at address\n  // `target_address`. The local copy of this memory is returned as a pointer.\n  // The caller should not write to this memory. The behaviour when doing so is\n  // undefined. 
Callers should use `LoadWritable()` to get access to memory\n  // that is writable.\n  //\n  // The lifetime of loaded memory is implementation defined.\n  template <typename T>\n  static const T *Load(const T *target_address, uptr num_elements = 1) {\n    // The target address space is the local address space so\n    // nothing needs to be copied. Just return the pointer.\n    return target_address;\n  }\n\n  // Load memory `sizeof(T) * num_elements` bytes of memory from the target\n  // process (always local for this implementation) starting at address\n  // `target_address`. The local copy of this memory is returned as a pointer.\n  // The memory returned may be written to.\n  //\n  // Writes made to the returned memory will be visible in the memory returned\n  // by subsequent `Load()` or `LoadWritable()` calls provided the\n  // `target_address` parameter is the same. It is not guaranteed that the\n  // memory returned by previous calls to `Load()` will contain any performed\n  // writes.  If two or more overlapping regions of memory are loaded via\n  // separate calls to `LoadWritable()`, it is implementation defined whether\n  // writes made to the region returned by one call are visible in the regions\n  // returned by other calls.\n  //\n  // Given the above it is recommended to load the largest possible object\n  // that requires modification (e.g. a class) rather than individual fields\n  // from a class to avoid issues with overlapping writable regions.\n  //\n  // The lifetime of loaded memory is implementation defined.\n  template <typename T>\n  static T *LoadWritable(T *target_address, uptr num_elements = 1) {\n    // The target address space is the local address space so\n    // nothing needs to be copied. Just return the pointer.\n    return target_address;\n  }\n};\n}  // namespace __sanitizer\n\n#endif\n"
  },
  {
    "path": "runtime/sanitizer_common/sanitizer_lzw.h",
    "content": "//===-- sanitizer_lzw.h -----------------------------------------*- C++ -*-===//\n//\n// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n// See https://llvm.org/LICENSE.txt for license information.\n// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n//\n//===----------------------------------------------------------------------===//\n//\n// Lempel–Ziv–Welch encoding/decoding\n//\n//===----------------------------------------------------------------------===//\n\n#ifndef SANITIZER_LZW_H\n#define SANITIZER_LZW_H\n\n#include \"sanitizer_dense_map.h\"\n\nnamespace __sanitizer {\n\nusing LzwCodeType = u32;\n\ntemplate <class T, class ItIn, class ItOut>\nItOut LzwEncode(ItIn begin, ItIn end, ItOut out) {\n  using Substring =\n      detail::DenseMapPair<LzwCodeType /* Prefix */, T /* Next input */>;\n\n  // Sentinel value for substrings of len 1.\n  static constexpr LzwCodeType kNoPrefix =\n      Min(DenseMapInfo<Substring>::getEmptyKey().first,\n          DenseMapInfo<Substring>::getTombstoneKey().first) -\n      1;\n  DenseMap<Substring, LzwCodeType> prefix_to_code;\n  {\n    // Add all substring of len 1 as initial dictionary.\n    InternalMmapVector<T> dict_len1;\n    for (auto it = begin; it != end; ++it)\n      if (prefix_to_code.try_emplace({kNoPrefix, *it}, 0).second)\n        dict_len1.push_back(*it);\n\n    // Slightly helps with later delta encoding.\n    Sort(dict_len1.data(), dict_len1.size());\n\n    // For large sizeof(T) we have to store dict_len1. 
Smaller types like u8 can\n    // just generate them.\n    *out = dict_len1.size();\n    ++out;\n\n    for (uptr i = 0; i != dict_len1.size(); ++i) {\n      // Remap after the Sort.\n      prefix_to_code[{kNoPrefix, dict_len1[i]}] = i;\n      *out = dict_len1[i];\n      ++out;\n    }\n    CHECK_EQ(prefix_to_code.size(), dict_len1.size());\n  }\n\n  if (begin == end)\n    return out;\n\n  // Main LZW encoding loop.\n  LzwCodeType match = prefix_to_code.find({kNoPrefix, *begin})->second;\n  ++begin;\n  for (auto it = begin; it != end; ++it) {\n    // Extend match with the new item.\n    auto ins = prefix_to_code.try_emplace({match, *it}, prefix_to_code.size());\n    if (ins.second) {\n      // This is a new substring, but emit the code for the current match\n      // (before extend). This allows LZW decoder to recover the dictionary.\n      *out = match;\n      ++out;\n      // Reset the match to a single item, which must be already in the map.\n      match = prefix_to_code.find({kNoPrefix, *it})->second;\n    } else {\n      // Already known, use as the current match.\n      match = ins.first->second;\n    }\n  }\n\n  *out = match;\n  ++out;\n\n  return out;\n}\n\ntemplate <class T, class ItIn, class ItOut>\nItOut LzwDecode(ItIn begin, ItIn end, ItOut out) {\n  if (begin == end)\n    return out;\n\n  // Load dictionary of len 1 substrings. Theses correspont to lowest codes.\n  InternalMmapVector<T> dict_len1(*begin);\n  ++begin;\n\n  if (begin == end)\n    return out;\n\n  for (auto& v : dict_len1) {\n    v = *begin;\n    ++begin;\n  }\n\n  // Substrings of len 2 and up. Indexes are shifted because [0,\n  // dict_len1.size()) stored in dict_len1. Substings get here after being\n  // emitted to the output, so we can use output position.\n  InternalMmapVector<detail::DenseMapPair<ItOut /* begin. 
*/, ItOut /* end */>>\n      code_to_substr;\n\n  // Copies already emitted substrings into the output again.\n  auto copy = [&code_to_substr, &dict_len1](LzwCodeType code, ItOut out) {\n    if (code < dict_len1.size()) {\n      *out = dict_len1[code];\n      ++out;\n      return out;\n    }\n    const auto& s = code_to_substr[code - dict_len1.size()];\n\n    for (ItOut it = s.first; it != s.second; ++it, ++out) *out = *it;\n    return out;\n  };\n\n  // Returns lens of the substring with the given code.\n  auto code_to_len = [&code_to_substr, &dict_len1](LzwCodeType code) -> uptr {\n    if (code < dict_len1.size())\n      return 1;\n    const auto& s = code_to_substr[code - dict_len1.size()];\n    return s.second - s.first;\n  };\n\n  // Main LZW decoding loop.\n  LzwCodeType prev_code = *begin;\n  ++begin;\n  out = copy(prev_code, out);\n  for (auto it = begin; it != end; ++it) {\n    LzwCodeType code = *it;\n    auto start = out;\n    if (code == dict_len1.size() + code_to_substr.size()) {\n      // Special LZW case. The code is not in the dictionary yet. This is\n      // possible only when the new substring is the same as previous one plus\n      // the first item of the previous substring. We can emit that in two\n      // steps.\n      out = copy(prev_code, out);\n      *out = *start;\n      ++out;\n    } else {\n      out = copy(code, out);\n    }\n\n    // Every time encoded emits the code, it also creates substing of len + 1\n    // including the first item of the just emmited substring. Do the same here.\n    uptr len = code_to_len(prev_code);\n    code_to_substr.push_back({start - len, start + 1});\n\n    prev_code = code;\n  }\n  return out;\n}\n\n}  // namespace __sanitizer\n#endif\n"
  },
  {
    "path": "runtime/sanitizer_common/sanitizer_mac.cpp",
    "content": "//===-- sanitizer_mac.cpp -------------------------------------------------===//\n//\n// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n// See https://llvm.org/LICENSE.txt for license information.\n// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n//\n//===----------------------------------------------------------------------===//\n//\n// This file is shared between various sanitizers' runtime libraries and\n// implements OSX-specific functions.\n//===----------------------------------------------------------------------===//\n\n#include \"sanitizer_platform.h\"\n#if SANITIZER_MAC\n#include \"sanitizer_mac.h\"\n#include \"interception/interception.h\"\n\n// Use 64-bit inodes in file operations. ASan does not support OS X 10.5, so\n// the clients will most certainly use 64-bit ones as well.\n#ifndef _DARWIN_USE_64_BIT_INODE\n#define _DARWIN_USE_64_BIT_INODE 1\n#endif\n#include <stdio.h>\n\n#include \"sanitizer_common.h\"\n#include \"sanitizer_file.h\"\n#include \"sanitizer_flags.h\"\n#include \"sanitizer_internal_defs.h\"\n#include \"sanitizer_libc.h\"\n#include \"sanitizer_platform_limits_posix.h\"\n#include \"sanitizer_procmaps.h\"\n#include \"sanitizer_ptrauth.h\"\n\n#if !SANITIZER_IOS\n#include <crt_externs.h>  // for _NSGetEnviron\n#else\nextern char **environ;\n#endif\n\n#if defined(__has_include) && __has_include(<os/trace.h>)\n#define SANITIZER_OS_TRACE 1\n#include <os/trace.h>\n#else\n#define SANITIZER_OS_TRACE 0\n#endif\n\n// import new crash reporting api\n#if defined(__has_include) && __has_include(<CrashReporterClient.h>)\n#define HAVE_CRASHREPORTERCLIENT_H 1\n#include <CrashReporterClient.h>\n#else\n#define HAVE_CRASHREPORTERCLIENT_H 0\n#endif\n\n#if !SANITIZER_IOS\n#include <crt_externs.h>  // for _NSGetArgv and _NSGetEnviron\n#else\nextern \"C\" {\n  extern char ***_NSGetArgv(void);\n}\n#endif\n\n#include <asl.h>\n#include <dlfcn.h>  // for dladdr()\n#include <errno.h>\n#include 
<fcntl.h>\n#include <libkern/OSAtomic.h>\n#include <mach-o/dyld.h>\n#include <mach/mach.h>\n#include <mach/mach_time.h>\n#include <mach/vm_statistics.h>\n#include <malloc/malloc.h>\n#include <os/log.h>\n#include <pthread.h>\n#include <sched.h>\n#include <signal.h>\n#include <spawn.h>\n#include <stdlib.h>\n#include <sys/ioctl.h>\n#include <sys/mman.h>\n#include <sys/resource.h>\n#include <sys/stat.h>\n#include <sys/sysctl.h>\n#include <sys/types.h>\n#include <sys/wait.h>\n#include <unistd.h>\n#include <util.h>\n\n// From <crt_externs.h>, but we don't have that file on iOS.\nextern \"C\" {\n  extern char ***_NSGetArgv(void);\n  extern char ***_NSGetEnviron(void);\n}\n\n// From <mach/mach_vm.h>, but we don't have that file on iOS.\nextern \"C\" {\n  extern kern_return_t mach_vm_region_recurse(\n    vm_map_t target_task,\n    mach_vm_address_t *address,\n    mach_vm_size_t *size,\n    natural_t *nesting_depth,\n    vm_region_recurse_info_t info,\n    mach_msg_type_number_t *infoCnt);\n}\n\nnamespace __sanitizer {\n\n#include \"sanitizer_syscall_generic.inc\"\n\n// Direct syscalls, don't call libmalloc hooks (but not available on 10.6).\nextern \"C\" void *__mmap(void *addr, size_t len, int prot, int flags, int fildes,\n                        off_t off) SANITIZER_WEAK_ATTRIBUTE;\nextern \"C\" int __munmap(void *, size_t) SANITIZER_WEAK_ATTRIBUTE;\n\n// ---------------------- sanitizer_libc.h\n\n// From <mach/vm_statistics.h>, but not on older OSs.\n#ifndef VM_MEMORY_SANITIZER\n#define VM_MEMORY_SANITIZER 99\n#endif\n\n// XNU on Darwin provides a mmap flag that optimizes allocation/deallocation of\n// giant memory regions (i.e. 
shadow memory regions).\n#define kXnuFastMmapFd 0x4\nstatic size_t kXnuFastMmapThreshold = 2 << 30; // 2 GB\nstatic bool use_xnu_fast_mmap = false;\n\nuptr internal_mmap(void *addr, size_t length, int prot, int flags,\n                   int fd, u64 offset) {\n  if (fd == -1) {\n    fd = VM_MAKE_TAG(VM_MEMORY_SANITIZER);\n    if (length >= kXnuFastMmapThreshold) {\n      if (use_xnu_fast_mmap) fd |= kXnuFastMmapFd;\n    }\n  }\n  if (&__mmap) return (uptr)__mmap(addr, length, prot, flags, fd, offset);\n  return (uptr)mmap(addr, length, prot, flags, fd, offset);\n}\n\nuptr internal_munmap(void *addr, uptr length) {\n  if (&__munmap) return __munmap(addr, length);\n  return munmap(addr, length);\n}\n\nuptr internal_mremap(void *old_address, uptr old_size, uptr new_size, int flags,\n                     void *new_address) {\n  CHECK(false && \"internal_mremap is unimplemented on Mac\");\n  return 0;\n}\n\nint internal_mprotect(void *addr, uptr length, int prot) {\n  return mprotect(addr, length, prot);\n}\n\nint internal_madvise(uptr addr, uptr length, int advice) {\n  return madvise((void *)addr, length, advice);\n}\n\nuptr internal_close(fd_t fd) {\n  return close(fd);\n}\n\nuptr internal_open(const char *filename, int flags) {\n  return open(filename, flags);\n}\n\nuptr internal_open(const char *filename, int flags, u32 mode) {\n  return open(filename, flags, mode);\n}\n\nuptr internal_read(fd_t fd, void *buf, uptr count) {\n  return read(fd, buf, count);\n}\n\nuptr internal_write(fd_t fd, const void *buf, uptr count) {\n  return write(fd, buf, count);\n}\n\nuptr internal_stat(const char *path, void *buf) {\n  return stat(path, (struct stat *)buf);\n}\n\nuptr internal_lstat(const char *path, void *buf) {\n  return lstat(path, (struct stat *)buf);\n}\n\nuptr internal_fstat(fd_t fd, void *buf) {\n  return fstat(fd, (struct stat *)buf);\n}\n\nuptr internal_filesize(fd_t fd) {\n  struct stat st;\n  if (internal_fstat(fd, &st))\n    return -1;\n  return 
(uptr)st.st_size;\n}\n\nuptr internal_dup(int oldfd) {\n  return dup(oldfd);\n}\n\nuptr internal_dup2(int oldfd, int newfd) {\n  return dup2(oldfd, newfd);\n}\n\nuptr internal_readlink(const char *path, char *buf, uptr bufsize) {\n  return readlink(path, buf, bufsize);\n}\n\nuptr internal_unlink(const char *path) {\n  return unlink(path);\n}\n\nuptr internal_sched_yield() {\n  return sched_yield();\n}\n\nvoid internal__exit(int exitcode) {\n  _exit(exitcode);\n}\n\nvoid internal_usleep(u64 useconds) { usleep(useconds); }\n\nuptr internal_getpid() {\n  return getpid();\n}\n\nint internal_dlinfo(void *handle, int request, void *p) {\n  UNIMPLEMENTED();\n}\n\nint internal_sigaction(int signum, const void *act, void *oldact) {\n  return sigaction(signum,\n                   (const struct sigaction *)act, (struct sigaction *)oldact);\n}\n\nvoid internal_sigfillset(__sanitizer_sigset_t *set) { sigfillset(set); }\n\nuptr internal_sigprocmask(int how, __sanitizer_sigset_t *set,\n                          __sanitizer_sigset_t *oldset) {\n  // Don't use sigprocmask here, because it affects all threads.\n  return pthread_sigmask(how, set, oldset);\n}\n\n// Doesn't call pthread_atfork() handlers (but not available on 10.6).\nextern \"C\" pid_t __fork(void) SANITIZER_WEAK_ATTRIBUTE;\n\nint internal_fork() {\n  if (&__fork)\n    return __fork();\n  return fork();\n}\n\nint internal_sysctl(const int *name, unsigned int namelen, void *oldp,\n                    uptr *oldlenp, const void *newp, uptr newlen) {\n  return sysctl(const_cast<int *>(name), namelen, oldp, (size_t *)oldlenp,\n                const_cast<void *>(newp), (size_t)newlen);\n}\n\nint internal_sysctlbyname(const char *sname, void *oldp, uptr *oldlenp,\n                          const void *newp, uptr newlen) {\n  return sysctlbyname(sname, oldp, (size_t *)oldlenp, const_cast<void *>(newp),\n                      (size_t)newlen);\n}\n\nstatic fd_t internal_spawn_impl(const char *argv[], const char *envp[],\n        
                        pid_t *pid) {\n  fd_t primary_fd = kInvalidFd;\n  fd_t secondary_fd = kInvalidFd;\n\n  auto fd_closer = at_scope_exit([&] {\n    internal_close(primary_fd);\n    internal_close(secondary_fd);\n  });\n\n  // We need a new pseudoterminal to avoid buffering problems. The 'atos' tool\n  // in particular detects when it's talking to a pipe and forgets to flush the\n  // output stream after sending a response.\n  primary_fd = posix_openpt(O_RDWR);\n  if (primary_fd == kInvalidFd)\n    return kInvalidFd;\n\n  int res = grantpt(primary_fd) || unlockpt(primary_fd);\n  if (res != 0) return kInvalidFd;\n\n  // Use TIOCPTYGNAME instead of ptsname() to avoid threading problems.\n  char secondary_pty_name[128];\n  res = ioctl(primary_fd, TIOCPTYGNAME, secondary_pty_name);\n  if (res == -1) return kInvalidFd;\n\n  secondary_fd = internal_open(secondary_pty_name, O_RDWR);\n  if (secondary_fd == kInvalidFd)\n    return kInvalidFd;\n\n  // File descriptor actions\n  posix_spawn_file_actions_t acts;\n  res = posix_spawn_file_actions_init(&acts);\n  if (res != 0) return kInvalidFd;\n\n  auto acts_cleanup = at_scope_exit([&] {\n    posix_spawn_file_actions_destroy(&acts);\n  });\n\n  res = posix_spawn_file_actions_adddup2(&acts, secondary_fd, STDIN_FILENO) ||\n        posix_spawn_file_actions_adddup2(&acts, secondary_fd, STDOUT_FILENO) ||\n        posix_spawn_file_actions_addclose(&acts, secondary_fd);\n  if (res != 0) return kInvalidFd;\n\n  // Spawn attributes\n  posix_spawnattr_t attrs;\n  res = posix_spawnattr_init(&attrs);\n  if (res != 0) return kInvalidFd;\n\n  auto attrs_cleanup  = at_scope_exit([&] {\n    posix_spawnattr_destroy(&attrs);\n  });\n\n  // In the spawned process, close all file descriptors that are not explicitly\n  // described by the file actions object. 
This is Darwin-specific extension.\n  res = posix_spawnattr_setflags(&attrs, POSIX_SPAWN_CLOEXEC_DEFAULT);\n  if (res != 0) return kInvalidFd;\n\n  // posix_spawn\n  char **argv_casted = const_cast<char **>(argv);\n  char **envp_casted = const_cast<char **>(envp);\n  res = posix_spawn(pid, argv[0], &acts, &attrs, argv_casted, envp_casted);\n  if (res != 0) return kInvalidFd;\n\n  // Disable echo in the new terminal, disable CR.\n  struct termios termflags;\n  tcgetattr(primary_fd, &termflags);\n  termflags.c_oflag &= ~ONLCR;\n  termflags.c_lflag &= ~ECHO;\n  tcsetattr(primary_fd, TCSANOW, &termflags);\n\n  // On success, do not close primary_fd on scope exit.\n  fd_t fd = primary_fd;\n  primary_fd = kInvalidFd;\n\n  return fd;\n}\n\nfd_t internal_spawn(const char *argv[], const char *envp[], pid_t *pid) {\n  // The client program may close its stdin and/or stdout and/or stderr thus\n  // allowing open/posix_openpt to reuse file descriptors 0, 1 or 2. In this\n  // case the communication is broken if either the parent or the child tries to\n  // close or duplicate these descriptors. 
We temporarily reserve these\n  // descriptors here to prevent this.\n  fd_t low_fds[3];\n  size_t count = 0;\n\n  for (; count < 3; count++) {\n    low_fds[count] = posix_openpt(O_RDWR);\n    if (low_fds[count] >= STDERR_FILENO)\n      break;\n  }\n\n  fd_t fd = internal_spawn_impl(argv, envp, pid);\n\n  for (; count > 0; count--) {\n    internal_close(low_fds[count]);\n  }\n\n  return fd;\n}\n\nuptr internal_rename(const char *oldpath, const char *newpath) {\n  return rename(oldpath, newpath);\n}\n\nuptr internal_ftruncate(fd_t fd, uptr size) {\n  return ftruncate(fd, size);\n}\n\nuptr internal_execve(const char *filename, char *const argv[],\n                     char *const envp[]) {\n  return execve(filename, argv, envp);\n}\n\nuptr internal_waitpid(int pid, int *status, int options) {\n  return waitpid(pid, status, options);\n}\n\n// ----------------- sanitizer_common.h\nbool FileExists(const char *filename) {\n  if (ShouldMockFailureToOpen(filename))\n    return false;\n  struct stat st;\n  if (stat(filename, &st))\n    return false;\n  // Sanity check: filename is a regular file.\n  return S_ISREG(st.st_mode);\n}\n\ntid_t GetTid() {\n  tid_t tid;\n  pthread_threadid_np(nullptr, &tid);\n  return tid;\n}\n\nvoid GetThreadStackTopAndBottom(bool at_initialization, uptr *stack_top,\n                                uptr *stack_bottom) {\n  CHECK(stack_top);\n  CHECK(stack_bottom);\n  uptr stacksize = pthread_get_stacksize_np(pthread_self());\n  // pthread_get_stacksize_np() returns an incorrect stack size for the main\n  // thread on Mavericks. 
See\n  // https://github.com/google/sanitizers/issues/261\n  if ((GetMacosAlignedVersion() >= MacosVersion(10, 9)) && at_initialization &&\n      stacksize == (1 << 19))  {\n    struct rlimit rl;\n    CHECK_EQ(getrlimit(RLIMIT_STACK, &rl), 0);\n    // Most often rl.rlim_cur will be the desired 8M.\n    if (rl.rlim_cur < kMaxThreadStackSize) {\n      stacksize = rl.rlim_cur;\n    } else {\n      stacksize = kMaxThreadStackSize;\n    }\n  }\n  void *stackaddr = pthread_get_stackaddr_np(pthread_self());\n  *stack_top = (uptr)stackaddr;\n  *stack_bottom = *stack_top - stacksize;\n}\n\nchar **GetEnviron() {\n#if !SANITIZER_IOS\n  char ***env_ptr = _NSGetEnviron();\n  if (!env_ptr) {\n    Report(\"_NSGetEnviron() returned NULL. Please make sure __asan_init() is \"\n           \"called after libSystem_initializer().\\n\");\n    CHECK(env_ptr);\n  }\n  char **environ = *env_ptr;\n#endif\n  CHECK(environ);\n  return environ;\n}\n\nconst char *GetEnv(const char *name) {\n  char **env = GetEnviron();\n  uptr name_len = internal_strlen(name);\n  while (*env != 0) {\n    uptr len = internal_strlen(*env);\n    if (len > name_len) {\n      const char *p = *env;\n      if (!internal_memcmp(p, name, name_len) &&\n          p[name_len] == '=') {  // Match.\n        return *env + name_len + 1;  // String starting after =.\n      }\n    }\n    env++;\n  }\n  return 0;\n}\n\nuptr ReadBinaryName(/*out*/char *buf, uptr buf_len) {\n  CHECK_LE(kMaxPathLength, buf_len);\n\n  // On OS X the executable path is saved to the stack by dyld. 
Reading it\n  // from there is much faster than calling dladdr, especially for large\n  // binaries with symbols.\n  InternalMmapVector<char> exe_path(kMaxPathLength);\n  uint32_t size = exe_path.size();\n  if (_NSGetExecutablePath(exe_path.data(), &size) == 0 &&\n      realpath(exe_path.data(), buf) != 0) {\n    return internal_strlen(buf);\n  }\n  return 0;\n}\n\nuptr ReadLongProcessName(/*out*/char *buf, uptr buf_len) {\n  return ReadBinaryName(buf, buf_len);\n}\n\nvoid ReExec() {\n  UNIMPLEMENTED();\n}\n\nvoid CheckASLR() {\n  // Do nothing\n}\n\nvoid CheckMPROTECT() {\n  // Do nothing\n}\n\nuptr GetPageSize() {\n  return sysconf(_SC_PAGESIZE);\n}\n\nextern \"C\" unsigned malloc_num_zones;\nextern \"C\" malloc_zone_t **malloc_zones;\nmalloc_zone_t sanitizer_zone;\n\n// We need to make sure that sanitizer_zone is registered as malloc_zones[0]. If\n// libmalloc tries to set up a different zone as malloc_zones[0], it will call\n// mprotect(malloc_zones, ..., PROT_READ).  This interceptor will catch that and\n// make sure we are still the first (default) zone.\nvoid MprotectMallocZones(void *addr, int prot) {\n  if (addr == malloc_zones && prot == PROT_READ) {\n    if (malloc_num_zones > 1 && malloc_zones[0] != &sanitizer_zone) {\n      for (unsigned i = 1; i < malloc_num_zones; i++) {\n        if (malloc_zones[i] == &sanitizer_zone) {\n          // Swap malloc_zones[0] and malloc_zones[i].\n          malloc_zones[i] = malloc_zones[0];\n          malloc_zones[0] = &sanitizer_zone;\n          break;\n        }\n      }\n    }\n  }\n}\n\nvoid FutexWait(atomic_uint32_t *p, u32 cmp) {\n  // FIXME: implement actual blocking.\n  sched_yield();\n}\n\nvoid FutexWake(atomic_uint32_t *p, u32 count) {}\n\nu64 NanoTime() {\n  timeval tv;\n  internal_memset(&tv, 0, sizeof(tv));\n  gettimeofday(&tv, 0);\n  return (u64)tv.tv_sec * 1000*1000*1000 + tv.tv_usec * 1000;\n}\n\n// This needs to be called during initialization to avoid being racy.\nu64 MonotonicNanoTime() {\n  static 
mach_timebase_info_data_t timebase_info;\n  if (timebase_info.denom == 0) mach_timebase_info(&timebase_info);\n  return (mach_absolute_time() * timebase_info.numer) / timebase_info.denom;\n}\n\nuptr GetTlsSize() {\n  return 0;\n}\n\nvoid InitTlsSize() {\n}\n\nuptr TlsBaseAddr() {\n  uptr segbase = 0;\n#if defined(__x86_64__)\n  asm(\"movq %%gs:0,%0\" : \"=r\"(segbase));\n#elif defined(__i386__)\n  asm(\"movl %%gs:0,%0\" : \"=r\"(segbase));\n#elif defined(__aarch64__)\n  asm(\"mrs %x0, tpidrro_el0\" : \"=r\"(segbase));\n  segbase &= 0x07ul;  // clearing lower bits, cpu id stored there\n#endif\n  return segbase;\n}\n\n// The size of the tls on darwin does not appear to be well documented,\n// however the vm memory map suggests that it is 1024 uptrs in size,\n// with a size of 0x2000 bytes on x86_64 and 0x1000 bytes on i386.\nuptr TlsSize() {\n#if defined(__x86_64__) || defined(__i386__)\n  return 1024 * sizeof(uptr);\n#else\n  return 0;\n#endif\n}\n\nvoid GetThreadStackAndTls(bool main, uptr *stk_addr, uptr *stk_size,\n                          uptr *tls_addr, uptr *tls_size) {\n#if !SANITIZER_GO\n  uptr stack_top, stack_bottom;\n  GetThreadStackTopAndBottom(main, &stack_top, &stack_bottom);\n  *stk_addr = stack_bottom;\n  *stk_size = stack_top - stack_bottom;\n  *tls_addr = TlsBaseAddr();\n  *tls_size = TlsSize();\n#else\n  *stk_addr = 0;\n  *stk_size = 0;\n  *tls_addr = 0;\n  *tls_size = 0;\n#endif\n}\n\nvoid ListOfModules::init() {\n  clearOrInit();\n  MemoryMappingLayout memory_mapping(false);\n  memory_mapping.DumpListOfModules(&modules_);\n}\n\nvoid ListOfModules::fallbackInit() { clear(); }\n\nstatic HandleSignalMode GetHandleSignalModeImpl(int signum) {\n  switch (signum) {\n    case SIGABRT:\n      return common_flags()->handle_abort;\n    case SIGILL:\n      return common_flags()->handle_sigill;\n    case SIGTRAP:\n      return common_flags()->handle_sigtrap;\n    case SIGFPE:\n      return common_flags()->handle_sigfpe;\n    case SIGSEGV:\n      return 
common_flags()->handle_segv;\n    case SIGBUS:\n      return common_flags()->handle_sigbus;\n  }\n  return kHandleSignalNo;\n}\n\nHandleSignalMode GetHandleSignalMode(int signum) {\n  // Handling fatal signals on watchOS and tvOS devices is disallowed.\n  if ((SANITIZER_WATCHOS || SANITIZER_TVOS) && !(SANITIZER_IOSSIM))\n    return kHandleSignalNo;\n  HandleSignalMode result = GetHandleSignalModeImpl(signum);\n  if (result == kHandleSignalYes && !common_flags()->allow_user_segv_handler)\n    return kHandleSignalExclusive;\n  return result;\n}\n\n// Offset example:\n// XNU 17 -- macOS 10.13 -- iOS 11 -- tvOS 11 -- watchOS 4\nconstexpr u16 GetOSMajorKernelOffset() {\n  if (TARGET_OS_OSX) return 4;\n  if (TARGET_OS_IOS || TARGET_OS_TV) return 6;\n  if (TARGET_OS_WATCH) return 13;\n}\n\nusing VersStr = char[64];\n\nstatic uptr ApproximateOSVersionViaKernelVersion(VersStr vers) {\n  u16 kernel_major = GetDarwinKernelVersion().major;\n  u16 offset = GetOSMajorKernelOffset();\n  CHECK_GE(kernel_major, offset);\n  u16 os_major = kernel_major - offset;\n\n  const char *format = \"%d.0\";\n  if (TARGET_OS_OSX) {\n    if (os_major >= 16) {  // macOS 11+\n      os_major -= 5;\n    } else {  // macOS 10.15 and below\n      format = \"10.%d\";\n    }\n  }\n  return internal_snprintf(vers, sizeof(VersStr), format, os_major);\n}\n\nstatic void GetOSVersion(VersStr vers) {\n  uptr len = sizeof(VersStr);\n  if (SANITIZER_IOSSIM) {\n    const char *vers_env = GetEnv(\"SIMULATOR_RUNTIME_VERSION\");\n    if (!vers_env) {\n      Report(\"ERROR: Running in simulator but SIMULATOR_RUNTIME_VERSION env \"\n          \"var is not set.\\n\");\n      Die();\n    }\n    len = internal_strlcpy(vers, vers_env, len);\n  } else {\n    int res =\n        internal_sysctlbyname(\"kern.osproductversion\", vers, &len, nullptr, 0);\n\n    // XNU 17 (macOS 10.13) and below do not provide the sysctl\n    // `kern.osproductversion` entry (res != 0).\n    bool no_os_version = res != 0;\n\n    // For launchd, 
sanitizer initialization runs before sysctl is setup\n    // (res == 0 && len != strlen(vers), vers is not a valid version).  However,\n    // the kernel version `kern.osrelease` is available.\n    bool launchd = (res == 0 && internal_strlen(vers) < 3);\n    if (launchd) CHECK_EQ(internal_getpid(), 1);\n\n    if (no_os_version || launchd) {\n      len = ApproximateOSVersionViaKernelVersion(vers);\n    }\n  }\n  CHECK_LT(len, sizeof(VersStr));\n}\n\nvoid ParseVersion(const char *vers, u16 *major, u16 *minor) {\n  // Format: <major>.<minor>[.<patch>]\\0\n  CHECK_GE(internal_strlen(vers), 3);\n  const char *p = vers;\n  *major = internal_simple_strtoll(p, &p, /*base=*/10);\n  CHECK_EQ(*p, '.');\n  p += 1;\n  *minor = internal_simple_strtoll(p, &p, /*base=*/10);\n}\n\n// Aligned versions example:\n// macOS 10.15 -- iOS 13 -- tvOS 13 -- watchOS 6\nstatic void MapToMacos(u16 *major, u16 *minor) {\n  if (TARGET_OS_OSX)\n    return;\n\n  if (TARGET_OS_IOS || TARGET_OS_TV)\n    *major += 2;\n  else if (TARGET_OS_WATCH)\n    *major += 9;\n  else\n    UNREACHABLE(\"unsupported platform\");\n\n  if (*major >= 16) {  // macOS 11+\n    *major -= 5;\n  } else {  // macOS 10.15 and below\n    *minor = *major;\n    *major = 10;\n  }\n}\n\nstatic MacosVersion GetMacosAlignedVersionInternal() {\n  VersStr vers = {};\n  GetOSVersion(vers);\n\n  u16 major, minor;\n  ParseVersion(vers, &major, &minor);\n  MapToMacos(&major, &minor);\n\n  return MacosVersion(major, minor);\n}\n\nstatic_assert(sizeof(MacosVersion) == sizeof(atomic_uint32_t::Type),\n              \"MacosVersion cache size\");\nstatic atomic_uint32_t cached_macos_version;\n\nMacosVersion GetMacosAlignedVersion() {\n  atomic_uint32_t::Type result =\n      atomic_load(&cached_macos_version, memory_order_acquire);\n  if (!result) {\n    MacosVersion version = GetMacosAlignedVersionInternal();\n    result = *reinterpret_cast<atomic_uint32_t::Type *>(&version);\n    atomic_store(&cached_macos_version, result, 
memory_order_release);\n  }\n  return *reinterpret_cast<MacosVersion *>(&result);\n}\n\nDarwinKernelVersion GetDarwinKernelVersion() {\n  VersStr vers = {};\n  uptr len = sizeof(VersStr);\n  int res = internal_sysctlbyname(\"kern.osrelease\", vers, &len, nullptr, 0);\n  CHECK_EQ(res, 0);\n  CHECK_LT(len, sizeof(VersStr));\n\n  u16 major, minor;\n  ParseVersion(vers, &major, &minor);\n\n  return DarwinKernelVersion(major, minor);\n}\n\nuptr GetRSS() {\n  struct task_basic_info info;\n  unsigned count = TASK_BASIC_INFO_COUNT;\n  kern_return_t result =\n      task_info(mach_task_self(), TASK_BASIC_INFO, (task_info_t)&info, &count);\n  if (UNLIKELY(result != KERN_SUCCESS)) {\n    Report(\"Cannot get task info. Error: %d\\n\", result);\n    Die();\n  }\n  return info.resident_size;\n}\n\nvoid *internal_start_thread(void *(*func)(void *arg), void *arg) {\n  // Start the thread with signals blocked, otherwise it can steal user signals.\n  __sanitizer_sigset_t set, old;\n  internal_sigfillset(&set);\n  internal_sigprocmask(SIG_SETMASK, &set, &old);\n  pthread_t th;\n  pthread_create(&th, 0, func, arg);\n  internal_sigprocmask(SIG_SETMASK, &old, 0);\n  return th;\n}\n\nvoid internal_join_thread(void *th) { pthread_join((pthread_t)th, 0); }\n\n#if !SANITIZER_GO\nstatic Mutex syslog_lock;\n#  endif\n\nvoid WriteOneLineToSyslog(const char *s) {\n#if !SANITIZER_GO\n  syslog_lock.CheckLocked();\n  if (GetMacosAlignedVersion() >= MacosVersion(10, 12)) {\n    os_log_error(OS_LOG_DEFAULT, \"%{public}s\", s);\n  } else {\n    asl_log(nullptr, nullptr, ASL_LEVEL_ERR, \"%s\", s);\n  }\n#endif\n}\n\n// buffer to store crash report application information\nstatic char crashreporter_info_buff[__sanitizer::kErrorMessageBufferSize] = {};\nstatic Mutex crashreporter_info_mutex;\n\nextern \"C\" {\n// Integrate with crash reporter libraries.\n#if HAVE_CRASHREPORTERCLIENT_H\nCRASH_REPORTER_CLIENT_HIDDEN\nstruct crashreporter_annotations_t gCRAnnotations\n    __attribute__((section(\"__DATA,\" 
CRASHREPORTER_ANNOTATIONS_SECTION))) = {\n        CRASHREPORTER_ANNOTATIONS_VERSION,\n        0,\n        0,\n        0,\n        0,\n        0,\n        0,\n#if CRASHREPORTER_ANNOTATIONS_VERSION > 4\n        0,\n#endif\n};\n\n#else\n// fall back to old crashreporter api\nstatic const char *__crashreporter_info__ __attribute__((__used__)) =\n    &crashreporter_info_buff[0];\nasm(\".desc ___crashreporter_info__, 0x10\");\n#endif\n\n}  // extern \"C\"\n\nstatic void CRAppendCrashLogMessage(const char *msg) {\n  Lock l(&crashreporter_info_mutex);\n  internal_strlcat(crashreporter_info_buff, msg,\n                   sizeof(crashreporter_info_buff));\n#if HAVE_CRASHREPORTERCLIENT_H\n  (void)CRSetCrashLogMessage(crashreporter_info_buff);\n#endif\n}\n\nvoid LogMessageOnPrintf(const char *str) {\n  // Log all printf output to CrashLog.\n  if (common_flags()->abort_on_error)\n    CRAppendCrashLogMessage(str);\n}\n\nvoid LogFullErrorReport(const char *buffer) {\n#if !SANITIZER_GO\n  // Log with os_trace. 
This will make it into the crash log.\n#if SANITIZER_OS_TRACE\n  if (GetMacosAlignedVersion() >= MacosVersion(10, 10)) {\n    // os_trace requires the message (format parameter) to be a string literal.\n    if (internal_strncmp(SanitizerToolName, \"AddressSanitizer\",\n                         sizeof(\"AddressSanitizer\") - 1) == 0)\n      os_trace(\"Address Sanitizer reported a failure.\");\n    else if (internal_strncmp(SanitizerToolName, \"UndefinedBehaviorSanitizer\",\n                              sizeof(\"UndefinedBehaviorSanitizer\") - 1) == 0)\n      os_trace(\"Undefined Behavior Sanitizer reported a failure.\");\n    else if (internal_strncmp(SanitizerToolName, \"ThreadSanitizer\",\n                              sizeof(\"ThreadSanitizer\") - 1) == 0)\n      os_trace(\"Thread Sanitizer reported a failure.\");\n    else\n      os_trace(\"Sanitizer tool reported a failure.\");\n\n    if (common_flags()->log_to_syslog)\n      os_trace(\"Consult syslog for more information.\");\n  }\n#endif\n\n  // Log to syslog.\n  // The logging on OS X may call pthread_create so we need the threading\n  // environment to be fully initialized. Also, this should never be called when\n  // holding the thread registry lock since that may result in a deadlock. If\n  // the reporting thread holds the thread registry mutex, and asl_log waits\n  // for GCD to dispatch a new thread, the process will deadlock, because the\n  // pthread_create wrapper needs to acquire the lock as well.\n  Lock l(&syslog_lock);\n  if (common_flags()->log_to_syslog)\n    WriteToSyslog(buffer);\n\n  // The report is added to CrashLog as part of logging all of Printf output.\n#endif\n}\n\nSignalContext::WriteFlag SignalContext::GetWriteFlag() const {\n#if defined(__x86_64__) || defined(__i386__)\n  ucontext_t *ucontext = static_cast<ucontext_t*>(context);\n  return ucontext->uc_mcontext->__es.__err & 2 /*T_PF_WRITE*/ ? 
Write : Read;\n#else\n  return Unknown;\n#endif\n}\n\nbool SignalContext::IsTrueFaultingAddress() const {\n  auto si = static_cast<const siginfo_t *>(siginfo);\n  // \"Real\" SIGSEGV codes (e.g., SEGV_MAPERR, SEGV_MAPERR) are non-zero.\n  return si->si_signo == SIGSEGV && si->si_code != 0;\n}\n\n#if defined(__aarch64__) && defined(arm_thread_state64_get_sp)\n  #define AARCH64_GET_REG(r) \\\n    (uptr)ptrauth_strip(     \\\n        (void *)arm_thread_state64_get_##r(ucontext->uc_mcontext->__ss), 0)\n#else\n  #define AARCH64_GET_REG(r) (uptr)ucontext->uc_mcontext->__ss.__##r\n#endif\n\nstatic void GetPcSpBp(void *context, uptr *pc, uptr *sp, uptr *bp) {\n  ucontext_t *ucontext = (ucontext_t*)context;\n# if defined(__aarch64__)\n  *pc = AARCH64_GET_REG(pc);\n#   if defined(__IPHONE_8_0) && __IPHONE_OS_VERSION_MAX_ALLOWED >= __IPHONE_8_0\n  *bp = AARCH64_GET_REG(fp);\n#   else\n  *bp = AARCH64_GET_REG(lr);\n#   endif\n  *sp = AARCH64_GET_REG(sp);\n# elif defined(__x86_64__)\n  *pc = ucontext->uc_mcontext->__ss.__rip;\n  *bp = ucontext->uc_mcontext->__ss.__rbp;\n  *sp = ucontext->uc_mcontext->__ss.__rsp;\n# elif defined(__arm__)\n  *pc = ucontext->uc_mcontext->__ss.__pc;\n  *bp = ucontext->uc_mcontext->__ss.__r[7];\n  *sp = ucontext->uc_mcontext->__ss.__sp;\n# elif defined(__i386__)\n  *pc = ucontext->uc_mcontext->__ss.__eip;\n  *bp = ucontext->uc_mcontext->__ss.__ebp;\n  *sp = ucontext->uc_mcontext->__ss.__esp;\n# else\n# error \"Unknown architecture\"\n# endif\n}\n\nvoid SignalContext::InitPcSpBp() {\n  addr = (uptr)ptrauth_strip((void *)addr, 0);\n  GetPcSpBp(context, &pc, &sp, &bp);\n}\n\n// ASan/TSan use mmap in a way that creates “deallocation gaps” which triggers\n// EXC_GUARD exceptions on macOS 10.15+ (XNU 19.0+).\nstatic void DisableMmapExcGuardExceptions() {\n  using task_exc_guard_behavior_t = uint32_t;\n  using task_set_exc_guard_behavior_t =\n      kern_return_t(task_t task, task_exc_guard_behavior_t behavior);\n  auto *set_behavior = 
(task_set_exc_guard_behavior_t *)dlsym(\n      RTLD_DEFAULT, \"task_set_exc_guard_behavior\");\n  if (set_behavior == nullptr) return;\n  const task_exc_guard_behavior_t task_exc_guard_none = 0;\n  set_behavior(mach_task_self(), task_exc_guard_none);\n}\n\nvoid InitializePlatformEarly() {\n  // Only use xnu_fast_mmap when on x86_64 and the kernel supports it.\n  use_xnu_fast_mmap =\n#if defined(__x86_64__)\n      GetDarwinKernelVersion() >= DarwinKernelVersion(17, 5);\n#else\n      false;\n#endif\n  if (GetDarwinKernelVersion() >= DarwinKernelVersion(19, 0))\n    DisableMmapExcGuardExceptions();\n}\n\n#if !SANITIZER_GO\nstatic const char kDyldInsertLibraries[] = \"DYLD_INSERT_LIBRARIES\";\nLowLevelAllocator allocator_for_env;\n\n// Change the value of the env var |name|, leaking the original value.\n// If |name_value| is NULL, the variable is deleted from the environment,\n// otherwise the corresponding \"NAME=value\" string is replaced with\n// |name_value|.\nvoid LeakyResetEnv(const char *name, const char *name_value) {\n  char **env = GetEnviron();\n  uptr name_len = internal_strlen(name);\n  while (*env != 0) {\n    uptr len = internal_strlen(*env);\n    if (len > name_len) {\n      const char *p = *env;\n      if (!internal_memcmp(p, name, name_len) && p[name_len] == '=') {\n        // Match.\n        if (name_value) {\n          // Replace the old value with the new one.\n          *env = const_cast<char*>(name_value);\n        } else {\n          // Shift the subsequent pointers back.\n          char **del = env;\n          do {\n            del[0] = del[1];\n          } while (*del++);\n        }\n      }\n    }\n    env++;\n  }\n}\n\nSANITIZER_WEAK_CXX_DEFAULT_IMPL\nbool ReexecDisabled() {\n  return false;\n}\n\nstatic bool DyldNeedsEnvVariable() {\n  // If running on OS X 10.11+ or iOS 9.0+, dyld will interpose even if\n  // DYLD_INSERT_LIBRARIES is not set.\n  return GetMacosAlignedVersion() < MacosVersion(10, 11);\n}\n\nvoid MaybeReexec() {\n  // FIXME: 
This should really live in some \"InitializePlatform\" method.\n  MonotonicNanoTime();\n\n  if (ReexecDisabled()) return;\n\n  // Make sure the dynamic runtime library is preloaded so that the\n  // wrappers work. If it is not, set DYLD_INSERT_LIBRARIES and re-exec\n  // ourselves.\n  Dl_info info;\n  RAW_CHECK(dladdr((void*)((uptr)&__sanitizer_report_error_summary), &info));\n  char *dyld_insert_libraries =\n      const_cast<char*>(GetEnv(kDyldInsertLibraries));\n  uptr old_env_len = dyld_insert_libraries ?\n      internal_strlen(dyld_insert_libraries) : 0;\n  uptr fname_len = internal_strlen(info.dli_fname);\n  const char *dylib_name = StripModuleName(info.dli_fname);\n  uptr dylib_name_len = internal_strlen(dylib_name);\n\n  bool lib_is_in_env = dyld_insert_libraries &&\n                       internal_strstr(dyld_insert_libraries, dylib_name);\n  if (DyldNeedsEnvVariable() && !lib_is_in_env) {\n    // DYLD_INSERT_LIBRARIES is not set or does not contain the runtime\n    // library.\n    InternalMmapVector<char> program_name(1024);\n    uint32_t buf_size = program_name.size();\n    _NSGetExecutablePath(program_name.data(), &buf_size);\n    char *new_env = const_cast<char*>(info.dli_fname);\n    if (dyld_insert_libraries) {\n      // Append the runtime dylib name to the existing value of\n      // DYLD_INSERT_LIBRARIES.\n      new_env = (char*)allocator_for_env.Allocate(old_env_len + fname_len + 2);\n      internal_strncpy(new_env, dyld_insert_libraries, old_env_len);\n      new_env[old_env_len] = ':';\n      // Copy fname_len and add a trailing zero.\n      internal_strncpy(new_env + old_env_len + 1, info.dli_fname,\n                       fname_len + 1);\n      // Ok to use setenv() since the wrappers don't depend on the value of\n      // asan_inited.\n      setenv(kDyldInsertLibraries, new_env, /*overwrite*/1);\n    } else {\n      // Set DYLD_INSERT_LIBRARIES equal to the runtime dylib name.\n      setenv(kDyldInsertLibraries, info.dli_fname, 
/*overwrite*/0);\n    }\n    VReport(1, \"exec()-ing the program with\\n\");\n    VReport(1, \"%s=%s\\n\", kDyldInsertLibraries, new_env);\n    VReport(1, \"to enable wrappers.\\n\");\n    execv(program_name.data(), *_NSGetArgv());\n\n    // We get here only if execv() failed.\n    Report(\"ERROR: The process is launched without DYLD_INSERT_LIBRARIES, \"\n           \"which is required for the sanitizer to work. We tried to set the \"\n           \"environment variable and re-execute itself, but execv() failed, \"\n           \"possibly because of sandbox restrictions. Make sure to launch the \"\n           \"executable with:\\n%s=%s\\n\", kDyldInsertLibraries, new_env);\n    RAW_CHECK(\"execv failed\" && 0);\n  }\n\n  // Verify that interceptors really work.  We'll use dlsym to locate\n  // \"pthread_create\", if interceptors are working, it should really point to\n  // \"wrap_pthread_create\" within our own dylib.\n  Dl_info info_pthread_create;\n  void *dlopen_addr = dlsym(RTLD_DEFAULT, \"pthread_create\");\n  RAW_CHECK(dladdr(dlopen_addr, &info_pthread_create));\n  if (internal_strcmp(info.dli_fname, info_pthread_create.dli_fname) != 0) {\n    Report(\n        \"ERROR: Interceptors are not working. This may be because %s is \"\n        \"loaded too late (e.g. via dlopen). Please launch the executable \"\n        \"with:\\n%s=%s\\n\",\n        SanitizerToolName, kDyldInsertLibraries, info.dli_fname);\n    RAW_CHECK(\"interceptors not installed\" && 0);\n  }\n\n  if (!lib_is_in_env)\n    return;\n\n  if (!common_flags()->strip_env)\n    return;\n\n  // DYLD_INSERT_LIBRARIES is set and contains the runtime library. 
Let's remove\n  // the dylib from the environment variable, because interceptors are installed\n  // and we don't want our children to inherit the variable.\n\n  uptr env_name_len = internal_strlen(kDyldInsertLibraries);\n  // Allocate memory to hold the previous env var name, its value, the '='\n  // sign and the '\\0' char.\n  char *new_env = (char*)allocator_for_env.Allocate(\n      old_env_len + 2 + env_name_len);\n  RAW_CHECK(new_env);\n  internal_memset(new_env, '\\0', old_env_len + 2 + env_name_len);\n  internal_strncpy(new_env, kDyldInsertLibraries, env_name_len);\n  new_env[env_name_len] = '=';\n  char *new_env_pos = new_env + env_name_len + 1;\n\n  // Iterate over colon-separated pieces of |dyld_insert_libraries|.\n  char *piece_start = dyld_insert_libraries;\n  char *piece_end = NULL;\n  char *old_env_end = dyld_insert_libraries + old_env_len;\n  do {\n    if (piece_start[0] == ':') piece_start++;\n    piece_end = internal_strchr(piece_start, ':');\n    if (!piece_end) piece_end = dyld_insert_libraries + old_env_len;\n    if ((uptr)(piece_start - dyld_insert_libraries) > old_env_len) break;\n    uptr piece_len = piece_end - piece_start;\n\n    char *filename_start =\n        (char *)internal_memrchr(piece_start, '/', piece_len);\n    uptr filename_len = piece_len;\n    if (filename_start) {\n      filename_start += 1;\n      filename_len = piece_len - (filename_start - piece_start);\n    } else {\n      filename_start = piece_start;\n    }\n\n    // If the current piece isn't the runtime library name,\n    // append it to new_env.\n    if ((dylib_name_len != filename_len) ||\n        (internal_memcmp(filename_start, dylib_name, dylib_name_len) != 0)) {\n      if (new_env_pos != new_env + env_name_len + 1) {\n        new_env_pos[0] = ':';\n        new_env_pos++;\n      }\n      internal_strncpy(new_env_pos, piece_start, piece_len);\n      new_env_pos += piece_len;\n    }\n    // Move on to the next piece.\n    piece_start = piece_end;\n  } while 
(piece_start < old_env_end);\n\n  // Can't use setenv() here, because it requires the allocator to be\n  // initialized.\n  // FIXME: instead of filtering DYLD_INSERT_LIBRARIES here, do it in\n  // a separate function called after InitializeAllocator().\n  if (new_env_pos == new_env + env_name_len + 1) new_env = NULL;\n  LeakyResetEnv(kDyldInsertLibraries, new_env);\n}\n#endif  // SANITIZER_GO\n\nchar **GetArgv() {\n  return *_NSGetArgv();\n}\n\n#if SANITIZER_IOS && !SANITIZER_IOSSIM\n// The task_vm_info struct is normally provided by the macOS SDK, but we need\n// fields only available in 10.12+. Declare the struct manually to be able to\n// build against older SDKs.\nstruct __sanitizer_task_vm_info {\n  mach_vm_size_t virtual_size;\n  integer_t region_count;\n  integer_t page_size;\n  mach_vm_size_t resident_size;\n  mach_vm_size_t resident_size_peak;\n  mach_vm_size_t device;\n  mach_vm_size_t device_peak;\n  mach_vm_size_t internal;\n  mach_vm_size_t internal_peak;\n  mach_vm_size_t external;\n  mach_vm_size_t external_peak;\n  mach_vm_size_t reusable;\n  mach_vm_size_t reusable_peak;\n  mach_vm_size_t purgeable_volatile_pmap;\n  mach_vm_size_t purgeable_volatile_resident;\n  mach_vm_size_t purgeable_volatile_virtual;\n  mach_vm_size_t compressed;\n  mach_vm_size_t compressed_peak;\n  mach_vm_size_t compressed_lifetime;\n  mach_vm_size_t phys_footprint;\n  mach_vm_address_t min_address;\n  mach_vm_address_t max_address;\n};\n#define __SANITIZER_TASK_VM_INFO_COUNT ((mach_msg_type_number_t) \\\n    (sizeof(__sanitizer_task_vm_info) / sizeof(natural_t)))\n\nstatic uptr GetTaskInfoMaxAddress() {\n  __sanitizer_task_vm_info vm_info = {} /* zero initialize */;\n  mach_msg_type_number_t count = __SANITIZER_TASK_VM_INFO_COUNT;\n  int err = task_info(mach_task_self(), TASK_VM_INFO, (int *)&vm_info, &count);\n  return err ? 
0 : vm_info.max_address;\n}\n\nuptr GetMaxUserVirtualAddress() {\n  static uptr max_vm = GetTaskInfoMaxAddress();\n  if (max_vm != 0) {\n    const uptr ret_value = max_vm - 1;\n    CHECK_LE(ret_value, SANITIZER_MMAP_RANGE_SIZE);\n    return ret_value;\n  }\n\n  // xnu cannot provide vm address limit\n# if SANITIZER_WORDSIZE == 32\n  constexpr uptr fallback_max_vm = 0xffe00000 - 1;\n# else\n  constexpr uptr fallback_max_vm = 0x200000000 - 1;\n# endif\n  static_assert(fallback_max_vm <= SANITIZER_MMAP_RANGE_SIZE,\n                \"Max virtual address must be less than mmap range size.\");\n  return fallback_max_vm;\n}\n\n#else // !SANITIZER_IOS\n\nuptr GetMaxUserVirtualAddress() {\n# if SANITIZER_WORDSIZE == 64\n  constexpr uptr max_vm = (1ULL << 47) - 1;  // 0x00007fffffffffffUL;\n# else // SANITIZER_WORDSIZE == 32\n  static_assert(SANITIZER_WORDSIZE == 32, \"Wrong wordsize\");\n  constexpr uptr max_vm = (1ULL << 32) - 1;  // 0xffffffff;\n# endif\n  static_assert(max_vm <= SANITIZER_MMAP_RANGE_SIZE,\n                \"Max virtual address must be less than mmap range size.\");\n  return max_vm;\n}\n#endif\n\nuptr GetMaxVirtualAddress() {\n  return GetMaxUserVirtualAddress();\n}\n\nuptr MapDynamicShadow(uptr shadow_size_bytes, uptr shadow_scale,\n                      uptr min_shadow_base_alignment, uptr &high_mem_end) {\n  const uptr granularity = GetMmapGranularity();\n  const uptr alignment =\n      Max<uptr>(granularity << shadow_scale, 1ULL << min_shadow_base_alignment);\n  const uptr left_padding =\n      Max<uptr>(granularity, 1ULL << min_shadow_base_alignment);\n\n  uptr space_size = shadow_size_bytes + left_padding;\n\n  uptr largest_gap_found = 0;\n  uptr max_occupied_addr = 0;\n  VReport(2, \"FindDynamicShadowStart, space_size = %p\\n\", (void *)space_size);\n  uptr shadow_start =\n      FindAvailableMemoryRange(space_size, alignment, granularity,\n                               &largest_gap_found, &max_occupied_addr);\n  // If the shadow doesn't fit, 
restrict the address space to make it fit.\n  if (shadow_start == 0) {\n    VReport(\n        2,\n        \"Shadow doesn't fit, largest_gap_found = %p, max_occupied_addr = %p\\n\",\n        (void *)largest_gap_found, (void *)max_occupied_addr);\n    uptr new_max_vm = RoundDownTo(largest_gap_found << shadow_scale, alignment);\n    if (new_max_vm < max_occupied_addr) {\n      Report(\"Unable to find a memory range for dynamic shadow.\\n\");\n      Report(\n          \"space_size = %p, largest_gap_found = %p, max_occupied_addr = %p, \"\n          \"new_max_vm = %p\\n\",\n          (void *)space_size, (void *)largest_gap_found,\n          (void *)max_occupied_addr, (void *)new_max_vm);\n      CHECK(0 && \"cannot place shadow\");\n    }\n    RestrictMemoryToMaxAddress(new_max_vm);\n    high_mem_end = new_max_vm - 1;\n    space_size = (high_mem_end >> shadow_scale) + left_padding;\n    VReport(2, \"FindDynamicShadowStart, space_size = %p\\n\", (void *)space_size);\n    shadow_start = FindAvailableMemoryRange(space_size, alignment, granularity,\n                                            nullptr, nullptr);\n    if (shadow_start == 0) {\n      Report(\"Unable to find a memory range after restricting VM.\\n\");\n      CHECK(0 && \"cannot place shadow after restricting vm\");\n    }\n  }\n  CHECK_NE((uptr)0, shadow_start);\n  CHECK(IsAligned(shadow_start, alignment));\n  return shadow_start;\n}\n\nuptr MapDynamicShadowAndAliases(uptr shadow_size, uptr alias_size,\n                                uptr num_aliases, uptr ring_buffer_size) {\n  CHECK(false && \"HWASan aliasing is unimplemented on Mac\");\n  return 0;\n}\n\nuptr FindAvailableMemoryRange(uptr size, uptr alignment, uptr left_padding,\n                              uptr *largest_gap_found,\n                              uptr *max_occupied_addr) {\n  typedef vm_region_submap_short_info_data_64_t RegionInfo;\n  enum { kRegionInfoSize = VM_REGION_SUBMAP_SHORT_INFO_COUNT_64 };\n  // Start searching for available memory 
region past PAGEZERO, which is\n  // 4KB on 32-bit and 4GB on 64-bit.\n  mach_vm_address_t start_address =\n    (SANITIZER_WORDSIZE == 32) ? 0x000000001000 : 0x000100000000;\n\n  mach_vm_address_t address = start_address;\n  mach_vm_address_t free_begin = start_address;\n  kern_return_t kr = KERN_SUCCESS;\n  if (largest_gap_found) *largest_gap_found = 0;\n  if (max_occupied_addr) *max_occupied_addr = 0;\n  while (kr == KERN_SUCCESS) {\n    mach_vm_size_t vmsize = 0;\n    natural_t depth = 0;\n    RegionInfo vminfo;\n    mach_msg_type_number_t count = kRegionInfoSize;\n    kr = mach_vm_region_recurse(mach_task_self(), &address, &vmsize, &depth,\n                                (vm_region_info_t)&vminfo, &count);\n    if (kr == KERN_INVALID_ADDRESS) {\n      // No more regions beyond \"address\", consider the gap at the end of VM.\n      address = GetMaxVirtualAddress() + 1;\n      vmsize = 0;\n    } else {\n      if (max_occupied_addr) *max_occupied_addr = address + vmsize;\n    }\n    if (free_begin != address) {\n      // We found a free region [free_begin..address-1].\n      uptr gap_start = RoundUpTo((uptr)free_begin + left_padding, alignment);\n      uptr gap_end = RoundDownTo((uptr)address, alignment);\n      uptr gap_size = gap_end > gap_start ? 
gap_end - gap_start : 0;\n      if (size < gap_size) {\n        return gap_start;\n      }\n\n      if (largest_gap_found && *largest_gap_found < gap_size) {\n        *largest_gap_found = gap_size;\n      }\n    }\n    // Move to the next region.\n    address += vmsize;\n    free_begin = address;\n  }\n\n  // We looked at all free regions and could not find one large enough.\n  return 0;\n}\n\n// FIXME implement on this platform.\nvoid GetMemoryProfile(fill_profile_f cb, uptr *stats) {}\n\nvoid SignalContext::DumpAllRegisters(void *context) {\n  Report(\"Register values:\\n\");\n\n  ucontext_t *ucontext = (ucontext_t*)context;\n# define DUMPREG64(r) \\\n    Printf(\"%s = 0x%016llx  \", #r, ucontext->uc_mcontext->__ss.__ ## r);\n# define DUMPREGA64(r) \\\n    Printf(\"   %s = 0x%016lx  \", #r, AARCH64_GET_REG(r));\n# define DUMPREG32(r) \\\n    Printf(\"%s = 0x%08x  \", #r, ucontext->uc_mcontext->__ss.__ ## r);\n# define DUMPREG_(r)   Printf(\" \"); DUMPREG(r);\n# define DUMPREG__(r)  Printf(\"  \"); DUMPREG(r);\n# define DUMPREG___(r) Printf(\"   \"); DUMPREG(r);\n\n# if defined(__x86_64__)\n#  define DUMPREG(r) DUMPREG64(r)\n  DUMPREG(rax); DUMPREG(rbx); DUMPREG(rcx); DUMPREG(rdx); Printf(\"\\n\");\n  DUMPREG(rdi); DUMPREG(rsi); DUMPREG(rbp); DUMPREG(rsp); Printf(\"\\n\");\n  DUMPREG_(r8); DUMPREG_(r9); DUMPREG(r10); DUMPREG(r11); Printf(\"\\n\");\n  DUMPREG(r12); DUMPREG(r13); DUMPREG(r14); DUMPREG(r15); Printf(\"\\n\");\n# elif defined(__i386__)\n#  define DUMPREG(r) DUMPREG32(r)\n  DUMPREG(eax); DUMPREG(ebx); DUMPREG(ecx); DUMPREG(edx); Printf(\"\\n\");\n  DUMPREG(edi); DUMPREG(esi); DUMPREG(ebp); DUMPREG(esp); Printf(\"\\n\");\n# elif defined(__aarch64__)\n#  define DUMPREG(r) DUMPREG64(r)\n  DUMPREG_(x[0]); DUMPREG_(x[1]); DUMPREG_(x[2]); DUMPREG_(x[3]); Printf(\"\\n\");\n  DUMPREG_(x[4]); DUMPREG_(x[5]); DUMPREG_(x[6]); DUMPREG_(x[7]); Printf(\"\\n\");\n  DUMPREG_(x[8]); DUMPREG_(x[9]); DUMPREG(x[10]); DUMPREG(x[11]); Printf(\"\\n\");\n  DUMPREG(x[12]); 
DUMPREG(x[13]); DUMPREG(x[14]); DUMPREG(x[15]); Printf(\"\\n\");\n  DUMPREG(x[16]); DUMPREG(x[17]); DUMPREG(x[18]); DUMPREG(x[19]); Printf(\"\\n\");\n  DUMPREG(x[20]); DUMPREG(x[21]); DUMPREG(x[22]); DUMPREG(x[23]); Printf(\"\\n\");\n  DUMPREG(x[24]); DUMPREG(x[25]); DUMPREG(x[26]); DUMPREG(x[27]); Printf(\"\\n\");\n  DUMPREG(x[28]); DUMPREGA64(fp); DUMPREGA64(lr); DUMPREGA64(sp); Printf(\"\\n\");\n# elif defined(__arm__)\n#  define DUMPREG(r) DUMPREG32(r)\n  DUMPREG_(r[0]); DUMPREG_(r[1]); DUMPREG_(r[2]); DUMPREG_(r[3]); Printf(\"\\n\");\n  DUMPREG_(r[4]); DUMPREG_(r[5]); DUMPREG_(r[6]); DUMPREG_(r[7]); Printf(\"\\n\");\n  DUMPREG_(r[8]); DUMPREG_(r[9]); DUMPREG(r[10]); DUMPREG(r[11]); Printf(\"\\n\");\n  DUMPREG(r[12]); DUMPREG___(sp); DUMPREG___(lr); DUMPREG___(pc); Printf(\"\\n\");\n# else\n# error \"Unknown architecture\"\n# endif\n\n# undef DUMPREG64\n# undef DUMPREG32\n# undef DUMPREG_\n# undef DUMPREG__\n# undef DUMPREG___\n# undef DUMPREG\n}\n\nstatic inline bool CompareBaseAddress(const LoadedModule &a,\n                                      const LoadedModule &b) {\n  return a.base_address() < b.base_address();\n}\n\nvoid FormatUUID(char *out, uptr size, const u8 *uuid) {\n  internal_snprintf(out, size,\n                    \"<%02X%02X%02X%02X-%02X%02X-%02X%02X-%02X%02X-\"\n                    \"%02X%02X%02X%02X%02X%02X>\",\n                    uuid[0], uuid[1], uuid[2], uuid[3], uuid[4], uuid[5],\n                    uuid[6], uuid[7], uuid[8], uuid[9], uuid[10], uuid[11],\n                    uuid[12], uuid[13], uuid[14], uuid[15]);\n}\n\nvoid DumpProcessMap() {\n  Printf(\"Process module map:\\n\");\n  MemoryMappingLayout memory_mapping(false);\n  InternalMmapVector<LoadedModule> modules;\n  modules.reserve(128);\n  memory_mapping.DumpListOfModules(&modules);\n  Sort(modules.data(), modules.size(), CompareBaseAddress);\n  for (uptr i = 0; i < modules.size(); ++i) {\n    char uuid_str[128];\n    FormatUUID(uuid_str, sizeof(uuid_str), 
modules[i].uuid());\n    Printf(\"0x%zx-0x%zx %s (%s) %s\\n\", modules[i].base_address(),\n           modules[i].max_executable_address(), modules[i].full_name(),\n           ModuleArchToString(modules[i].arch()), uuid_str);\n  }\n  Printf(\"End of module map.\\n\");\n}\n\nvoid CheckNoDeepBind(const char *filename, int flag) {\n  // Do nothing.\n}\n\nbool GetRandom(void *buffer, uptr length, bool blocking) {\n  if (!buffer || !length || length > 256)\n    return false;\n  // arc4random never fails.\n  REAL(arc4random_buf)(buffer, length);\n  return true;\n}\n\nu32 GetNumberOfCPUs() {\n  return (u32)sysconf(_SC_NPROCESSORS_ONLN);\n}\n\nvoid InitializePlatformCommonFlags(CommonFlags *cf) {}\n\n}  // namespace __sanitizer\n\n#endif  // SANITIZER_MAC\n"
  },
  {
    "path": "runtime/sanitizer_common/sanitizer_mac.h",
    "content": "//===-- sanitizer_mac.h -----------------------------------------*- C++ -*-===//\n//\n// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n// See https://llvm.org/LICENSE.txt for license information.\n// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n//\n//===----------------------------------------------------------------------===//\n//\n// This file is shared between various sanitizers' runtime libraries and\n// provides definitions for OSX-specific functions.\n//===----------------------------------------------------------------------===//\n#ifndef SANITIZER_MAC_H\n#define SANITIZER_MAC_H\n\n#include \"sanitizer_common.h\"\n#include \"sanitizer_platform.h\"\n#if SANITIZER_MAC\n#include \"sanitizer_posix.h\"\n\nnamespace __sanitizer {\n\nstruct MemoryMappingLayoutData {\n  int current_image;\n  u32 current_magic;\n  u32 current_filetype;\n  ModuleArch current_arch;\n  u8 current_uuid[kModuleUUIDSize];\n  int current_load_cmd_count;\n  const char *current_load_cmd_addr;\n  bool current_instrumented;\n};\n\ntemplate <typename VersionType>\nstruct VersionBase {\n  u16 major;\n  u16 minor;\n\n  VersionBase(u16 major, u16 minor) : major(major), minor(minor) {}\n\n  bool operator==(const VersionType &other) const {\n    return major == other.major && minor == other.minor;\n  }\n  bool operator>=(const VersionType &other) const {\n    return major > other.major ||\n           (major == other.major && minor >= other.minor);\n  }\n  bool operator<(const VersionType &other) const { return !(*this >= other); }\n};\n\nstruct MacosVersion : VersionBase<MacosVersion> {\n  MacosVersion(u16 major, u16 minor) : VersionBase(major, minor) {}\n};\n\nstruct DarwinKernelVersion : VersionBase<DarwinKernelVersion> {\n  DarwinKernelVersion(u16 major, u16 minor) : VersionBase(major, minor) {}\n};\n\nMacosVersion GetMacosAlignedVersion();\nDarwinKernelVersion GetDarwinKernelVersion();\n\nchar **GetEnviron();\n\nvoid 
RestrictMemoryToMaxAddress(uptr max_address);\n\n}  // namespace __sanitizer\n\n#endif  // SANITIZER_MAC\n#endif  // SANITIZER_MAC_H\n"
  },
  {
    "path": "runtime/sanitizer_common/sanitizer_mac_libcdep.cpp",
    "content": "//===-- sanitizer_mac_libcdep.cpp -----------------------------------------===//\n//\n// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n// See https://llvm.org/LICENSE.txt for license information.\n// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n//\n//===----------------------------------------------------------------------===//\n//\n// This file is shared between various sanitizers' runtime libraries and\n// implements OSX-specific functions.\n//===----------------------------------------------------------------------===//\n\n#include \"sanitizer_platform.h\"\n#if SANITIZER_MAC\n#include \"sanitizer_mac.h\"\n\n#include <sys/mman.h>\n\nnamespace __sanitizer {\n\nvoid RestrictMemoryToMaxAddress(uptr max_address) {\n  uptr size_to_mmap = GetMaxUserVirtualAddress() + 1 - max_address;\n  void *res = MmapFixedNoAccess(max_address, size_to_mmap, \"high gap\");\n  CHECK(res != MAP_FAILED);\n}\n\n}  // namespace __sanitizer\n\n#endif  // SANITIZER_MAC\n"
  },
  {
    "path": "runtime/sanitizer_common/sanitizer_malloc_mac.inc",
    "content": "//===-- sanitizer_malloc_mac.inc --------------------------------*- C++ -*-===//\n//\n// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n// See https://llvm.org/LICENSE.txt for license information.\n// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n//\n//===----------------------------------------------------------------------===//\n//\n// This file contains Mac-specific malloc interceptors and a custom zone\n// implementation, which together replace the system allocator.\n//\n//===----------------------------------------------------------------------===//\n\n#include \"sanitizer_common/sanitizer_platform.h\"\n#if !SANITIZER_MAC\n#error \"This file should only be compiled on Darwin.\"\n#endif\n\n#include <AvailabilityMacros.h>\n#include <CoreFoundation/CFBase.h>\n#include <dlfcn.h>\n#include <malloc/malloc.h>\n#include <sys/mman.h>\n\n#include \"interception/interception.h\"\n#include \"sanitizer_common/sanitizer_allocator_dlsym.h\"\n#include \"sanitizer_common/sanitizer_mac.h\"\n\n// Similar code is used in Google Perftools,\n// https://github.com/gperftools/gperftools.\n\nnamespace __sanitizer {\n\nextern malloc_zone_t sanitizer_zone;\n\nstruct sanitizer_malloc_introspection_t : public malloc_introspection_t {\n  // IMPORTANT: Do not change the order, alignment, or types of these fields to\n  // maintain binary compatibility. 
You should only add fields to this struct.\n\n  // Used to track changes to the allocator that will affect\n  // zone enumeration.\n  u64 allocator_enumeration_version;\n  uptr allocator_ptr;\n  uptr allocator_size;\n};\n\nu64 GetMallocZoneAllocatorEnumerationVersion() {\n  // This represents the current allocator ABI version.\n  // This field should be incremented every time the Allocator\n  // ABI changes in a way that breaks allocator enumeration.\n  return 0;\n}\n\n}  // namespace __sanitizer\n\nINTERCEPTOR(malloc_zone_t *, malloc_create_zone,\n                             vm_size_t start_size, unsigned zone_flags) {\n  COMMON_MALLOC_ENTER();\n  uptr page_size = GetPageSizeCached();\n  uptr allocated_size = RoundUpTo(sizeof(sanitizer_zone), page_size);\n  COMMON_MALLOC_MEMALIGN(page_size, allocated_size);\n  malloc_zone_t *new_zone = (malloc_zone_t *)p;\n  internal_memcpy(new_zone, &sanitizer_zone, sizeof(sanitizer_zone));\n  new_zone->zone_name = NULL;  // The name will be changed anyway.\n  // Prevent the client app from overwriting the zone contents.\n  // Library functions that need to modify the zone will set PROT_WRITE on it.\n  // This matches the behavior of malloc_create_zone() on OSX 10.7 and higher.\n  mprotect(new_zone, allocated_size, PROT_READ);\n  // We're explicitly *NOT* registering the zone.\n  return new_zone;\n}\n\nINTERCEPTOR(void, malloc_destroy_zone, malloc_zone_t *zone) {\n  COMMON_MALLOC_ENTER();\n  // We don't need to do anything here.  We're not registering new zones, so we\n  // don't to unregister.  
Just un-mprotect and free() the zone.\n  uptr page_size = GetPageSizeCached();\n  uptr allocated_size = RoundUpTo(sizeof(sanitizer_zone), page_size);\n  mprotect(zone, allocated_size, PROT_READ | PROT_WRITE);\n  if (zone->zone_name) {\n    COMMON_MALLOC_FREE((void *)zone->zone_name);\n  }\n  COMMON_MALLOC_FREE(zone);\n}\n\nINTERCEPTOR(malloc_zone_t *, malloc_default_zone, void) {\n  COMMON_MALLOC_ENTER();\n  return &sanitizer_zone;\n}\n\nINTERCEPTOR(malloc_zone_t *, malloc_zone_from_ptr, const void *ptr) {\n  COMMON_MALLOC_ENTER();\n  size_t size = sanitizer_zone.size(&sanitizer_zone, ptr);\n  if (size) { // Claimed by sanitizer zone?\n    return &sanitizer_zone;\n  }\n  return REAL(malloc_zone_from_ptr)(ptr);\n}\n\nINTERCEPTOR(malloc_zone_t *, malloc_default_purgeable_zone, void) {\n  // FIXME: ASan should support purgeable allocations.\n  // https://github.com/google/sanitizers/issues/139\n  COMMON_MALLOC_ENTER();\n  return &sanitizer_zone;\n}\n\nINTERCEPTOR(void, malloc_make_purgeable, void *ptr) {\n  // FIXME: ASan should support purgeable allocations. Ignoring them is fine\n  // for now.\n  COMMON_MALLOC_ENTER();\n}\n\nINTERCEPTOR(int, malloc_make_nonpurgeable, void *ptr) {\n  // FIXME: ASan should support purgeable allocations. 
Ignoring them is fine\n  // for now.\n  COMMON_MALLOC_ENTER();\n  // Must return 0 if the contents were not purged since the last call to\n  // malloc_make_purgeable().\n  return 0;\n}\n\nINTERCEPTOR(void, malloc_set_zone_name, malloc_zone_t *zone, const char *name) {\n  COMMON_MALLOC_ENTER();\n  InternalScopedString new_name;\n  if (name && zone->introspect == sanitizer_zone.introspect) {\n    new_name.append(COMMON_MALLOC_ZONE_NAME \"-%s\", name);\n    name = new_name.data();\n  }\n\n  // Call the system malloc's implementation for both external and our zones,\n  // since that appropriately changes VM region protections on the zone.\n  REAL(malloc_set_zone_name)(zone, name);\n}\n\nINTERCEPTOR(void *, malloc, size_t size) {\n  COMMON_MALLOC_ENTER();\n  COMMON_MALLOC_MALLOC(size);\n  return p;\n}\n\nINTERCEPTOR(void, free, void *ptr) {\n  COMMON_MALLOC_ENTER();\n  if (!ptr) return;\n  COMMON_MALLOC_FREE(ptr);\n}\n\nINTERCEPTOR(void *, realloc, void *ptr, size_t size) {\n  COMMON_MALLOC_ENTER();\n  COMMON_MALLOC_REALLOC(ptr, size);\n  return p;\n}\n\nINTERCEPTOR(void *, calloc, size_t nmemb, size_t size) {\n  COMMON_MALLOC_ENTER();\n  COMMON_MALLOC_CALLOC(nmemb, size);\n  return p;\n}\n\nINTERCEPTOR(void *, valloc, size_t size) {\n  COMMON_MALLOC_ENTER();\n  COMMON_MALLOC_VALLOC(size);\n  return p;\n}\n\nINTERCEPTOR(size_t, malloc_good_size, size_t size) {\n  COMMON_MALLOC_ENTER();\n  return sanitizer_zone.introspect->good_size(&sanitizer_zone, size);\n}\n\nINTERCEPTOR(int, posix_memalign, void **memptr, size_t alignment, size_t size) {\n  COMMON_MALLOC_ENTER();\n  CHECK(memptr);\n  COMMON_MALLOC_POSIX_MEMALIGN(memptr, alignment, size);\n  return res;\n}\n\nnamespace {\n\n// TODO(glider): the __sanitizer_mz_* functions should be united with the Linux\n// wrappers, as they are basically copied from there.\nextern \"C\"\nSANITIZER_INTERFACE_ATTRIBUTE\nsize_t __sanitizer_mz_size(malloc_zone_t* zone, const void* ptr) {\n  COMMON_MALLOC_SIZE(ptr);\n  return 
size;\n}\n\nextern \"C\"\nSANITIZER_INTERFACE_ATTRIBUTE\nvoid *__sanitizer_mz_malloc(malloc_zone_t *zone, uptr size) {\n  COMMON_MALLOC_ENTER();\n  COMMON_MALLOC_MALLOC(size);\n  return p;\n}\n\nstruct DlsymAlloc : public DlSymAllocator<DlsymAlloc> {\n  static bool UseImpl() { return !COMMON_MALLOC_SANITIZER_INITIALIZED; }\n};\n\nextern \"C\"\nSANITIZER_INTERFACE_ATTRIBUTE\nvoid *__sanitizer_mz_calloc(malloc_zone_t *zone, size_t nmemb, size_t size) {\n  if (DlsymAlloc::Use())\n    return DlsymAlloc::Callocate(nmemb, size);\n  COMMON_MALLOC_CALLOC(nmemb, size);\n  return p;\n}\n\nextern \"C\"\nSANITIZER_INTERFACE_ATTRIBUTE\nvoid *__sanitizer_mz_valloc(malloc_zone_t *zone, size_t size) {\n  COMMON_MALLOC_ENTER();\n  COMMON_MALLOC_VALLOC(size);\n  return p;\n}\n\n// TODO(glider): the allocation callbacks need to be refactored.\nextern \"C\"\nSANITIZER_INTERFACE_ATTRIBUTE\nvoid __sanitizer_mz_free(malloc_zone_t *zone, void *ptr) {\n  if (!ptr) return;\n  if (DlsymAlloc::PointerIsMine(ptr))\n    return DlsymAlloc::Free(ptr);\n  COMMON_MALLOC_FREE(ptr);\n}\n\n#define GET_ZONE_FOR_PTR(ptr) \\\n  malloc_zone_t *zone_ptr = WRAP(malloc_zone_from_ptr)(ptr); \\\n  const char *zone_name = (zone_ptr == 0) ? 
0 : zone_ptr->zone_name\n\nextern \"C\"\nSANITIZER_INTERFACE_ATTRIBUTE\nvoid *__sanitizer_mz_realloc(malloc_zone_t *zone, void *ptr, size_t new_size) {\n  if (!ptr) {\n    COMMON_MALLOC_MALLOC(new_size);\n    return p;\n  } else {\n    COMMON_MALLOC_SIZE(ptr);\n    if (size) {\n      COMMON_MALLOC_REALLOC(ptr, new_size);\n      return p;\n    } else {\n      // We can't recover from reallocating an unknown address, because\n      // this would require reading at most |new_size| bytes from\n      // potentially unaccessible memory.\n      GET_ZONE_FOR_PTR(ptr);\n      COMMON_MALLOC_REPORT_UNKNOWN_REALLOC(ptr, zone_ptr, zone_name);\n      return nullptr;\n    }\n  }\n}\n\nextern \"C\"\nSANITIZER_INTERFACE_ATTRIBUTE\nvoid __sanitizer_mz_destroy(malloc_zone_t* zone) {\n  // A no-op -- we will not be destroyed!\n  Report(\"__sanitizer_mz_destroy() called -- ignoring\\n\");\n}\n\nextern \"C\"\nSANITIZER_INTERFACE_ATTRIBUTE\nvoid *__sanitizer_mz_memalign(malloc_zone_t *zone, size_t align, size_t size) {\n  COMMON_MALLOC_ENTER();\n  COMMON_MALLOC_MEMALIGN(align, size);\n  return p;\n}\n\n// This public API exists purely for testing purposes.\nextern \"C\"\nSANITIZER_INTERFACE_ATTRIBUTE\nmalloc_zone_t* __sanitizer_mz_default_zone() {\n  return &sanitizer_zone;\n}\n\n// This function is currently unused, and we build with -Werror.\n#if 0\nvoid __sanitizer_mz_free_definite_size(\n    malloc_zone_t* zone, void *ptr, size_t size) {\n  // TODO(glider): check that |size| is valid.\n  UNIMPLEMENTED();\n}\n#endif\n\n#ifndef COMMON_MALLOC_HAS_ZONE_ENUMERATOR\n#error \"COMMON_MALLOC_HAS_ZONE_ENUMERATOR must be defined\"\n#endif\nstatic_assert((COMMON_MALLOC_HAS_ZONE_ENUMERATOR) == 0 ||\n                  (COMMON_MALLOC_HAS_ZONE_ENUMERATOR) == 1,\n              \"COMMON_MALLOC_HAS_ZONE_ENUMERATOR must be 0 or 1\");\n\n#if COMMON_MALLOC_HAS_ZONE_ENUMERATOR\n// Forward declare and expect the implementation to provided by\n// includer.\nkern_return_t mi_enumerator(task_t task, void *, 
unsigned type_mask,\n                            vm_address_t zone_address, memory_reader_t reader,\n                            vm_range_recorder_t recorder);\n#else\n// Provide stub implementation that fails.\nkern_return_t mi_enumerator(task_t task, void *, unsigned type_mask,\n                            vm_address_t zone_address, memory_reader_t reader,\n                            vm_range_recorder_t recorder) {\n  // Not supported.\n  return KERN_FAILURE;\n}\n#endif\n\n#ifndef COMMON_MALLOC_HAS_EXTRA_INTROSPECTION_INIT\n#error \"COMMON_MALLOC_HAS_EXTRA_INTROSPECTION_INIT must be defined\"\n#endif\nstatic_assert((COMMON_MALLOC_HAS_EXTRA_INTROSPECTION_INIT) == 0 ||\n                  (COMMON_MALLOC_HAS_EXTRA_INTROSPECTION_INIT) == 1,\n              \"COMMON_MALLOC_HAS_EXTRA_INTROSPECTION_INIT must be 0 or 1\");\n#if COMMON_MALLOC_HAS_EXTRA_INTROSPECTION_INIT\n// Forward declare and expect the implementation to provided by\n// includer.\nvoid mi_extra_init(\n    sanitizer_malloc_introspection_t *mi);\n#else\nvoid mi_extra_init(\n    sanitizer_malloc_introspection_t *mi) {\n  // Just zero initialize the fields.\n  mi->allocator_ptr = 0;\n  mi->allocator_size = 0;\n}\n#endif\n\nsize_t mi_good_size(malloc_zone_t *zone, size_t size) {\n  // I think it's always safe to return size, but we maybe could do better.\n  return size;\n}\n\nboolean_t mi_check(malloc_zone_t *zone) {\n  UNIMPLEMENTED();\n}\n\nvoid mi_print(malloc_zone_t *zone, boolean_t verbose) {\n  UNIMPLEMENTED();\n}\n\nvoid mi_log(malloc_zone_t *zone, void *address) {\n  // I don't think we support anything like this\n}\n\nvoid mi_force_lock(malloc_zone_t *zone) {\n  COMMON_MALLOC_FORCE_LOCK();\n}\n\nvoid mi_force_unlock(malloc_zone_t *zone) {\n  COMMON_MALLOC_FORCE_UNLOCK();\n}\n\nvoid mi_statistics(malloc_zone_t *zone, malloc_statistics_t *stats) {\n  COMMON_MALLOC_FILL_STATS(zone, stats);\n}\n\nboolean_t mi_zone_locked(malloc_zone_t *zone) {\n  // UNIMPLEMENTED();\n  return false;\n}\n\n}  // unnamed 
namespace\n\nnamespace COMMON_MALLOC_NAMESPACE {\n\nvoid InitMallocZoneFields() {\n  static sanitizer_malloc_introspection_t sanitizer_zone_introspection;\n  // Ok to use internal_memset, these places are not performance-critical.\n  internal_memset(&sanitizer_zone_introspection, 0,\n                  sizeof(sanitizer_zone_introspection));\n\n  sanitizer_zone_introspection.enumerator = &mi_enumerator;\n  sanitizer_zone_introspection.good_size = &mi_good_size;\n  sanitizer_zone_introspection.check = &mi_check;\n  sanitizer_zone_introspection.print = &mi_print;\n  sanitizer_zone_introspection.log = &mi_log;\n  sanitizer_zone_introspection.force_lock = &mi_force_lock;\n  sanitizer_zone_introspection.force_unlock = &mi_force_unlock;\n  sanitizer_zone_introspection.statistics = &mi_statistics;\n  sanitizer_zone_introspection.zone_locked = &mi_zone_locked;\n\n  // Set current allocator enumeration version.\n  sanitizer_zone_introspection.allocator_enumeration_version =\n      GetMallocZoneAllocatorEnumerationVersion();\n\n  // Perform any sanitizer specific initialization.\n  mi_extra_init(&sanitizer_zone_introspection);\n\n  internal_memset(&sanitizer_zone, 0, sizeof(malloc_zone_t));\n\n  // Use version 6 for OSX >= 10.6.\n  sanitizer_zone.version = 6;\n  sanitizer_zone.zone_name = COMMON_MALLOC_ZONE_NAME;\n  sanitizer_zone.size = &__sanitizer_mz_size;\n  sanitizer_zone.malloc = &__sanitizer_mz_malloc;\n  sanitizer_zone.calloc = &__sanitizer_mz_calloc;\n  sanitizer_zone.valloc = &__sanitizer_mz_valloc;\n  sanitizer_zone.free = &__sanitizer_mz_free;\n  sanitizer_zone.realloc = &__sanitizer_mz_realloc;\n  sanitizer_zone.destroy = &__sanitizer_mz_destroy;\n  sanitizer_zone.batch_malloc = 0;\n  sanitizer_zone.batch_free = 0;\n  sanitizer_zone.free_definite_size = 0;\n  sanitizer_zone.memalign = &__sanitizer_mz_memalign;\n  sanitizer_zone.introspect = &sanitizer_zone_introspection;\n}\n\nvoid ReplaceSystemMalloc() {\n  InitMallocZoneFields();\n\n  // Register the zone.\n  
malloc_zone_register(&sanitizer_zone);\n}\n\n}  // namespace COMMON_MALLOC_NAMESPACE\n"
  },
  {
    "path": "runtime/sanitizer_common/sanitizer_mutex.cpp",
    "content": "//===-- sanitizer_mutex.cpp -----------------------------------------------===//\n//\n// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n// See https://llvm.org/LICENSE.txt for license information.\n// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n//\n//===----------------------------------------------------------------------===//\n//\n// This file is shared between AddressSanitizer and ThreadSanitizer\n// run-time libraries.\n//===----------------------------------------------------------------------===//\n\n#include \"sanitizer_mutex.h\"\n\n#include \"sanitizer_common.h\"\n\nnamespace __sanitizer {\n\nvoid StaticSpinMutex::LockSlow() {\n  for (int i = 0;; i++) {\n    if (i < 100)\n      proc_yield(1);\n    else\n      internal_sched_yield();\n    if (atomic_load(&state_, memory_order_relaxed) == 0 &&\n        atomic_exchange(&state_, 1, memory_order_acquire) == 0)\n      return;\n  }\n}\n\nvoid Semaphore::Wait() {\n  u32 count = atomic_load(&state_, memory_order_relaxed);\n  for (;;) {\n    if (count == 0) {\n      FutexWait(&state_, 0);\n      count = atomic_load(&state_, memory_order_relaxed);\n      continue;\n    }\n    if (atomic_compare_exchange_weak(&state_, &count, count - 1,\n                                     memory_order_acquire))\n      break;\n  }\n}\n\nvoid Semaphore::Post(u32 count) {\n  CHECK_NE(count, 0);\n  atomic_fetch_add(&state_, count, memory_order_release);\n  FutexWake(&state_, count);\n}\n\n#if SANITIZER_CHECK_DEADLOCKS\n// An empty mutex meta table, it effectively disables deadlock detection.\n// Each tool can override the table to define own mutex hierarchy and\n// enable deadlock detection.\n// The table defines a static mutex type hierarchy (what mutex types can be locked\n// under what mutex types). 
This table is checked to be acyclic and then\n// actual mutex lock/unlock operations are checked to adhere to this hierarchy.\n// The checking happens on mutex types rather than on individual mutex instances\n// because doing it on mutex instances will both significantly complicate\n// the implementation, worsen performance and memory overhead and is mostly\n// unnecessary (we almost never lock multiple mutexes of the same type recursively).\nstatic constexpr int kMutexTypeMax = 20;\nSANITIZER_WEAK_ATTRIBUTE MutexMeta mutex_meta[kMutexTypeMax] = {};\nSANITIZER_WEAK_ATTRIBUTE void PrintMutexPC(uptr pc) {}\nstatic StaticSpinMutex mutex_meta_mtx;\nstatic int mutex_type_count = -1;\n// Adjacency matrix of what mutexes can be locked under what mutexes.\nstatic bool mutex_can_lock[kMutexTypeMax][kMutexTypeMax];\n// Mutex types with MutexMulti mark.\nstatic bool mutex_multi[kMutexTypeMax];\n\nvoid DebugMutexInit() {\n  // Build adjacency matrix.\n  bool leaf[kMutexTypeMax];\n  internal_memset(&leaf, 0, sizeof(leaf));\n  int cnt[kMutexTypeMax];\n  internal_memset(&cnt, 0, sizeof(cnt));\n  for (int t = 0; t < kMutexTypeMax; t++) {\n    mutex_type_count = t;\n    if (!mutex_meta[t].name)\n      break;\n    CHECK_EQ(t, mutex_meta[t].type);\n    for (uptr j = 0; j < ARRAY_SIZE(mutex_meta[t].can_lock); j++) {\n      MutexType z = mutex_meta[t].can_lock[j];\n      if (z == MutexInvalid)\n        break;\n      if (z == MutexLeaf) {\n        CHECK(!leaf[t]);\n        leaf[t] = true;\n        continue;\n      }\n      if (z == MutexMulti) {\n        mutex_multi[t] = true;\n        continue;\n      }\n      CHECK_LT(z, kMutexTypeMax);\n      CHECK(!mutex_can_lock[t][z]);\n      mutex_can_lock[t][z] = true;\n      cnt[t]++;\n    }\n  }\n  // Indicates the array is not properly terminated.\n  CHECK_LT(mutex_type_count, kMutexTypeMax);\n  // Add leaf mutexes.\n  for (int t = 0; t < mutex_type_count; t++) {\n    if (!leaf[t])\n      continue;\n    CHECK_EQ(cnt[t], 0);\n    for (int z = 
0; z < mutex_type_count; z++) {\n      if (z == MutexInvalid || t == z || leaf[z])\n        continue;\n      CHECK(!mutex_can_lock[z][t]);\n      mutex_can_lock[z][t] = true;\n    }\n  }\n  // Build the transitive closure and check that the graphs is acyclic.\n  u32 trans[kMutexTypeMax];\n  static_assert(sizeof(trans[0]) * 8 >= kMutexTypeMax,\n                \"kMutexTypeMax does not fit into u32, switch to u64\");\n  internal_memset(&trans, 0, sizeof(trans));\n  for (int i = 0; i < mutex_type_count; i++) {\n    for (int j = 0; j < mutex_type_count; j++)\n      if (mutex_can_lock[i][j])\n        trans[i] |= 1 << j;\n  }\n  for (int k = 0; k < mutex_type_count; k++) {\n    for (int i = 0; i < mutex_type_count; i++) {\n      if (trans[i] & (1 << k))\n        trans[i] |= trans[k];\n    }\n  }\n  for (int i = 0; i < mutex_type_count; i++) {\n    if (trans[i] & (1 << i)) {\n      Printf(\"Mutex %s participates in a cycle\\n\", mutex_meta[i].name);\n      Die();\n    }\n  }\n}\n\nstruct InternalDeadlockDetector {\n  struct LockDesc {\n    u64 seq;\n    uptr pc;\n    int recursion;\n  };\n  int initialized;\n  u64 sequence;\n  LockDesc locked[kMutexTypeMax];\n\n  void Lock(MutexType type, uptr pc) {\n    if (!Initialize(type))\n      return;\n    CHECK_LT(type, mutex_type_count);\n    // Find the last locked mutex type.\n    // This is the type we will use for hierarchy checks.\n    u64 max_seq = 0;\n    MutexType max_idx = MutexInvalid;\n    for (int i = 0; i != mutex_type_count; i++) {\n      if (locked[i].seq == 0)\n        continue;\n      CHECK_NE(locked[i].seq, max_seq);\n      if (max_seq < locked[i].seq) {\n        max_seq = locked[i].seq;\n        max_idx = (MutexType)i;\n      }\n    }\n    if (max_idx == type && mutex_multi[type]) {\n      // Recursive lock of the same type.\n      CHECK_EQ(locked[type].seq, max_seq);\n      CHECK(locked[type].pc);\n      locked[type].recursion++;\n      return;\n    }\n    if (max_idx != MutexInvalid && 
!mutex_can_lock[max_idx][type]) {\n      Printf(\"%s: internal deadlock: can't lock %s under %s mutex\\n\", SanitizerToolName,\n             mutex_meta[type].name, mutex_meta[max_idx].name);\n      PrintMutexPC(locked[max_idx].pc);\n      CHECK(0);\n    }\n    locked[type].seq = ++sequence;\n    locked[type].pc = pc;\n    locked[type].recursion = 1;\n  }\n\n  void Unlock(MutexType type) {\n    if (!Initialize(type))\n      return;\n    CHECK_LT(type, mutex_type_count);\n    CHECK(locked[type].seq);\n    CHECK_GT(locked[type].recursion, 0);\n    if (--locked[type].recursion)\n      return;\n    locked[type].seq = 0;\n    locked[type].pc = 0;\n  }\n\n  void CheckNoLocks() {\n    for (int i = 0; i < mutex_type_count; i++) CHECK_EQ(locked[i].recursion, 0);\n  }\n\n  bool Initialize(MutexType type) {\n    if (type == MutexUnchecked || type == MutexInvalid)\n      return false;\n    CHECK_GT(type, MutexInvalid);\n    if (initialized != 0)\n      return initialized > 0;\n    initialized = -1;\n    SpinMutexLock lock(&mutex_meta_mtx);\n    if (mutex_type_count < 0)\n      DebugMutexInit();\n    initialized = mutex_type_count ? 1 : -1;\n    return initialized > 0;\n  }\n};\n\nstatic THREADLOCAL InternalDeadlockDetector deadlock_detector;\n\nvoid CheckedMutex::LockImpl(uptr pc) { deadlock_detector.Lock(type_, pc); }\n\nvoid CheckedMutex::UnlockImpl() { deadlock_detector.Unlock(type_); }\n\nvoid CheckedMutex::CheckNoLocksImpl() { deadlock_detector.CheckNoLocks(); }\n#endif\n\n}  // namespace __sanitizer\n"
  },
  {
    "path": "runtime/sanitizer_common/sanitizer_mutex.h",
    "content": "//===-- sanitizer_mutex.h ---------------------------------------*- C++ -*-===//\n//\n// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n// See https://llvm.org/LICENSE.txt for license information.\n// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n//\n//===----------------------------------------------------------------------===//\n//\n// This file is a part of ThreadSanitizer/AddressSanitizer runtime.\n//\n//===----------------------------------------------------------------------===//\n\n#ifndef SANITIZER_MUTEX_H\n#define SANITIZER_MUTEX_H\n\n#include \"sanitizer_atomic.h\"\n#include \"sanitizer_internal_defs.h\"\n#include \"sanitizer_libc.h\"\n#include \"sanitizer_thread_safety.h\"\n\nnamespace __sanitizer {\n\nclass SANITIZER_MUTEX StaticSpinMutex {\n public:\n  void Init() {\n    atomic_store(&state_, 0, memory_order_relaxed);\n  }\n\n  void Lock() SANITIZER_ACQUIRE() {\n    if (LIKELY(TryLock()))\n      return;\n    LockSlow();\n  }\n\n  bool TryLock() SANITIZER_TRY_ACQUIRE(true) {\n    return atomic_exchange(&state_, 1, memory_order_acquire) == 0;\n  }\n\n  void Unlock() SANITIZER_RELEASE() {\n    atomic_store(&state_, 0, memory_order_release);\n  }\n\n  void CheckLocked() const SANITIZER_CHECK_LOCKED() {\n    CHECK_EQ(atomic_load(&state_, memory_order_relaxed), 1);\n  }\n\n private:\n  atomic_uint8_t state_;\n\n  void LockSlow();\n};\n\nclass SANITIZER_MUTEX SpinMutex : public StaticSpinMutex {\n public:\n  SpinMutex() {\n    Init();\n  }\n\n  SpinMutex(const SpinMutex &) = delete;\n  void operator=(const SpinMutex &) = delete;\n};\n\n// Semaphore provides an OS-dependent way to park/unpark threads.\n// The last thread returned from Wait can destroy the object\n// (destruction-safety).\nclass Semaphore {\n public:\n  constexpr Semaphore() {}\n  Semaphore(const Semaphore &) = delete;\n  void operator=(const Semaphore &) = delete;\n\n  void Wait();\n  void Post(u32 count = 1);\n\n private:\n  
atomic_uint32_t state_ = {0};\n};\n\ntypedef int MutexType;\n\nenum {\n  // Used as sentinel and to catch unassigned types\n  // (should not be used as real Mutex type).\n  MutexInvalid = 0,\n  MutexThreadRegistry,\n  // Each tool own mutexes must start at this number.\n  MutexLastCommon,\n  // Type for legacy mutexes that are not checked for deadlocks.\n  MutexUnchecked = -1,\n  // Special marks that can be used in MutexMeta::can_lock table.\n  // The leaf mutexes can be locked under any other non-leaf mutex,\n  // but no other mutex can be locked while under a leaf mutex.\n  MutexLeaf = -1,\n  // Multiple mutexes of this type can be locked at the same time.\n  MutexMulti = -3,\n};\n\n// Go linker does not support THREADLOCAL variables,\n// so we can't use per-thread state.\n// Disable checked locks on Darwin. Although Darwin platforms support\n// THREADLOCAL variables they are not usable early on during process init when\n// `__sanitizer::Mutex` is used.\n#define SANITIZER_CHECK_DEADLOCKS \\\n  (SANITIZER_DEBUG && !SANITIZER_GO && SANITIZER_SUPPORTS_THREADLOCAL && !SANITIZER_MAC)\n\n#if SANITIZER_CHECK_DEADLOCKS\nstruct MutexMeta {\n  MutexType type;\n  const char *name;\n  // The table fixes what mutexes can be locked under what mutexes.\n  // If the entry for MutexTypeFoo contains MutexTypeBar,\n  // then Bar mutex can be locked while under Foo mutex.\n  // Can also contain the special MutexLeaf/MutexMulti marks.\n  MutexType can_lock[10];\n};\n#endif\n\nclass CheckedMutex {\n public:\n  explicit constexpr CheckedMutex(MutexType type)\n#if SANITIZER_CHECK_DEADLOCKS\n      : type_(type)\n#endif\n  {\n  }\n\n  ALWAYS_INLINE void Lock() {\n#if SANITIZER_CHECK_DEADLOCKS\n    LockImpl(GET_CALLER_PC());\n#endif\n  }\n\n  ALWAYS_INLINE void Unlock() {\n#if SANITIZER_CHECK_DEADLOCKS\n    UnlockImpl();\n#endif\n  }\n\n  // Checks that the current thread does not hold any mutexes\n  // (e.g. 
when returning from a runtime function to user code).\n  static void CheckNoLocks() {\n#if SANITIZER_CHECK_DEADLOCKS\n    CheckNoLocksImpl();\n#endif\n  }\n\n private:\n#if SANITIZER_CHECK_DEADLOCKS\n  const MutexType type_;\n\n  void LockImpl(uptr pc);\n  void UnlockImpl();\n  static void CheckNoLocksImpl();\n#endif\n};\n\n// Reader-writer mutex.\n// Derive from CheckedMutex for the purposes of EBO.\n// We could make it a field marked with [[no_unique_address]],\n// but this attribute is not supported by some older compilers.\nclass SANITIZER_MUTEX Mutex : CheckedMutex {\n public:\n  explicit constexpr Mutex(MutexType type = MutexUnchecked)\n      : CheckedMutex(type) {}\n\n  void Lock() SANITIZER_ACQUIRE() {\n    CheckedMutex::Lock();\n    u64 reset_mask = ~0ull;\n    u64 state = atomic_load_relaxed(&state_);\n    for (uptr spin_iters = 0;; spin_iters++) {\n      u64 new_state;\n      bool locked = (state & (kWriterLock | kReaderLockMask)) != 0;\n      if (LIKELY(!locked)) {\n        // The mutex is not read-/write-locked, try to lock.\n        new_state = (state | kWriterLock) & reset_mask;\n      } else if (spin_iters > kMaxSpinIters) {\n        // We've spun enough, increment waiting writers count and block.\n        // The counter will be decremented by whoever wakes us.\n        new_state = (state + kWaitingWriterInc) & reset_mask;\n      } else if ((state & kWriterSpinWait) == 0) {\n        // Active spinning, but denote our presence so that unlocking\n        // thread does not wake up other threads.\n        new_state = state | kWriterSpinWait;\n      } else {\n        // Active spinning.\n        state = atomic_load(&state_, memory_order_relaxed);\n        continue;\n      }\n      if (UNLIKELY(!atomic_compare_exchange_weak(&state_, &state, new_state,\n                                                 memory_order_acquire)))\n        continue;\n      if (LIKELY(!locked))\n        return;  // We've locked the mutex.\n      if (spin_iters > kMaxSpinIters) 
{\n        // We've incremented waiting writers, so now block.\n        writers_.Wait();\n        spin_iters = 0;\n      } else {\n        // We've set kWriterSpinWait, but we are still in active spinning.\n      }\n      // We either blocked and were unblocked,\n      // or we just spun but set kWriterSpinWait.\n      // Either way we need to reset kWriterSpinWait\n      // next time we take the lock or block again.\n      reset_mask = ~kWriterSpinWait;\n      state = atomic_load(&state_, memory_order_relaxed);\n      DCHECK_NE(state & kWriterSpinWait, 0);\n    }\n  }\n\n  void Unlock() SANITIZER_RELEASE() {\n    CheckedMutex::Unlock();\n    bool wake_writer;\n    u64 wake_readers;\n    u64 new_state;\n    u64 state = atomic_load_relaxed(&state_);\n    do {\n      DCHECK_NE(state & kWriterLock, 0);\n      DCHECK_EQ(state & kReaderLockMask, 0);\n      new_state = state & ~kWriterLock;\n      wake_writer = (state & (kWriterSpinWait | kReaderSpinWait)) == 0 &&\n                    (state & kWaitingWriterMask) != 0;\n      if (wake_writer)\n        new_state = (new_state - kWaitingWriterInc) | kWriterSpinWait;\n      wake_readers =\n          wake_writer || (state & kWriterSpinWait) != 0\n              ? 
0\n              : ((state & kWaitingReaderMask) >> kWaitingReaderShift);\n      if (wake_readers)\n        new_state = (new_state & ~kWaitingReaderMask) | kReaderSpinWait;\n    } while (UNLIKELY(!atomic_compare_exchange_weak(&state_, &state, new_state,\n                                                    memory_order_release)));\n    if (UNLIKELY(wake_writer))\n      writers_.Post();\n    else if (UNLIKELY(wake_readers))\n      readers_.Post(wake_readers);\n  }\n\n  void ReadLock() SANITIZER_ACQUIRE_SHARED() {\n    CheckedMutex::Lock();\n    u64 reset_mask = ~0ull;\n    u64 state = atomic_load_relaxed(&state_);\n    for (uptr spin_iters = 0;; spin_iters++) {\n      bool locked = (state & kWriterLock) != 0;\n      u64 new_state;\n      if (LIKELY(!locked)) {\n        new_state = (state + kReaderLockInc) & reset_mask;\n      } else if (spin_iters > kMaxSpinIters) {\n        new_state = (state + kWaitingReaderInc) & reset_mask;\n      } else if ((state & kReaderSpinWait) == 0) {\n        // Active spinning, but denote our presence so that unlocking\n        // thread does not wake up other threads.\n        new_state = state | kReaderSpinWait;\n      } else {\n        // Active spinning.\n        state = atomic_load(&state_, memory_order_relaxed);\n        continue;\n      }\n      if (UNLIKELY(!atomic_compare_exchange_weak(&state_, &state, new_state,\n                                                 memory_order_acquire)))\n        continue;\n      if (LIKELY(!locked))\n        return;  // We've locked the mutex.\n      if (spin_iters > kMaxSpinIters) {\n        // We've incremented waiting readers, so now block.\n        readers_.Wait();\n        spin_iters = 0;\n      } else {\n        // We've set kReaderSpinWait, but we are still in active spinning.\n      }\n      reset_mask = ~kReaderSpinWait;\n      state = atomic_load(&state_, memory_order_relaxed);\n    }\n  }\n\n  void ReadUnlock() SANITIZER_RELEASE_SHARED() {\n    CheckedMutex::Unlock();\n    bool wake;\n 
   u64 new_state;\n    u64 state = atomic_load_relaxed(&state_);\n    do {\n      DCHECK_NE(state & kReaderLockMask, 0);\n      DCHECK_EQ(state & kWriterLock, 0);\n      new_state = state - kReaderLockInc;\n      wake = (new_state &\n              (kReaderLockMask | kWriterSpinWait | kReaderSpinWait)) == 0 &&\n             (new_state & kWaitingWriterMask) != 0;\n      if (wake)\n        new_state = (new_state - kWaitingWriterInc) | kWriterSpinWait;\n    } while (UNLIKELY(!atomic_compare_exchange_weak(&state_, &state, new_state,\n                                                    memory_order_release)));\n    if (UNLIKELY(wake))\n      writers_.Post();\n  }\n\n  // This function does not guarantee an explicit check that the calling thread\n  // is the thread which owns the mutex. This behavior, while more strictly\n  // correct, causes problems in cases like StopTheWorld, where a parent thread\n  // owns the mutex but a child checks that it is locked. Rather than\n  // maintaining complex state to work around those situations, the check only\n  // checks that the mutex is owned.\n  void CheckWriteLocked() const SANITIZER_CHECK_LOCKED() {\n    CHECK(atomic_load(&state_, memory_order_relaxed) & kWriterLock);\n  }\n\n  void CheckLocked() const SANITIZER_CHECK_LOCKED() { CheckWriteLocked(); }\n\n  void CheckReadLocked() const SANITIZER_CHECK_LOCKED() {\n    CHECK(atomic_load(&state_, memory_order_relaxed) & kReaderLockMask);\n  }\n\n private:\n  atomic_uint64_t state_ = {0};\n  Semaphore writers_;\n  Semaphore readers_;\n\n  // The state has 3 counters:\n  //  - number of readers holding the lock,\n  //    if non zero, the mutex is read-locked\n  //  - number of waiting readers,\n  //    if not zero, the mutex is write-locked\n  //  - number of waiting writers,\n  //    if non zero, the mutex is read- or write-locked\n  // And 2 flags:\n  //  - writer lock\n  //    if set, the mutex is write-locked\n  //  - a writer is awake and spin-waiting\n  //    the flag is used 
to prevent thundering herd problem\n  //    (new writers are not woken if this flag is set)\n  //  - a reader is awake and spin-waiting\n  //\n  // Both writers and readers use active spinning before blocking.\n  // But readers are more aggressive and always take the mutex\n  // if there are any other readers.\n  // After wake up both writers and readers compete to lock the\n  // mutex again. This is needed to allow repeated locks even in presence\n  // of other blocked threads.\n  static constexpr u64 kCounterWidth = 20;\n  static constexpr u64 kReaderLockShift = 0;\n  static constexpr u64 kReaderLockInc = 1ull << kReaderLockShift;\n  static constexpr u64 kReaderLockMask = ((1ull << kCounterWidth) - 1)\n                                         << kReaderLockShift;\n  static constexpr u64 kWaitingReaderShift = kCounterWidth;\n  static constexpr u64 kWaitingReaderInc = 1ull << kWaitingReaderShift;\n  static constexpr u64 kWaitingReaderMask = ((1ull << kCounterWidth) - 1)\n                                            << kWaitingReaderShift;\n  static constexpr u64 kWaitingWriterShift = 2 * kCounterWidth;\n  static constexpr u64 kWaitingWriterInc = 1ull << kWaitingWriterShift;\n  static constexpr u64 kWaitingWriterMask = ((1ull << kCounterWidth) - 1)\n                                            << kWaitingWriterShift;\n  static constexpr u64 kWriterLock = 1ull << (3 * kCounterWidth);\n  static constexpr u64 kWriterSpinWait = 1ull << (3 * kCounterWidth + 1);\n  static constexpr u64 kReaderSpinWait = 1ull << (3 * kCounterWidth + 2);\n\n  static constexpr uptr kMaxSpinIters = 1500;\n\n  Mutex(LinkerInitialized) = delete;\n  Mutex(const Mutex &) = delete;\n  void operator=(const Mutex &) = delete;\n};\n\nvoid FutexWait(atomic_uint32_t *p, u32 cmp);\nvoid FutexWake(atomic_uint32_t *p, u32 count);\n\ntemplate <typename MutexType>\nclass SANITIZER_SCOPED_LOCK GenericScopedLock {\n public:\n  explicit GenericScopedLock(MutexType *mu) SANITIZER_ACQUIRE(mu) : mu_(mu) {\n    
mu_->Lock();\n  }\n\n  ~GenericScopedLock() SANITIZER_RELEASE() { mu_->Unlock(); }\n\n private:\n  MutexType *mu_;\n\n  GenericScopedLock(const GenericScopedLock &) = delete;\n  void operator=(const GenericScopedLock &) = delete;\n};\n\ntemplate <typename MutexType>\nclass SANITIZER_SCOPED_LOCK GenericScopedReadLock {\n public:\n  explicit GenericScopedReadLock(MutexType *mu) SANITIZER_ACQUIRE(mu)\n      : mu_(mu) {\n    mu_->ReadLock();\n  }\n\n  ~GenericScopedReadLock() SANITIZER_RELEASE() { mu_->ReadUnlock(); }\n\n private:\n  MutexType *mu_;\n\n  GenericScopedReadLock(const GenericScopedReadLock &) = delete;\n  void operator=(const GenericScopedReadLock &) = delete;\n};\n\ntemplate <typename MutexType>\nclass SANITIZER_SCOPED_LOCK GenericScopedRWLock {\n public:\n  ALWAYS_INLINE explicit GenericScopedRWLock(MutexType *mu, bool write)\n      SANITIZER_ACQUIRE(mu)\n      : mu_(mu), write_(write) {\n    if (write_)\n      mu_->Lock();\n    else\n      mu_->ReadLock();\n  }\n\n  ALWAYS_INLINE ~GenericScopedRWLock() SANITIZER_RELEASE() {\n    if (write_)\n      mu_->Unlock();\n    else\n      mu_->ReadUnlock();\n  }\n\n private:\n  MutexType *mu_;\n  bool write_;\n\n  GenericScopedRWLock(const GenericScopedRWLock &) = delete;\n  void operator=(const GenericScopedRWLock &) = delete;\n};\n\ntypedef GenericScopedLock<StaticSpinMutex> SpinMutexLock;\ntypedef GenericScopedLock<Mutex> Lock;\ntypedef GenericScopedReadLock<Mutex> ReadLock;\ntypedef GenericScopedRWLock<Mutex> RWLock;\n\n}  // namespace __sanitizer\n\n#endif  // SANITIZER_MUTEX_H\n"
  },
  {
    "path": "runtime/sanitizer_common/sanitizer_netbsd.cpp",
    "content": "//===-- sanitizer_netbsd.cpp ----------------------------------------------===//\n//\n// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n// See https://llvm.org/LICENSE.txt for license information.\n// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n//\n//===----------------------------------------------------------------------===//\n//\n// This file is shared between Sanitizer run-time libraries and implements\n// NetBSD-specific functions from sanitizer_libc.h.\n//===----------------------------------------------------------------------===//\n\n#include \"sanitizer_platform.h\"\n\n#if SANITIZER_NETBSD\n\n#include \"sanitizer_common.h\"\n#include \"sanitizer_flags.h\"\n#include \"sanitizer_getauxval.h\"\n#include \"sanitizer_internal_defs.h\"\n#include \"sanitizer_libc.h\"\n#include \"sanitizer_linux.h\"\n#include \"sanitizer_mutex.h\"\n#include \"sanitizer_placement_new.h\"\n#include \"sanitizer_procmaps.h\"\n\n#include <sys/param.h>\n#include <sys/types.h>\n\n#include <sys/exec.h>\n#include <sys/mman.h>\n#include <sys/ptrace.h>\n#include <sys/resource.h>\n#include <sys/stat.h>\n#include <sys/syscall.h>\n#include <sys/sysctl.h>\n#include <sys/time.h>\n\n#include <dlfcn.h>\n#include <errno.h>\n#include <fcntl.h>\n#include <limits.h>\n#include <link.h>\n#include <lwp.h>\n#include <pthread.h>\n#include <sched.h>\n#include <signal.h>\n#include <ucontext.h>\n#include <unistd.h>\n\nextern \"C\" void *__mmap(void *, size_t, int, int, int, int,\n                        off_t) SANITIZER_WEAK_ATTRIBUTE;\nextern \"C\" int __sysctl(const int *, unsigned int, void *, size_t *,\n                        const void *, size_t) SANITIZER_WEAK_ATTRIBUTE;\nextern \"C\" int _sys_close(int) SANITIZER_WEAK_ATTRIBUTE;\nextern \"C\" int _sys_open(const char *, int, ...) 
SANITIZER_WEAK_ATTRIBUTE;\nextern \"C\" ssize_t _sys_read(int, void *, size_t) SANITIZER_WEAK_ATTRIBUTE;\nextern \"C\" ssize_t _sys_write(int, const void *,\n                              size_t) SANITIZER_WEAK_ATTRIBUTE;\nextern \"C\" int __ftruncate(int, int, off_t) SANITIZER_WEAK_ATTRIBUTE;\nextern \"C\" ssize_t _sys_readlink(const char *, char *,\n                                 size_t) SANITIZER_WEAK_ATTRIBUTE;\nextern \"C\" int _sys_sched_yield() SANITIZER_WEAK_ATTRIBUTE;\nextern \"C\" int _sys___nanosleep50(const void *,\n                                  void *) SANITIZER_WEAK_ATTRIBUTE;\nextern \"C\" int _sys_execve(const char *, char *const[],\n                           char *const[]) SANITIZER_WEAK_ATTRIBUTE;\nextern \"C\" off_t __lseek(int, int, off_t, int) SANITIZER_WEAK_ATTRIBUTE;\nextern \"C\" int __fork() SANITIZER_WEAK_ATTRIBUTE;\nextern \"C\" int _sys___sigprocmask14(int, const void *,\n                                    void *) SANITIZER_WEAK_ATTRIBUTE;\nextern \"C\" int _sys___wait450(int wpid, int *, int,\n                              void *) SANITIZER_WEAK_ATTRIBUTE;\n\nnamespace __sanitizer {\n\nstatic void *GetRealLibcAddress(const char *symbol) {\n  void *real = dlsym(RTLD_NEXT, symbol);\n  if (!real)\n    real = dlsym(RTLD_DEFAULT, symbol);\n  if (!real) {\n    Printf(\"GetRealLibcAddress failed for symbol=%s\", symbol);\n    Die();\n  }\n  return real;\n}\n\n#define _REAL(func, ...) real##_##func(__VA_ARGS__)\n#define DEFINE__REAL(ret_type, func, ...)                              
\\\n  static ret_type (*real_##func)(__VA_ARGS__) = NULL;                  \\\n  if (!real_##func) {                                                  \\\n    real_##func = (ret_type(*)(__VA_ARGS__))GetRealLibcAddress(#func); \\\n  }                                                                    \\\n  CHECK(real_##func);\n\n// --------------- sanitizer_libc.h\nuptr internal_mmap(void *addr, uptr length, int prot, int flags, int fd,\n                   u64 offset) {\n  CHECK(&__mmap);\n  return (uptr)__mmap(addr, length, prot, flags, fd, 0, offset);\n}\n\nuptr internal_munmap(void *addr, uptr length) {\n  DEFINE__REAL(int, munmap, void *a, uptr b);\n  return _REAL(munmap, addr, length);\n}\n\nuptr internal_mremap(void *old_address, uptr old_size, uptr new_size, int flags,\n                     void *new_address) {\n  CHECK(false && \"internal_mremap is unimplemented on NetBSD\");\n  return 0;\n}\n\nint internal_mprotect(void *addr, uptr length, int prot) {\n  DEFINE__REAL(int, mprotect, void *a, uptr b, int c);\n  return _REAL(mprotect, addr, length, prot);\n}\n\nint internal_madvise(uptr addr, uptr length, int advice) {\n  DEFINE__REAL(int, madvise, void *a, uptr b, int c);\n  return _REAL(madvise, (void *)addr, length, advice);\n}\n\nuptr internal_close(fd_t fd) {\n  CHECK(&_sys_close);\n  return _sys_close(fd);\n}\n\nuptr internal_open(const char *filename, int flags) {\n  CHECK(&_sys_open);\n  return _sys_open(filename, flags);\n}\n\nuptr internal_open(const char *filename, int flags, u32 mode) {\n  CHECK(&_sys_open);\n  return _sys_open(filename, flags, mode);\n}\n\nuptr internal_read(fd_t fd, void *buf, uptr count) {\n  sptr res;\n  CHECK(&_sys_read);\n  HANDLE_EINTR(res, (sptr)_sys_read(fd, buf, (size_t)count));\n  return res;\n}\n\nuptr internal_write(fd_t fd, const void *buf, uptr count) {\n  sptr res;\n  CHECK(&_sys_write);\n  HANDLE_EINTR(res, (sptr)_sys_write(fd, buf, count));\n  return res;\n}\n\nuptr internal_ftruncate(fd_t fd, uptr size) {\n  sptr 
res;\n  CHECK(&__ftruncate);\n  HANDLE_EINTR(res, __ftruncate(fd, 0, (s64)size));\n  return res;\n}\n\nuptr internal_stat(const char *path, void *buf) {\n  DEFINE__REAL(int, __stat50, const char *a, void *b);\n  return _REAL(__stat50, path, buf);\n}\n\nuptr internal_lstat(const char *path, void *buf) {\n  DEFINE__REAL(int, __lstat50, const char *a, void *b);\n  return _REAL(__lstat50, path, buf);\n}\n\nuptr internal_fstat(fd_t fd, void *buf) {\n  DEFINE__REAL(int, __fstat50, int a, void *b);\n  return _REAL(__fstat50, fd, buf);\n}\n\nuptr internal_filesize(fd_t fd) {\n  struct stat st;\n  if (internal_fstat(fd, &st))\n    return -1;\n  return (uptr)st.st_size;\n}\n\nuptr internal_dup(int oldfd) {\n  DEFINE__REAL(int, dup, int a);\n  return _REAL(dup, oldfd);\n}\n\nuptr internal_dup2(int oldfd, int newfd) {\n  DEFINE__REAL(int, dup2, int a, int b);\n  return _REAL(dup2, oldfd, newfd);\n}\n\nuptr internal_readlink(const char *path, char *buf, uptr bufsize) {\n  CHECK(&_sys_readlink);\n  return (uptr)_sys_readlink(path, buf, bufsize);\n}\n\nuptr internal_unlink(const char *path) {\n  DEFINE__REAL(int, unlink, const char *a);\n  return _REAL(unlink, path);\n}\n\nuptr internal_rename(const char *oldpath, const char *newpath) {\n  DEFINE__REAL(int, rename, const char *a, const char *b);\n  return _REAL(rename, oldpath, newpath);\n}\n\nuptr internal_sched_yield() {\n  CHECK(&_sys_sched_yield);\n  return _sys_sched_yield();\n}\n\nvoid internal__exit(int exitcode) {\n  DEFINE__REAL(void, _exit, int a);\n  _REAL(_exit, exitcode);\n  Die();  // Unreachable.\n}\n\nvoid internal_usleep(u64 useconds) {\n  struct timespec ts;\n  ts.tv_sec = useconds / 1000000;\n  ts.tv_nsec = (useconds % 1000000) * 1000;\n  CHECK(&_sys___nanosleep50);\n  _sys___nanosleep50(&ts, &ts);\n}\n\nuptr internal_execve(const char *filename, char *const argv[],\n                     char *const envp[]) {\n  CHECK(&_sys_execve);\n  return _sys_execve(filename, argv, envp);\n}\n\ntid_t GetTid() {\n  
DEFINE__REAL(int, _lwp_self);\n  return _REAL(_lwp_self);\n}\n\nint TgKill(pid_t pid, tid_t tid, int sig) {\n  DEFINE__REAL(int, _lwp_kill, int a, int b);\n  (void)pid;\n  return _REAL(_lwp_kill, tid, sig);\n}\n\nu64 NanoTime() {\n  timeval tv;\n  DEFINE__REAL(int, __gettimeofday50, void *a, void *b);\n  internal_memset(&tv, 0, sizeof(tv));\n  _REAL(__gettimeofday50, &tv, 0);\n  return (u64)tv.tv_sec * 1000 * 1000 * 1000 + tv.tv_usec * 1000;\n}\n\nuptr internal_clock_gettime(__sanitizer_clockid_t clk_id, void *tp) {\n  DEFINE__REAL(int, __clock_gettime50, __sanitizer_clockid_t a, void *b);\n  return _REAL(__clock_gettime50, clk_id, tp);\n}\n\nuptr internal_ptrace(int request, int pid, void *addr, int data) {\n  DEFINE__REAL(int, ptrace, int a, int b, void *c, int d);\n  return _REAL(ptrace, request, pid, addr, data);\n}\n\nuptr internal_waitpid(int pid, int *status, int options) {\n  CHECK(&_sys___wait450);\n  return _sys___wait450(pid, status, options, 0 /* rusage */);\n}\n\nuptr internal_getpid() {\n  DEFINE__REAL(int, getpid);\n  return _REAL(getpid);\n}\n\nuptr internal_getppid() {\n  DEFINE__REAL(int, getppid);\n  return _REAL(getppid);\n}\n\nint internal_dlinfo(void *handle, int request, void *p) {\n  DEFINE__REAL(int, dlinfo, void *a, int b, void *c);\n  return _REAL(dlinfo, handle, request, p);\n}\n\nuptr internal_getdents(fd_t fd, void *dirp, unsigned int count) {\n  DEFINE__REAL(int, __getdents30, int a, void *b, size_t c);\n  return _REAL(__getdents30, fd, dirp, count);\n}\n\nuptr internal_lseek(fd_t fd, OFF_T offset, int whence) {\n  CHECK(&__lseek);\n  return __lseek(fd, 0, offset, whence);\n}\n\nuptr internal_prctl(int option, uptr arg2, uptr arg3, uptr arg4, uptr arg5) {\n  Printf(\"internal_prctl not implemented for NetBSD\");\n  Die();\n  return 0;\n}\n\nuptr internal_sigaltstack(const void *ss, void *oss) {\n  DEFINE__REAL(int, __sigaltstack14, const void *a, void *b);\n  return _REAL(__sigaltstack14, ss, oss);\n}\n\nint internal_fork() {\n  
CHECK(&__fork);\n  return __fork();\n}\n\nint internal_sysctl(const int *name, unsigned int namelen, void *oldp,\n                    uptr *oldlenp, const void *newp, uptr newlen) {\n  CHECK(&__sysctl);\n  return __sysctl(name, namelen, oldp, (size_t *)oldlenp, newp, (size_t)newlen);\n}\n\nint internal_sysctlbyname(const char *sname, void *oldp, uptr *oldlenp,\n                          const void *newp, uptr newlen) {\n  DEFINE__REAL(int, sysctlbyname, const char *a, void *b, size_t *c,\n               const void *d, size_t e);\n  return _REAL(sysctlbyname, sname, oldp, (size_t *)oldlenp, newp,\n               (size_t)newlen);\n}\n\nuptr internal_sigprocmask(int how, __sanitizer_sigset_t *set,\n                          __sanitizer_sigset_t *oldset) {\n  CHECK(&_sys___sigprocmask14);\n  return _sys___sigprocmask14(how, set, oldset);\n}\n\nvoid internal_sigfillset(__sanitizer_sigset_t *set) {\n  DEFINE__REAL(int, __sigfillset14, const void *a);\n  (void)_REAL(__sigfillset14, set);\n}\n\nvoid internal_sigemptyset(__sanitizer_sigset_t *set) {\n  DEFINE__REAL(int, __sigemptyset14, const void *a);\n  (void)_REAL(__sigemptyset14, set);\n}\n\nvoid internal_sigdelset(__sanitizer_sigset_t *set, int signo) {\n  DEFINE__REAL(int, __sigdelset14, const void *a, int b);\n  (void)_REAL(__sigdelset14, set, signo);\n}\n\nuptr internal_clone(int (*fn)(void *), void *child_stack, int flags,\n                    void *arg) {\n  DEFINE__REAL(int, clone, int (*a)(void *b), void *c, int d, void *e);\n\n  return _REAL(clone, fn, child_stack, flags, arg);\n}\n\n}  // namespace __sanitizer\n\n#endif\n"
  },
  {
    "path": "runtime/sanitizer_common/sanitizer_openbsd.cpp",
    "content": ""
  },
  {
    "path": "runtime/sanitizer_common/sanitizer_persistent_allocator.cpp",
    "content": "//===-- sanitizer_persistent_allocator.cpp ----------------------*- C++ -*-===//\n//\n// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n// See https://llvm.org/LICENSE.txt for license information.\n// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n//\n//===----------------------------------------------------------------------===//\n//\n// This file is shared between AddressSanitizer and ThreadSanitizer\n// run-time libraries.\n//===----------------------------------------------------------------------===//\n#include \"sanitizer_persistent_allocator.h\"\n\nnamespace __sanitizer {\n\nPersistentAllocator thePersistentAllocator;\n\n}  // namespace __sanitizer\n"
  },
  {
    "path": "runtime/sanitizer_common/sanitizer_persistent_allocator.h",
    "content": "//===-- sanitizer_persistent_allocator.h ------------------------*- C++ -*-===//\n//\n// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n// See https://llvm.org/LICENSE.txt for license information.\n// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n//\n//===----------------------------------------------------------------------===//\n//\n// A fast memory allocator that does not support free() nor realloc().\n// All allocations are forever.\n//===----------------------------------------------------------------------===//\n\n#ifndef SANITIZER_PERSISTENT_ALLOCATOR_H\n#define SANITIZER_PERSISTENT_ALLOCATOR_H\n\n#include \"sanitizer_internal_defs.h\"\n#include \"sanitizer_mutex.h\"\n#include \"sanitizer_atomic.h\"\n#include \"sanitizer_common.h\"\n\nnamespace __sanitizer {\n\nclass PersistentAllocator {\n public:\n  void *alloc(uptr size);\n\n private:\n  void *tryAlloc(uptr size);\n  StaticSpinMutex mtx;  // Protects alloc of new blocks for region allocator.\n  atomic_uintptr_t region_pos;  // Region allocator for Node's.\n  atomic_uintptr_t region_end;\n};\n\ninline void *PersistentAllocator::tryAlloc(uptr size) {\n  // Optimisic lock-free allocation, essentially try to bump the region ptr.\n  for (;;) {\n    uptr cmp = atomic_load(&region_pos, memory_order_acquire);\n    uptr end = atomic_load(&region_end, memory_order_acquire);\n    if (cmp == 0 || cmp + size > end) return nullptr;\n    if (atomic_compare_exchange_weak(&region_pos, &cmp, cmp + size,\n                                     memory_order_acquire))\n      return (void *)cmp;\n  }\n}\n\ninline void *PersistentAllocator::alloc(uptr size) {\n  // First, try to allocate optimisitically.\n  void *s = tryAlloc(size);\n  if (s) return s;\n  // If failed, lock, retry and alloc new superblock.\n  SpinMutexLock l(&mtx);\n  for (;;) {\n    s = tryAlloc(size);\n    if (s) return s;\n    atomic_store(&region_pos, 0, memory_order_relaxed);\n    uptr allocsz = 64 * 
1024;\n    if (allocsz < size) allocsz = size;\n    uptr mem = (uptr)MmapOrDie(allocsz, \"stack depot\");\n    atomic_store(&region_end, mem + allocsz, memory_order_release);\n    atomic_store(&region_pos, mem, memory_order_release);\n  }\n}\n\nextern PersistentAllocator thePersistentAllocator;\ninline void *PersistentAlloc(uptr sz) {\n  return thePersistentAllocator.alloc(sz);\n}\n\n} // namespace __sanitizer\n\n#endif // SANITIZER_PERSISTENT_ALLOCATOR_H\n"
  },
  {
    "path": "runtime/sanitizer_common/sanitizer_placement_new.h",
    "content": "//===-- sanitizer_placement_new.h -------------------------------*- C++ -*-===//\n//\n// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n// See https://llvm.org/LICENSE.txt for license information.\n// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n//\n//===----------------------------------------------------------------------===//\n//\n// This file is shared between AddressSanitizer and ThreadSanitizer\n// run-time libraries.\n//\n// The file provides 'placement new'.\n// Do not include it into header files, only into source files.\n//===----------------------------------------------------------------------===//\n#ifndef SANITIZER_PLACEMENT_NEW_H\n#define SANITIZER_PLACEMENT_NEW_H\n\n#include \"sanitizer_internal_defs.h\"\n\ninline void *operator new(__sanitizer::operator_new_size_type sz, void *p) {\n  return p;\n}\n\n#endif  // SANITIZER_PLACEMENT_NEW_H\n"
  },
  {
    "path": "runtime/sanitizer_common/sanitizer_platform.h",
    "content": "//===-- sanitizer_platform.h ------------------------------------*- C++ -*-===//\n//\n// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n// See https://llvm.org/LICENSE.txt for license information.\n// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n//\n//===----------------------------------------------------------------------===//\n//\n// Common platform macros.\n//===----------------------------------------------------------------------===//\n\n#ifndef SANITIZER_PLATFORM_H\n#define SANITIZER_PLATFORM_H\n\n#if !defined(__linux__) && !defined(__FreeBSD__) && !defined(__NetBSD__) && \\\n    !defined(__APPLE__) && !defined(_WIN32) && !defined(__Fuchsia__) &&     \\\n    !(defined(__sun__) && defined(__svr4__))\n#  error \"This operating system is not supported\"\n#endif\n\n// Get __GLIBC__ on a glibc platform. Exclude Android: features.h includes C\n// function declarations into a .S file which doesn't compile.\n// https://crbug.com/1162741\n#if __has_include(<features.h>) && !defined(__ANDROID__)\n#  include <features.h>\n#endif\n\n#if defined(__linux__)\n#  define SANITIZER_LINUX 1\n#else\n#  define SANITIZER_LINUX 0\n#endif\n\n#if defined(__GLIBC__)\n#  define SANITIZER_GLIBC 1\n#else\n#  define SANITIZER_GLIBC 0\n#endif\n\n#if defined(__FreeBSD__)\n#  define SANITIZER_FREEBSD 1\n#else\n#  define SANITIZER_FREEBSD 0\n#endif\n\n#if defined(__NetBSD__)\n#  define SANITIZER_NETBSD 1\n#else\n#  define SANITIZER_NETBSD 0\n#endif\n\n#if defined(__sun__) && defined(__svr4__)\n#  define SANITIZER_SOLARIS 1\n#else\n#  define SANITIZER_SOLARIS 0\n#endif\n\n#if defined(__APPLE__)\n#  define SANITIZER_MAC 1\n#  include <TargetConditionals.h>\n#  if TARGET_OS_OSX\n#    define SANITIZER_OSX 1\n#  else\n#    define SANITIZER_OSX 0\n#  endif\n#  if TARGET_OS_IPHONE\n#    define SANITIZER_IOS 1\n#  else\n#    define SANITIZER_IOS 0\n#  endif\n#  if TARGET_OS_SIMULATOR\n#    define SANITIZER_IOSSIM 1\n#  else\n#    define 
SANITIZER_IOSSIM 0\n#  endif\n#else\n#  define SANITIZER_MAC 0\n#  define SANITIZER_IOS 0\n#  define SANITIZER_IOSSIM 0\n#  define SANITIZER_OSX 0\n#endif\n\n#if defined(__APPLE__) && TARGET_OS_IPHONE && TARGET_OS_WATCH\n#  define SANITIZER_WATCHOS 1\n#else\n#  define SANITIZER_WATCHOS 0\n#endif\n\n#if defined(__APPLE__) && TARGET_OS_IPHONE && TARGET_OS_TV\n#  define SANITIZER_TVOS 1\n#else\n#  define SANITIZER_TVOS 0\n#endif\n\n#if defined(_WIN32)\n#  define SANITIZER_WINDOWS 1\n#else\n#  define SANITIZER_WINDOWS 0\n#endif\n\n#if defined(_WIN64)\n#  define SANITIZER_WINDOWS64 1\n#else\n#  define SANITIZER_WINDOWS64 0\n#endif\n\n#if defined(__ANDROID__)\n#  define SANITIZER_ANDROID 1\n#else\n#  define SANITIZER_ANDROID 0\n#endif\n\n#if defined(__Fuchsia__)\n#  define SANITIZER_FUCHSIA 1\n#else\n#  define SANITIZER_FUCHSIA 0\n#endif\n\n#define SANITIZER_POSIX                                     \\\n  (SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_MAC || \\\n   SANITIZER_NETBSD || SANITIZER_SOLARIS)\n\n#if __LP64__ || defined(_WIN64)\n#  define SANITIZER_WORDSIZE 64\n#else\n#  define SANITIZER_WORDSIZE 32\n#endif\n\n#if SANITIZER_WORDSIZE == 64\n#  define FIRST_32_SECOND_64(a, b) (b)\n#else\n#  define FIRST_32_SECOND_64(a, b) (a)\n#endif\n\n#if defined(__x86_64__) && !defined(_LP64)\n#  define SANITIZER_X32 1\n#else\n#  define SANITIZER_X32 0\n#endif\n\n#if defined(__x86_64__) || defined(_M_X64)\n#  define SANITIZER_X64 1\n#else\n#  define SANITIZER_X64 0\n#endif\n\n#if defined(__i386__) || defined(_M_IX86)\n#  define SANITIZER_I386 1\n#else\n#  define SANITIZER_I386 0\n#endif\n\n#if defined(__mips__)\n#  define SANITIZER_MIPS 1\n#  if defined(__mips64)\n#    define SANITIZER_MIPS32 0\n#    define SANITIZER_MIPS64 1\n#  else\n#    define SANITIZER_MIPS32 1\n#    define SANITIZER_MIPS64 0\n#  endif\n#else\n#  define SANITIZER_MIPS 0\n#  define SANITIZER_MIPS32 0\n#  define SANITIZER_MIPS64 0\n#endif\n\n#if defined(__s390__)\n#  define SANITIZER_S390 1\n#  if 
defined(__s390x__)\n#    define SANITIZER_S390_31 0\n#    define SANITIZER_S390_64 1\n#  else\n#    define SANITIZER_S390_31 1\n#    define SANITIZER_S390_64 0\n#  endif\n#else\n#  define SANITIZER_S390 0\n#  define SANITIZER_S390_31 0\n#  define SANITIZER_S390_64 0\n#endif\n\n#if defined(__powerpc__)\n#  define SANITIZER_PPC 1\n#  if defined(__powerpc64__)\n#    define SANITIZER_PPC32 0\n#    define SANITIZER_PPC64 1\n// 64-bit PPC has two ABIs (v1 and v2).  The old powerpc64 target is\n// big-endian, and uses v1 ABI (known for its function descriptors),\n// while the new powerpc64le target is little-endian and uses v2.\n// In theory, you could convince gcc to compile for their evil twins\n// (eg. big-endian v2), but you won't find such combinations in the wild\n// (it'd require bootstrapping a whole system, which would be quite painful\n// - there's no target triple for that).  LLVM doesn't support them either.\n#    if _CALL_ELF == 2\n#      define SANITIZER_PPC64V1 0\n#      define SANITIZER_PPC64V2 1\n#    else\n#      define SANITIZER_PPC64V1 1\n#      define SANITIZER_PPC64V2 0\n#    endif\n#  else\n#    define SANITIZER_PPC32 1\n#    define SANITIZER_PPC64 0\n#    define SANITIZER_PPC64V1 0\n#    define SANITIZER_PPC64V2 0\n#  endif\n#else\n#  define SANITIZER_PPC 0\n#  define SANITIZER_PPC32 0\n#  define SANITIZER_PPC64 0\n#  define SANITIZER_PPC64V1 0\n#  define SANITIZER_PPC64V2 0\n#endif\n\n#if defined(__arm__) || defined(_M_ARM)\n#  define SANITIZER_ARM 1\n#else\n#  define SANITIZER_ARM 0\n#endif\n\n#if defined(__aarch64__) || defined(_M_ARM64)\n#  define SANITIZER_ARM64 1\n#else\n#  define SANITIZER_ARM64 0\n#endif\n\n#if SANITIZER_SOLARIS && SANITIZER_WORDSIZE == 32\n#  define SANITIZER_SOLARIS32 1\n#else\n#  define SANITIZER_SOLARIS32 0\n#endif\n\n#if defined(__riscv) && (__riscv_xlen == 64)\n#  define SANITIZER_RISCV64 1\n#else\n#  define SANITIZER_RISCV64 0\n#endif\n\n// By default we allow to use SizeClassAllocator64 on 64-bit platform.\n// But 
in some cases (e.g. AArch64's 39-bit address space) SizeClassAllocator64\n// does not work well and we need to fallback to SizeClassAllocator32.\n// For such platforms build this code with -DSANITIZER_CAN_USE_ALLOCATOR64=0 or\n// change the definition of SANITIZER_CAN_USE_ALLOCATOR64 here.\n#ifndef SANITIZER_CAN_USE_ALLOCATOR64\n#  if (SANITIZER_ANDROID && defined(__aarch64__)) || SANITIZER_FUCHSIA\n#    define SANITIZER_CAN_USE_ALLOCATOR64 1\n#  elif defined(__mips64) || defined(__aarch64__)\n#    define SANITIZER_CAN_USE_ALLOCATOR64 0\n#  else\n#    define SANITIZER_CAN_USE_ALLOCATOR64 (SANITIZER_WORDSIZE == 64)\n#  endif\n#endif\n\n// The range of addresses which can be returned my mmap.\n// FIXME: this value should be different on different platforms.  Larger values\n// will still work but will consume more memory for TwoLevelByteMap.\n#if defined(__mips__)\n#  if SANITIZER_GO && defined(__mips64)\n#    define SANITIZER_MMAP_RANGE_SIZE FIRST_32_SECOND_64(1ULL << 32, 1ULL << 47)\n#  else\n#    define SANITIZER_MMAP_RANGE_SIZE FIRST_32_SECOND_64(1ULL << 32, 1ULL << 40)\n#  endif\n#elif SANITIZER_RISCV64\n#  define SANITIZER_MMAP_RANGE_SIZE FIRST_32_SECOND_64(1ULL << 32, 1ULL << 38)\n#elif defined(__aarch64__)\n#  if SANITIZER_MAC\n#    if SANITIZER_OSX || SANITIZER_IOSSIM\n#      define SANITIZER_MMAP_RANGE_SIZE \\\n        FIRST_32_SECOND_64(1ULL << 32, 1ULL << 47)\n#    else\n// Darwin iOS/ARM64 has a 36-bit VMA, 64GiB VM\n#      define SANITIZER_MMAP_RANGE_SIZE \\\n        FIRST_32_SECOND_64(1ULL << 32, 1ULL << 36)\n#    endif\n#  else\n#    define SANITIZER_MMAP_RANGE_SIZE FIRST_32_SECOND_64(1ULL << 32, 1ULL << 48)\n#  endif\n#elif defined(__sparc__)\n#  define SANITIZER_MMAP_RANGE_SIZE FIRST_32_SECOND_64(1ULL << 32, 1ULL << 52)\n#else\n#  define SANITIZER_MMAP_RANGE_SIZE FIRST_32_SECOND_64(1ULL << 32, 1ULL << 47)\n#endif\n\n// Whether the addresses are sign-extended from the VMA range to the word.\n// The SPARC64 Linux port implements this to split the VMA 
space into two\n// non-contiguous halves with a huge hole in the middle.\n#if defined(__sparc__) && SANITIZER_WORDSIZE == 64\n#  define SANITIZER_SIGN_EXTENDED_ADDRESSES 1\n#else\n#  define SANITIZER_SIGN_EXTENDED_ADDRESSES 0\n#endif\n\n// The AArch64 and RISC-V linux ports use the canonical syscall set as\n// mandated by the upstream linux community for all new ports. Other ports\n// may still use legacy syscalls.\n#ifndef SANITIZER_USES_CANONICAL_LINUX_SYSCALLS\n#  if (defined(__aarch64__) || defined(__riscv) || defined(__hexagon__)) && \\\n      SANITIZER_LINUX\n#    define SANITIZER_USES_CANONICAL_LINUX_SYSCALLS 1\n#  else\n#    define SANITIZER_USES_CANONICAL_LINUX_SYSCALLS 0\n#  endif\n#endif\n\n// udi16 syscalls can only be used when the following conditions are\n// met:\n// * target is one of arm32, x86-32, sparc32, sh or m68k\n// * libc version is libc5, glibc-2.0, glibc-2.1 or glibc-2.2 to 2.15\n//   built against > linux-2.2 kernel headers\n// Since we don't want to include libc headers here, we check the\n// target only.\n#if defined(__arm__) || SANITIZER_X32 || defined(__sparc__)\n#  define SANITIZER_USES_UID16_SYSCALLS 1\n#else\n#  define SANITIZER_USES_UID16_SYSCALLS 0\n#endif\n\n#if defined(__mips__)\n#  define SANITIZER_POINTER_FORMAT_LENGTH FIRST_32_SECOND_64(8, 10)\n#else\n#  define SANITIZER_POINTER_FORMAT_LENGTH FIRST_32_SECOND_64(8, 12)\n#endif\n\n/// \\macro MSC_PREREQ\n/// \\brief Is the compiler MSVC of at least the specified version?\n/// The common \\param version values to check for are:\n///  * 1800: Microsoft Visual Studio 2013 / 12.0\n///  * 1900: Microsoft Visual Studio 2015 / 14.0\n#ifdef _MSC_VER\n#  define MSC_PREREQ(version) (_MSC_VER >= (version))\n#else\n#  define MSC_PREREQ(version) 0\n#endif\n\n#if SANITIZER_MAC && !(defined(__arm64__) && SANITIZER_IOS)\n#  define SANITIZER_NON_UNIQUE_TYPEINFO 0\n#else\n#  define SANITIZER_NON_UNIQUE_TYPEINFO 1\n#endif\n\n// On linux, some architectures had an ABI transition from 64-bit long 
double\n// (ie. same as double) to 128-bit long double.  On those, glibc symbols\n// involving long doubles come in two versions, and we need to pass the\n// correct one to dlvsym when intercepting them.\n#if SANITIZER_LINUX && (SANITIZER_S390 || SANITIZER_PPC32 || SANITIZER_PPC64V1)\n#  define SANITIZER_NLDBL_VERSION \"GLIBC_2.4\"\n#endif\n\n#if SANITIZER_GO == 0\n#  define SANITIZER_GO 0\n#endif\n\n// On PowerPC and ARM Thumb, calling pthread_exit() causes LSan to detect leaks.\n// pthread_exit() performs unwinding that leads to dlopen'ing libgcc_s.so.\n// dlopen mallocs \"libgcc_s.so\" string which confuses LSan, it fails to realize\n// that this allocation happens in dynamic linker and should be ignored.\n#if SANITIZER_PPC || defined(__thumb__)\n#  define SANITIZER_SUPPRESS_LEAK_ON_PTHREAD_EXIT 1\n#else\n#  define SANITIZER_SUPPRESS_LEAK_ON_PTHREAD_EXIT 0\n#endif\n\n#if SANITIZER_FREEBSD || SANITIZER_MAC || SANITIZER_NETBSD || SANITIZER_SOLARIS\n#  define SANITIZER_MADVISE_DONTNEED MADV_FREE\n#else\n#  define SANITIZER_MADVISE_DONTNEED MADV_DONTNEED\n#endif\n\n// Older gcc have issues aligning to a constexpr, and require an integer.\n// See https://gcc.gnu.org/bugzilla/show_bug.cgi?id=56859 among others.\n#if defined(__powerpc__) || defined(__powerpc64__)\n#  define SANITIZER_CACHE_LINE_SIZE 128\n#else\n#  define SANITIZER_CACHE_LINE_SIZE 64\n#endif\n\n// Enable offline markup symbolizer for Fuchsia.\n#if SANITIZER_FUCHSIA\n#  define SANITIZER_SYMBOLIZER_MARKUP 1\n#else\n#  define SANITIZER_SYMBOLIZER_MARKUP 0\n#endif\n\n// Enable ability to support sanitizer initialization that is\n// compatible with the sanitizer library being loaded via\n// `dlopen()`.\n#if SANITIZER_MAC\n#  define SANITIZER_SUPPORTS_INIT_FOR_DLOPEN 1\n#else\n#  define SANITIZER_SUPPORTS_INIT_FOR_DLOPEN 0\n#endif\n\n// SANITIZER_SUPPORTS_THREADLOCAL\n// 1 - THREADLOCAL macro is supported by target\n// 0 - THREADLOCAL macro is not supported by target\n#ifndef __has_feature\n// TODO: Support 
other compilers here\n#  define SANITIZER_SUPPORTS_THREADLOCAL 1\n#else\n#  if __has_feature(tls)\n#    define SANITIZER_SUPPORTS_THREADLOCAL 1\n#  else\n#    define SANITIZER_SUPPORTS_THREADLOCAL 0\n#  endif\n#endif\n\n#if defined(__thumb__) && defined(__linux__)\n// Workaround for\n// https://lab.llvm.org/buildbot/#/builders/clang-thumbv7-full-2stage\n// or\n// https://lab.llvm.org/staging/#/builders/clang-thumbv7-full-2stage\n// It fails *rss_limit_mb_test* without meaningful errors.\n#  define SANITIZER_START_BACKGROUND_THREAD_IN_ASAN_INTERNAL 1\n#else\n#  define SANITIZER_START_BACKGROUND_THREAD_IN_ASAN_INTERNAL 0\n#endif\n\n#endif  // SANITIZER_PLATFORM_H\n"
  },
  {
    "path": "runtime/sanitizer_common/sanitizer_platform_interceptors.h",
    "content": "//===-- sanitizer_platform_interceptors.h -----------------------*- C++ -*-===//\n//\n// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n// See https://llvm.org/LICENSE.txt for license information.\n// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n//\n//===----------------------------------------------------------------------===//\n//\n// This file defines macro telling whether sanitizer tools can/should intercept\n// given library functions on a given platform.\n//\n//===----------------------------------------------------------------------===//\n#ifndef SANITIZER_PLATFORM_INTERCEPTORS_H\n#define SANITIZER_PLATFORM_INTERCEPTORS_H\n\n#include \"sanitizer_glibc_version.h\"\n#include \"sanitizer_internal_defs.h\"\n#include \"sanitizer_platform.h\"\n\n#if SANITIZER_POSIX\n#define SI_POSIX 1\n#else\n#define SI_POSIX 0\n#endif\n\n#if !SANITIZER_WINDOWS\n#define SI_WINDOWS 0\n#else\n#define SI_WINDOWS 1\n#endif\n\n#if SI_WINDOWS && SI_POSIX\n#error \"Windows is not POSIX!\"\n#endif\n\n#if SI_POSIX\n#include \"sanitizer_platform_limits_freebsd.h\"\n#include \"sanitizer_platform_limits_netbsd.h\"\n#include \"sanitizer_platform_limits_posix.h\"\n#include \"sanitizer_platform_limits_solaris.h\"\n#endif\n\n#if SANITIZER_LINUX && !SANITIZER_ANDROID\n#define SI_LINUX_NOT_ANDROID 1\n#else\n#define SI_LINUX_NOT_ANDROID 0\n#endif\n\n#if SANITIZER_GLIBC\n#define SI_GLIBC 1\n#else\n#define SI_GLIBC 0\n#endif\n\n#if SANITIZER_ANDROID\n#define SI_ANDROID 1\n#else\n#define SI_ANDROID 0\n#endif\n\n#if SANITIZER_FREEBSD\n#define SI_FREEBSD 1\n#else\n#define SI_FREEBSD 0\n#endif\n\n#if SANITIZER_NETBSD\n#define SI_NETBSD 1\n#else\n#define SI_NETBSD 0\n#endif\n\n#if SANITIZER_LINUX\n#define SI_LINUX 1\n#else\n#define SI_LINUX 0\n#endif\n\n#if SANITIZER_MAC\n#define SI_MAC 1\n#define SI_NOT_MAC 0\n#else\n#define SI_MAC 0\n#define SI_NOT_MAC 1\n#endif\n\n#if SANITIZER_IOS\n#define SI_IOS 1\n#else\n#define SI_IOS 0\n#endif\n\n#if 
SANITIZER_IOSSIM\n#define SI_IOSSIM 1\n#else\n#define SI_IOSSIM 0\n#endif\n\n#if SANITIZER_WATCHOS\n#define SI_WATCHOS 1\n#else\n#define SI_WATCHOS 0\n#endif\n\n#if SANITIZER_TVOS\n#define SI_TVOS 1\n#else\n#define SI_TVOS 0\n#endif\n\n#if SANITIZER_FUCHSIA\n#define SI_NOT_FUCHSIA 0\n#else\n#define SI_NOT_FUCHSIA 1\n#endif\n\n#if SANITIZER_SOLARIS\n#define SI_SOLARIS 1\n#else\n#define SI_SOLARIS 0\n#endif\n\n#if SANITIZER_SOLARIS32\n#define SI_SOLARIS32 1\n#else\n#define SI_SOLARIS32 0\n#endif\n\n#if SANITIZER_POSIX && !SANITIZER_MAC\n#define SI_POSIX_NOT_MAC 1\n#else\n#define SI_POSIX_NOT_MAC 0\n#endif\n\n#if SANITIZER_LINUX && !SANITIZER_FREEBSD\n#define SI_LINUX_NOT_FREEBSD 1\n#else\n#define SI_LINUX_NOT_FREEBSD 0\n#endif\n\n#define SANITIZER_INTERCEPT_STRLEN SI_NOT_FUCHSIA\n#define SANITIZER_INTERCEPT_STRNLEN (SI_NOT_MAC && SI_NOT_FUCHSIA)\n#define SANITIZER_INTERCEPT_STRCMP SI_NOT_FUCHSIA\n#define SANITIZER_INTERCEPT_STRSTR SI_NOT_FUCHSIA\n#define SANITIZER_INTERCEPT_STRCASESTR SI_POSIX\n#define SANITIZER_INTERCEPT_STRTOK SI_NOT_FUCHSIA\n#define SANITIZER_INTERCEPT_STRCHR SI_NOT_FUCHSIA\n#define SANITIZER_INTERCEPT_STRCHRNUL SI_POSIX_NOT_MAC\n#define SANITIZER_INTERCEPT_STRRCHR SI_NOT_FUCHSIA\n#define SANITIZER_INTERCEPT_STRSPN SI_NOT_FUCHSIA\n#define SANITIZER_INTERCEPT_STRPBRK SI_NOT_FUCHSIA\n#define SANITIZER_INTERCEPT_TEXTDOMAIN SI_LINUX_NOT_ANDROID || SI_SOLARIS\n#define SANITIZER_INTERCEPT_STRCASECMP SI_POSIX\n#define SANITIZER_INTERCEPT_MEMSET 1\n#define SANITIZER_INTERCEPT_MEMMOVE 1\n#define SANITIZER_INTERCEPT_MEMCPY 1\n#define SANITIZER_INTERCEPT_MEMCMP SI_NOT_FUCHSIA\n#define SANITIZER_INTERCEPT_BCMP \\\n  SANITIZER_INTERCEPT_MEMCMP &&  \\\n      ((SI_POSIX && _GNU_SOURCE) || SI_NETBSD || SI_FREEBSD)\n#define SANITIZER_INTERCEPT_STRNDUP SI_POSIX\n#define SANITIZER_INTERCEPT___STRNDUP SI_GLIBC\n#if defined(__ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__) && \\\n    __ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__ < 1070\n#define 
SI_MAC_DEPLOYMENT_BELOW_10_7 1\n#else\n#define SI_MAC_DEPLOYMENT_BELOW_10_7 0\n#endif\n// memmem on Darwin doesn't exist on 10.6\n// FIXME: enable memmem on Windows.\n#define SANITIZER_INTERCEPT_MEMMEM (SI_POSIX && !SI_MAC_DEPLOYMENT_BELOW_10_7)\n#define SANITIZER_INTERCEPT_MEMCHR SI_NOT_FUCHSIA\n#define SANITIZER_INTERCEPT_MEMRCHR (SI_FREEBSD || SI_LINUX || SI_NETBSD)\n\n#define SANITIZER_INTERCEPT_READ SI_POSIX\n#define SANITIZER_INTERCEPT_PREAD SI_POSIX\n#define SANITIZER_INTERCEPT_WRITE SI_POSIX\n#define SANITIZER_INTERCEPT_PWRITE SI_POSIX\n\n#define SANITIZER_INTERCEPT_FREAD SI_POSIX\n#define SANITIZER_INTERCEPT_FWRITE SI_POSIX\n#define SANITIZER_INTERCEPT_FGETS SI_POSIX\n#define SANITIZER_INTERCEPT_FPUTS SI_POSIX\n#define SANITIZER_INTERCEPT_PUTS SI_POSIX\n\n#define SANITIZER_INTERCEPT_PREAD64 (SI_GLIBC || SI_SOLARIS32)\n#define SANITIZER_INTERCEPT_PWRITE64 (SI_GLIBC || SI_SOLARIS32)\n\n#define SANITIZER_INTERCEPT_READV SI_POSIX\n#define SANITIZER_INTERCEPT_WRITEV SI_POSIX\n\n#define SANITIZER_INTERCEPT_PREADV \\\n  (SI_FREEBSD || SI_NETBSD || SI_LINUX_NOT_ANDROID)\n#define SANITIZER_INTERCEPT_PWRITEV SI_LINUX_NOT_ANDROID\n#define SANITIZER_INTERCEPT_PREADV64 SI_GLIBC\n#define SANITIZER_INTERCEPT_PWRITEV64 SI_GLIBC\n\n#define SANITIZER_INTERCEPT_PRCTL SI_LINUX\n\n#define SANITIZER_INTERCEPT_LOCALTIME_AND_FRIENDS SI_POSIX\n#define SANITIZER_INTERCEPT_STRPTIME SI_POSIX\n\n#define SANITIZER_INTERCEPT_SCANF SI_POSIX\n#define SANITIZER_INTERCEPT_ISOC99_SCANF SI_GLIBC\n\n#ifndef SANITIZER_INTERCEPT_PRINTF\n#define SANITIZER_INTERCEPT_PRINTF SI_POSIX\n#define SANITIZER_INTERCEPT_PRINTF_L (SI_FREEBSD || SI_NETBSD)\n#define SANITIZER_INTERCEPT_ISOC99_PRINTF SI_GLIBC\n#endif\n\n#define SANITIZER_INTERCEPT___PRINTF_CHK \\\n  (SANITIZER_INTERCEPT_PRINTF && SI_GLIBC)\n\n#define SANITIZER_INTERCEPT_FREXP SI_NOT_FUCHSIA\n#define SANITIZER_INTERCEPT_FREXPF_FREXPL SI_POSIX\n\n#define SANITIZER_INTERCEPT_GETPWNAM_AND_FRIENDS SI_POSIX\n#define 
SANITIZER_INTERCEPT_GETPWNAM_R_AND_FRIENDS \\\n  (SI_FREEBSD || SI_NETBSD || SI_MAC || SI_LINUX_NOT_ANDROID || SI_SOLARIS)\n#define SANITIZER_INTERCEPT_GETPWENT \\\n  (SI_FREEBSD || SI_NETBSD || SI_MAC || SI_LINUX_NOT_ANDROID || SI_SOLARIS)\n#define SANITIZER_INTERCEPT_FGETGRENT_R (SI_GLIBC || SI_SOLARIS)\n#define SANITIZER_INTERCEPT_FGETPWENT SI_LINUX_NOT_ANDROID || SI_SOLARIS\n#define SANITIZER_INTERCEPT_GETPWENT_R \\\n  (SI_FREEBSD || SI_NETBSD || SI_GLIBC || SI_SOLARIS)\n#define SANITIZER_INTERCEPT_FGETPWENT_R (SI_FREEBSD || SI_GLIBC || SI_SOLARIS)\n#define SANITIZER_INTERCEPT_SETPWENT \\\n  (SI_MAC || SI_LINUX_NOT_ANDROID || SI_SOLARIS)\n#define SANITIZER_INTERCEPT_CLOCK_GETTIME \\\n  (SI_FREEBSD || SI_NETBSD || SI_LINUX || SI_SOLARIS)\n#define SANITIZER_INTERCEPT_CLOCK_GETCPUCLOCKID \\\n  (SI_LINUX || SI_FREEBSD || SI_NETBSD)\n#define SANITIZER_INTERCEPT_GETITIMER SI_POSIX\n#define SANITIZER_INTERCEPT_TIME SI_POSIX\n#define SANITIZER_INTERCEPT_GLOB (SI_GLIBC || SI_SOLARIS)\n#define SANITIZER_INTERCEPT_GLOB64 SI_GLIBC\n#define SANITIZER_INTERCEPT_POSIX_SPAWN SI_POSIX\n#define SANITIZER_INTERCEPT_WAIT SI_POSIX\n#define SANITIZER_INTERCEPT_INET SI_POSIX\n#define SANITIZER_INTERCEPT_PTHREAD_GETSCHEDPARAM SI_POSIX\n#define SANITIZER_INTERCEPT_GETADDRINFO SI_POSIX\n#define SANITIZER_INTERCEPT_GETNAMEINFO SI_POSIX\n#define SANITIZER_INTERCEPT_GETSOCKNAME SI_POSIX\n#define SANITIZER_INTERCEPT_GETHOSTBYNAME SI_POSIX\n#define SANITIZER_INTERCEPT_GETHOSTBYNAME2 SI_POSIX && !SI_SOLARIS\n#define SANITIZER_INTERCEPT_GETHOSTBYNAME_R \\\n  (SI_FREEBSD || SI_LINUX || SI_SOLARIS)\n#define SANITIZER_INTERCEPT_GETHOSTBYNAME2_R \\\n  (SI_FREEBSD || SI_LINUX_NOT_ANDROID)\n#define SANITIZER_INTERCEPT_GETHOSTBYADDR_R \\\n  (SI_FREEBSD || SI_LINUX_NOT_ANDROID || SI_SOLARIS)\n#define SANITIZER_INTERCEPT_GETHOSTENT_R (SI_FREEBSD || SI_GLIBC || SI_SOLARIS)\n#define SANITIZER_INTERCEPT_GETSOCKOPT SI_POSIX\n#define SANITIZER_INTERCEPT_ACCEPT SI_POSIX\n#define SANITIZER_INTERCEPT_ACCEPT4 
\\\n  (SI_LINUX_NOT_ANDROID || SI_NETBSD || SI_FREEBSD)\n#define SANITIZER_INTERCEPT_PACCEPT SI_NETBSD\n#define SANITIZER_INTERCEPT_MODF SI_POSIX\n#define SANITIZER_INTERCEPT_RECVMSG SI_POSIX\n#define SANITIZER_INTERCEPT_SENDMSG SI_POSIX\n#define SANITIZER_INTERCEPT_RECVMMSG SI_LINUX\n#define SANITIZER_INTERCEPT_SENDMMSG SI_LINUX\n#define SANITIZER_INTERCEPT_SYSMSG SI_LINUX_NOT_ANDROID\n#define SANITIZER_INTERCEPT_GETPEERNAME SI_POSIX\n#define SANITIZER_INTERCEPT_IOCTL SI_POSIX\n#define SANITIZER_INTERCEPT_INET_ATON SI_POSIX\n#define SANITIZER_INTERCEPT_SYSINFO SI_LINUX\n#define SANITIZER_INTERCEPT_READDIR SI_POSIX\n#define SANITIZER_INTERCEPT_READDIR64 SI_LINUX_NOT_ANDROID || SI_SOLARIS32\n#if SI_LINUX_NOT_ANDROID &&                                                \\\n    (defined(__i386) || defined(__x86_64) || defined(__mips64) ||          \\\n     defined(__powerpc64__) || defined(__aarch64__) || defined(__arm__) || \\\n     defined(__s390__) || SANITIZER_RISCV64)\n#define SANITIZER_INTERCEPT_PTRACE 1\n#else\n#define SANITIZER_INTERCEPT_PTRACE 0\n#endif\n#define SANITIZER_INTERCEPT_SETLOCALE SI_POSIX\n#define SANITIZER_INTERCEPT_GETCWD SI_POSIX\n#define SANITIZER_INTERCEPT_GET_CURRENT_DIR_NAME SI_LINUX_NOT_ANDROID\n#define SANITIZER_INTERCEPT_STRTOIMAX SI_POSIX\n#define SANITIZER_INTERCEPT_MBSTOWCS SI_POSIX\n#define SANITIZER_INTERCEPT_MBSNRTOWCS \\\n  (SI_MAC || SI_LINUX_NOT_ANDROID || SI_SOLARIS)\n#define SANITIZER_INTERCEPT_WCSTOMBS SI_POSIX\n#define SANITIZER_INTERCEPT_STRXFRM SI_POSIX\n#define SANITIZER_INTERCEPT___STRXFRM_L SI_LINUX\n#define SANITIZER_INTERCEPT_WCSXFRM SI_POSIX\n#define SANITIZER_INTERCEPT___WCSXFRM_L SI_LINUX\n#define SANITIZER_INTERCEPT_WCSNRTOMBS \\\n  (SI_FREEBSD || SI_NETBSD || SI_MAC || SI_LINUX_NOT_ANDROID || SI_SOLARIS)\n#define SANITIZER_INTERCEPT_WCRTOMB \\\n  (SI_FREEBSD || SI_NETBSD || SI_MAC || SI_LINUX_NOT_ANDROID || SI_SOLARIS)\n#define SANITIZER_INTERCEPT_WCTOMB \\\n  (SI_FREEBSD || SI_NETBSD || SI_MAC || 
SI_LINUX_NOT_ANDROID || SI_SOLARIS)\n#define SANITIZER_INTERCEPT_TCGETATTR SI_LINUX_NOT_ANDROID || SI_SOLARIS\n#define SANITIZER_INTERCEPT_REALPATH SI_POSIX\n#define SANITIZER_INTERCEPT_CANONICALIZE_FILE_NAME (SI_GLIBC || SI_SOLARIS)\n#define SANITIZER_INTERCEPT_CONFSTR \\\n  (SI_FREEBSD || SI_NETBSD || SI_MAC || SI_LINUX_NOT_ANDROID || SI_SOLARIS)\n#define SANITIZER_INTERCEPT_SCHED_GETAFFINITY SI_LINUX_NOT_ANDROID\n#define SANITIZER_INTERCEPT_SCHED_GETPARAM SI_LINUX_NOT_ANDROID || SI_SOLARIS\n#define SANITIZER_INTERCEPT_STRERROR SI_POSIX\n#define SANITIZER_INTERCEPT_STRERROR_R SI_POSIX\n#define SANITIZER_INTERCEPT_XPG_STRERROR_R SI_LINUX_NOT_ANDROID\n#define SANITIZER_INTERCEPT_SCANDIR \\\n  (SI_FREEBSD || SI_NETBSD || SI_LINUX_NOT_ANDROID || SI_SOLARIS)\n#define SANITIZER_INTERCEPT_SCANDIR64 SI_LINUX_NOT_ANDROID || SI_SOLARIS32\n#define SANITIZER_INTERCEPT_GETGROUPS SI_POSIX\n#define SANITIZER_INTERCEPT_POLL SI_POSIX\n#define SANITIZER_INTERCEPT_PPOLL SI_LINUX_NOT_ANDROID || SI_SOLARIS\n#define SANITIZER_INTERCEPT_WORDEXP                                          \\\n  (SI_FREEBSD || SI_NETBSD || (SI_MAC && !SI_IOS) || SI_LINUX_NOT_ANDROID || \\\n   SI_SOLARIS)\n#define SANITIZER_INTERCEPT_SIGWAIT SI_POSIX\n#define SANITIZER_INTERCEPT_SIGWAITINFO SI_LINUX_NOT_ANDROID || SI_SOLARIS\n#define SANITIZER_INTERCEPT_SIGTIMEDWAIT SI_LINUX_NOT_ANDROID || SI_SOLARIS\n#define SANITIZER_INTERCEPT_SIGSETOPS \\\n  (SI_FREEBSD || SI_NETBSD || SI_MAC || SI_LINUX_NOT_ANDROID || SI_SOLARIS)\n#define SANITIZER_INTERCEPT_SIGSET_LOGICOPS SI_LINUX_NOT_ANDROID\n#define SANITIZER_INTERCEPT_SIGPENDING SI_POSIX\n#define SANITIZER_INTERCEPT_SIGPROCMASK SI_POSIX\n#define SANITIZER_INTERCEPT_PTHREAD_SIGMASK SI_POSIX\n#define SANITIZER_INTERCEPT_BACKTRACE \\\n  (SI_FREEBSD || SI_NETBSD || SI_GLIBC || SI_SOLARIS)\n#define SANITIZER_INTERCEPT_GETMNTENT SI_LINUX\n#define SANITIZER_INTERCEPT_GETMNTENT_R SI_LINUX_NOT_ANDROID\n#define SANITIZER_INTERCEPT_STATFS \\\n  (SI_FREEBSD || SI_MAC || 
SI_LINUX_NOT_ANDROID || SI_SOLARIS)\n#define SANITIZER_INTERCEPT_STATFS64 \\\n  (((SI_MAC && !TARGET_CPU_ARM64) && !SI_IOS) || SI_LINUX_NOT_ANDROID)\n#define SANITIZER_INTERCEPT_STATVFS \\\n  (SI_FREEBSD || SI_NETBSD || SI_LINUX_NOT_ANDROID)\n#define SANITIZER_INTERCEPT_STATVFS64 SI_LINUX_NOT_ANDROID\n#define SANITIZER_INTERCEPT_INITGROUPS SI_POSIX\n#define SANITIZER_INTERCEPT_ETHER_NTOA_ATON SI_POSIX\n#define SANITIZER_INTERCEPT_ETHER_HOST \\\n  (SI_FREEBSD || SI_MAC || SI_LINUX_NOT_ANDROID)\n#define SANITIZER_INTERCEPT_ETHER_R (SI_FREEBSD || SI_LINUX_NOT_ANDROID)\n#define SANITIZER_INTERCEPT_SHMCTL                                       \\\n  (((SI_FREEBSD || SI_LINUX_NOT_ANDROID) && SANITIZER_WORDSIZE == 64) || \\\n   SI_NETBSD || SI_SOLARIS)\n#define SANITIZER_INTERCEPT_RANDOM_R SI_GLIBC\n#define SANITIZER_INTERCEPT_PTHREAD_ATTR_GET SI_POSIX\n#define SANITIZER_INTERCEPT_PTHREAD_ATTR_GETINHERITSCHED \\\n  (SI_FREEBSD || SI_NETBSD || SI_MAC || SI_LINUX_NOT_ANDROID || SI_SOLARIS)\n#define SANITIZER_INTERCEPT_PTHREAD_ATTR_GETAFFINITY_NP SI_GLIBC\n#define SANITIZER_INTERCEPT_PTHREAD_ATTR_GET_SCHED SI_POSIX\n#define SANITIZER_INTERCEPT_PTHREAD_MUTEXATTR_GETPSHARED \\\n  (SI_POSIX && !SI_NETBSD)\n#define SANITIZER_INTERCEPT_PTHREAD_MUTEXATTR_GETTYPE SI_POSIX\n#define SANITIZER_INTERCEPT_PTHREAD_MUTEXATTR_GETPROTOCOL \\\n  (SI_MAC || SI_NETBSD || SI_LINUX_NOT_ANDROID || SI_SOLARIS)\n#define SANITIZER_INTERCEPT_PTHREAD_MUTEXATTR_GETPRIOCEILING \\\n  (SI_MAC || SI_NETBSD || SI_LINUX_NOT_ANDROID || SI_SOLARIS)\n#define SANITIZER_INTERCEPT_PTHREAD_MUTEXATTR_GETROBUST \\\n  (SI_LINUX_NOT_ANDROID || SI_SOLARIS)\n#define SANITIZER_INTERCEPT_PTHREAD_MUTEXATTR_GETROBUST_NP SI_LINUX_NOT_ANDROID\n#define SANITIZER_INTERCEPT_PTHREAD_RWLOCKATTR_GETPSHARED \\\n  (SI_POSIX && !SI_NETBSD)\n#define SANITIZER_INTERCEPT_PTHREAD_RWLOCKATTR_GETKIND_NP SI_GLIBC\n#define SANITIZER_INTERCEPT_PTHREAD_CONDATTR_GETPSHARED (SI_POSIX && !SI_NETBSD)\n#define 
SANITIZER_INTERCEPT_PTHREAD_CONDATTR_GETCLOCK \\\n  (SI_LINUX_NOT_ANDROID || SI_SOLARIS)\n#define SANITIZER_INTERCEPT_PTHREAD_BARRIERATTR_GETPSHARED \\\n  (SI_LINUX_NOT_ANDROID && !SI_NETBSD)\n#define SANITIZER_INTERCEPT_THR_EXIT SI_FREEBSD\n#define SANITIZER_INTERCEPT_TMPNAM SI_POSIX\n#define SANITIZER_INTERCEPT_TMPNAM_R (SI_GLIBC || SI_SOLARIS)\n#define SANITIZER_INTERCEPT_PTSNAME SI_LINUX\n#define SANITIZER_INTERCEPT_PTSNAME_R SI_LINUX\n#define SANITIZER_INTERCEPT_TTYNAME SI_POSIX\n#define SANITIZER_INTERCEPT_TTYNAME_R SI_POSIX\n#define SANITIZER_INTERCEPT_TEMPNAM SI_POSIX\n#define SANITIZER_INTERCEPT_SINCOS SI_LINUX || SI_SOLARIS\n#define SANITIZER_INTERCEPT_REMQUO SI_POSIX\n#define SANITIZER_INTERCEPT_REMQUOL (SI_POSIX && !SI_NETBSD)\n#define SANITIZER_INTERCEPT_LGAMMA SI_POSIX\n#define SANITIZER_INTERCEPT_LGAMMAL (SI_POSIX && !SI_NETBSD)\n#define SANITIZER_INTERCEPT_LGAMMA_R (SI_FREEBSD || SI_LINUX || SI_SOLARIS)\n#define SANITIZER_INTERCEPT_LGAMMAL_R SI_LINUX_NOT_ANDROID || SI_SOLARIS\n#define SANITIZER_INTERCEPT_DRAND48_R SI_GLIBC\n#define SANITIZER_INTERCEPT_RAND_R \\\n  (SI_FREEBSD || SI_NETBSD || SI_MAC || SI_LINUX_NOT_ANDROID || SI_SOLARIS)\n#define SANITIZER_INTERCEPT_ICONV \\\n  (SI_FREEBSD || SI_NETBSD || SI_LINUX_NOT_ANDROID || SI_SOLARIS)\n#define SANITIZER_INTERCEPT_TIMES SI_POSIX\n\n// FIXME: getline seems to be available on OSX 10.7\n#define SANITIZER_INTERCEPT_GETLINE \\\n  (SI_FREEBSD || SI_NETBSD || SI_LINUX_NOT_ANDROID || SI_SOLARIS)\n\n#define SANITIZER_INTERCEPT__EXIT \\\n  (SI_LINUX || SI_FREEBSD || SI_NETBSD || SI_MAC || SI_SOLARIS)\n\n#define SANITIZER_INTERCEPT_PTHREAD_MUTEX SI_POSIX\n#define SANITIZER_INTERCEPT___PTHREAD_MUTEX SI_GLIBC\n#define SANITIZER_INTERCEPT___LIBC_MUTEX SI_NETBSD\n#define SANITIZER_INTERCEPT_PTHREAD_SETNAME_NP \\\n  (SI_FREEBSD || SI_NETBSD || SI_GLIBC || SI_SOLARIS)\n#define SANITIZER_INTERCEPT_PTHREAD_GETNAME_NP \\\n  (SI_FREEBSD || SI_NETBSD || SI_GLIBC || SI_SOLARIS)\n\n#define 
SANITIZER_INTERCEPT_TLS_GET_ADDR \\\n  (SI_FREEBSD || SI_NETBSD || SI_LINUX_NOT_ANDROID || SI_SOLARIS)\n\n#define SANITIZER_INTERCEPT_LISTXATTR SI_LINUX\n#define SANITIZER_INTERCEPT_GETXATTR SI_LINUX\n#define SANITIZER_INTERCEPT_GETRESID SI_LINUX\n#define SANITIZER_INTERCEPT_GETIFADDRS \\\n  (SI_FREEBSD || SI_NETBSD || SI_LINUX_NOT_ANDROID || SI_MAC || SI_SOLARIS)\n#define SANITIZER_INTERCEPT_IF_INDEXTONAME \\\n  (SI_FREEBSD || SI_NETBSD || SI_LINUX_NOT_ANDROID || SI_MAC || SI_SOLARIS)\n#define SANITIZER_INTERCEPT_CAPGET SI_LINUX_NOT_ANDROID\n#if SI_LINUX && defined(__arm__)\n#define SANITIZER_INTERCEPT_AEABI_MEM 1\n#else\n#define SANITIZER_INTERCEPT_AEABI_MEM 0\n#endif\n#define SANITIZER_INTERCEPT___BZERO SI_MAC || SI_GLIBC\n#define SANITIZER_INTERCEPT_BZERO SI_LINUX_NOT_ANDROID\n#define SANITIZER_INTERCEPT_FTIME (!SI_FREEBSD && !SI_NETBSD && SI_POSIX)\n#define SANITIZER_INTERCEPT_XDR (SI_GLIBC || SI_SOLARIS)\n#define SANITIZER_INTERCEPT_XDRREC SI_GLIBC\n#define SANITIZER_INTERCEPT_TSEARCH \\\n  (SI_LINUX_NOT_ANDROID || SI_MAC || SI_NETBSD || SI_SOLARIS)\n#define SANITIZER_INTERCEPT_LIBIO_INTERNALS SI_GLIBC\n#define SANITIZER_INTERCEPT_FOPEN SI_POSIX\n#define SANITIZER_INTERCEPT_FOPEN64 (SI_GLIBC || SI_SOLARIS32)\n#define SANITIZER_INTERCEPT_OPEN_MEMSTREAM \\\n  (SI_LINUX_NOT_ANDROID || SI_NETBSD || SI_SOLARIS)\n#define SANITIZER_INTERCEPT_OBSTACK SI_GLIBC\n#define SANITIZER_INTERCEPT_FFLUSH SI_POSIX\n#define SANITIZER_INTERCEPT_FCLOSE SI_POSIX\n\n#ifndef SANITIZER_INTERCEPT_DLOPEN_DLCLOSE\n#define SANITIZER_INTERCEPT_DLOPEN_DLCLOSE \\\n  (SI_FREEBSD || SI_NETBSD || SI_LINUX_NOT_ANDROID || SI_MAC || SI_SOLARIS)\n#endif\n\n#define SANITIZER_INTERCEPT_GETPASS \\\n  (SI_LINUX_NOT_ANDROID || SI_MAC || SI_NETBSD)\n#define SANITIZER_INTERCEPT_TIMERFD SI_LINUX_NOT_ANDROID\n\n#define SANITIZER_INTERCEPT_MLOCKX SI_POSIX\n#define SANITIZER_INTERCEPT_FOPENCOOKIE SI_LINUX_NOT_ANDROID\n#define SANITIZER_INTERCEPT_SEM \\\n  (SI_LINUX || SI_FREEBSD || SI_NETBSD || 
SI_SOLARIS)\n#define SANITIZER_INTERCEPT_PTHREAD_SETCANCEL SI_POSIX\n#define SANITIZER_INTERCEPT_MINCORE \\\n  (SI_LINUX || SI_NETBSD || SI_FREEBSD || SI_SOLARIS)\n#define SANITIZER_INTERCEPT_PROCESS_VM_READV SI_LINUX\n#define SANITIZER_INTERCEPT_CTERMID \\\n  (SI_LINUX || SI_MAC || SI_FREEBSD || SI_NETBSD || SI_SOLARIS)\n#define SANITIZER_INTERCEPT_CTERMID_R (SI_MAC || SI_FREEBSD || SI_SOLARIS)\n\n#define SANITIZER_INTERCEPTOR_HOOKS \\\n  (SI_LINUX || SI_MAC || SI_WINDOWS || SI_FREEBSD || SI_NETBSD || SI_SOLARIS)\n#define SANITIZER_INTERCEPT_RECV_RECVFROM SI_POSIX\n#define SANITIZER_INTERCEPT_SEND_SENDTO SI_POSIX\n#define SANITIZER_INTERCEPT_EVENTFD_READ_WRITE SI_LINUX\n\n#define SI_STAT_LINUX (SI_LINUX && __GLIBC_PREREQ(2, 33))\n#define SANITIZER_INTERCEPT_STAT                                        \\\n  (SI_FREEBSD || SI_MAC || SI_ANDROID || SI_NETBSD || SI_SOLARIS ||     \\\n   SI_STAT_LINUX)\n#define SANITIZER_INTERCEPT_LSTAT (SI_NETBSD || SI_FREEBSD || SI_STAT_LINUX)\n#define SANITIZER_INTERCEPT___XSTAT \\\n  ((!SANITIZER_INTERCEPT_STAT && SI_POSIX) || SI_STAT_LINUX)\n#define SANITIZER_INTERCEPT___XSTAT64 SI_LINUX_NOT_ANDROID\n#define SANITIZER_INTERCEPT___LXSTAT SANITIZER_INTERCEPT___XSTAT\n#define SANITIZER_INTERCEPT___LXSTAT64 SI_LINUX_NOT_ANDROID\n\n#define SANITIZER_INTERCEPT_UTMP \\\n  (SI_POSIX && !SI_MAC && !SI_FREEBSD && !SI_NETBSD)\n#define SANITIZER_INTERCEPT_UTMPX \\\n  (SI_LINUX_NOT_ANDROID || SI_MAC || SI_FREEBSD || SI_NETBSD)\n\n#define SANITIZER_INTERCEPT_GETLOADAVG \\\n  (SI_LINUX_NOT_ANDROID || SI_MAC || SI_FREEBSD || SI_NETBSD)\n\n#define SANITIZER_INTERCEPT_MMAP SI_POSIX\n#define SANITIZER_INTERCEPT_MMAP64 SI_LINUX_NOT_ANDROID || SI_SOLARIS\n#define SANITIZER_INTERCEPT_MALLOPT_AND_MALLINFO (SI_GLIBC || SI_ANDROID)\n#define SANITIZER_INTERCEPT_MEMALIGN (!SI_FREEBSD && !SI_MAC && !SI_NETBSD)\n#define SANITIZER_INTERCEPT___LIBC_MEMALIGN SI_GLIBC\n#define SANITIZER_INTERCEPT_PVALLOC (SI_GLIBC || SI_ANDROID)\n#define SANITIZER_INTERCEPT_CFREE 
(SI_GLIBC && !SANITIZER_RISCV64)\n#define SANITIZER_INTERCEPT_REALLOCARRAY SI_POSIX\n#define SANITIZER_INTERCEPT_ALIGNED_ALLOC (!SI_MAC)\n#define SANITIZER_INTERCEPT_MALLOC_USABLE_SIZE (!SI_MAC && !SI_NETBSD)\n#define SANITIZER_INTERCEPT_MCHECK_MPROBE SI_LINUX_NOT_ANDROID\n#define SANITIZER_INTERCEPT_WCSCAT SI_POSIX\n#define SANITIZER_INTERCEPT_WCSDUP SI_POSIX\n#define SANITIZER_INTERCEPT_SIGNAL_AND_SIGACTION (!SI_WINDOWS && SI_NOT_FUCHSIA)\n#define SANITIZER_INTERCEPT_BSD_SIGNAL SI_ANDROID\n\n#define SANITIZER_INTERCEPT_ACCT (SI_NETBSD || SI_FREEBSD)\n#define SANITIZER_INTERCEPT_USER_FROM_UID SI_NETBSD\n#define SANITIZER_INTERCEPT_UID_FROM_USER SI_NETBSD\n#define SANITIZER_INTERCEPT_GROUP_FROM_GID SI_NETBSD\n#define SANITIZER_INTERCEPT_GID_FROM_GROUP SI_NETBSD\n#define SANITIZER_INTERCEPT_ACCESS (SI_NETBSD || SI_FREEBSD)\n#define SANITIZER_INTERCEPT_FACCESSAT (SI_NETBSD || SI_FREEBSD)\n#define SANITIZER_INTERCEPT_GETGROUPLIST \\\n  (SI_NETBSD || SI_FREEBSD || SI_LINUX)\n#define SANITIZER_INTERCEPT_STRLCPY \\\n  (SI_NETBSD || SI_FREEBSD || SI_MAC || SI_ANDROID)\n\n#define SANITIZER_INTERCEPT_NAME_TO_HANDLE_AT SI_LINUX_NOT_ANDROID\n#define SANITIZER_INTERCEPT_OPEN_BY_HANDLE_AT SI_LINUX_NOT_ANDROID\n\n#define SANITIZER_INTERCEPT_READLINK SI_POSIX\n#if defined(__ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__) && \\\n    __ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__ < 101000\n#define SI_MAC_DEPLOYMENT_BELOW_10_10 1\n#else\n#define SI_MAC_DEPLOYMENT_BELOW_10_10 0\n#endif\n#define SANITIZER_INTERCEPT_READLINKAT \\\n  (SI_POSIX && !SI_MAC_DEPLOYMENT_BELOW_10_10)\n\n#define SANITIZER_INTERCEPT_DEVNAME (SI_NETBSD || SI_FREEBSD)\n#define SANITIZER_INTERCEPT_DEVNAME_R (SI_NETBSD || SI_FREEBSD)\n#define SANITIZER_INTERCEPT_FGETLN (SI_NETBSD || SI_FREEBSD)\n#define SANITIZER_INTERCEPT_STRMODE (SI_NETBSD || SI_FREEBSD)\n#define SANITIZER_INTERCEPT_TTYENT (SI_NETBSD || SI_FREEBSD)\n#define SANITIZER_INTERCEPT_TTYENTPATH SI_NETBSD\n#define SANITIZER_INTERCEPT_PROTOENT (SI_LINUX || 
SI_NETBSD || SI_FREEBSD)\n#define SANITIZER_INTERCEPT_PROTOENT_R SI_GLIBC\n#define SANITIZER_INTERCEPT_NETENT (SI_LINUX || SI_NETBSD || SI_FREEBSD)\n#define SANITIZER_INTERCEPT_SETVBUF \\\n  (SI_NETBSD || SI_FREEBSD || SI_LINUX || SI_MAC)\n#define SANITIZER_INTERCEPT_GETMNTINFO (SI_NETBSD || SI_FREEBSD || SI_MAC)\n#define SANITIZER_INTERCEPT_MI_VECTOR_HASH SI_NETBSD\n#define SANITIZER_INTERCEPT_GETVFSSTAT SI_NETBSD\n#define SANITIZER_INTERCEPT_REGEX (SI_NETBSD || SI_FREEBSD || SI_LINUX)\n#define SANITIZER_INTERCEPT_REGEXSUB SI_NETBSD\n#define SANITIZER_INTERCEPT_FTS (SI_NETBSD || SI_FREEBSD)\n#define SANITIZER_INTERCEPT_SYSCTL (SI_NETBSD || SI_FREEBSD || SI_MAC)\n#define SANITIZER_INTERCEPT_ASYSCTL SI_NETBSD\n#define SANITIZER_INTERCEPT_SYSCTLGETMIBINFO SI_NETBSD\n#define SANITIZER_INTERCEPT_NL_LANGINFO (SI_NETBSD || SI_FREEBSD || SI_MAC)\n#define SANITIZER_INTERCEPT_MODCTL SI_NETBSD\n#define SANITIZER_INTERCEPT_CAPSICUM SI_FREEBSD\n#define SANITIZER_INTERCEPT_STRTONUM (SI_NETBSD || SI_FREEBSD)\n#define SANITIZER_INTERCEPT_FPARSELN (SI_NETBSD || SI_FREEBSD)\n#define SANITIZER_INTERCEPT_STATVFS1 SI_NETBSD\n#define SANITIZER_INTERCEPT_STRTOI SI_NETBSD\n#define SANITIZER_INTERCEPT_CAPSICUM SI_FREEBSD\n#define SANITIZER_INTERCEPT_SHA1 SI_NETBSD\n#define SANITIZER_INTERCEPT_MD4 SI_NETBSD\n#define SANITIZER_INTERCEPT_RMD160 SI_NETBSD\n#define SANITIZER_INTERCEPT_MD5 (SI_NETBSD || SI_FREEBSD)\n#define SANITIZER_INTERCEPT_FSEEK (SI_NETBSD || SI_FREEBSD)\n#define SANITIZER_INTERCEPT_MD2 SI_NETBSD\n#define SANITIZER_INTERCEPT_SHA2 (SI_NETBSD || SI_FREEBSD)\n#define SANITIZER_INTERCEPT_CDB SI_NETBSD\n#define SANITIZER_INTERCEPT_VIS (SI_NETBSD || SI_FREEBSD)\n#define SANITIZER_INTERCEPT_POPEN SI_POSIX\n#define SANITIZER_INTERCEPT_POPENVE SI_NETBSD\n#define SANITIZER_INTERCEPT_PCLOSE SI_POSIX\n#define SANITIZER_INTERCEPT_FUNOPEN (SI_NETBSD || SI_FREEBSD)\n#define SANITIZER_INTERCEPT_FUNOPEN2 SI_NETBSD\n#define SANITIZER_INTERCEPT_GETFSENT (SI_FREEBSD || SI_NETBSD || 
SI_MAC)\n#define SANITIZER_INTERCEPT_ARC4RANDOM (SI_FREEBSD || SI_NETBSD || SI_MAC)\n#define SANITIZER_INTERCEPT_FDEVNAME SI_FREEBSD\n#define SANITIZER_INTERCEPT_GETUSERSHELL (SI_POSIX && !SI_ANDROID)\n#define SANITIZER_INTERCEPT_SL_INIT (SI_FREEBSD || SI_NETBSD)\n#define SANITIZER_INTERCEPT_CRYPT (SI_POSIX && !SI_ANDROID)\n#define SANITIZER_INTERCEPT_CRYPT_R (SI_LINUX && !SI_ANDROID)\n\n#define SANITIZER_INTERCEPT_GETRANDOM \\\n  ((SI_LINUX && __GLIBC_PREREQ(2, 25)) || SI_FREEBSD)\n#define SANITIZER_INTERCEPT___CXA_ATEXIT SI_NETBSD\n#define SANITIZER_INTERCEPT_ATEXIT SI_NETBSD\n#define SANITIZER_INTERCEPT_PTHREAD_ATFORK SI_NETBSD\n#define SANITIZER_INTERCEPT_GETENTROPY SI_FREEBSD\n#define SANITIZER_INTERCEPT_QSORT \\\n  (SI_POSIX && !SI_IOSSIM && !SI_WATCHOS && !SI_TVOS && !SI_ANDROID)\n#define SANITIZER_INTERCEPT_QSORT_R SI_GLIBC\n#define SANITIZER_INTERCEPT_BSEARCH \\\n  (SI_POSIX && !SI_IOSSIM && !SI_WATCHOS && !SI_TVOS && !SI_ANDROID)\n// sigaltstack on i386 macOS cannot be intercepted due to setjmp()\n// calling it and assuming that it does not clobber registers.\n#define SANITIZER_INTERCEPT_SIGALTSTACK \\\n  (SI_POSIX && !(SANITIZER_MAC && SANITIZER_I386))\n#define SANITIZER_INTERCEPT_UNAME (SI_POSIX && !SI_FREEBSD)\n#define SANITIZER_INTERCEPT___XUNAME SI_FREEBSD\n#define SANITIZER_INTERCEPT_FLOPEN SI_FREEBSD\n\n// This macro gives a way for downstream users to override the above\n// interceptor macros irrespective of the platform they are on. They have\n// to do two things:\n// 1. Build compiler-rt with -DSANITIZER_OVERRIDE_INTERCEPTORS.\n// 2. 
Provide a header file named sanitizer_intercept_overriders.h in the\n//    include path for their compiler-rt build.\n// An example of an overrider for strlen interceptor that one can list in\n// sanitizer_intercept_overriders.h is as follows:\n//\n// #ifdef SANITIZER_INTERCEPT_STRLEN\n// #undef SANITIZER_INTERCEPT_STRLEN\n// #define SANITIZER_INTERCEPT_STRLEN <value of choice>\n// #endif\n//\n// This \"feature\" is useful for downstream users who do not want some of\n// their libc functions to be intercepted. They can selectively disable\n// interception of those functions.\n#ifdef SANITIZER_OVERRIDE_INTERCEPTORS\n#include <sanitizer_intercept_overriders.h>\n#endif\n\n#endif  // #ifndef SANITIZER_PLATFORM_INTERCEPTORS_H\n"
  },
  {
    "path": "runtime/sanitizer_common/sanitizer_platform_limits_freebsd.cpp",
    "content": "//===-- sanitizer_platform_limits_freebsd.cpp -----------------------------===//\n//\n// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n// See https://llvm.org/LICENSE.txt for license information.\n// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n//\n//===----------------------------------------------------------------------===//\n//\n// This file is a part of Sanitizer common code.\n//\n// Sizes and layouts of platform-specific FreeBSD data structures.\n//===----------------------------------------------------------------------===//\n\n#include \"sanitizer_platform.h\"\n\n#if SANITIZER_FREEBSD\n\n#include <sys/capsicum.h>\n#include <sys/consio.h>\n#include <sys/filio.h>\n#include <sys/ipc.h>\n#include <sys/kbio.h>\n#include <sys/link_elf.h>\n#include <sys/mman.h>\n#include <sys/mount.h>\n#include <sys/mqueue.h>\n#include <sys/msg.h>\n#include <sys/mtio.h>\n#include <sys/ptrace.h>\n#include <sys/resource.h>\n#include <sys/signal.h>\n#include <sys/socket.h>\n#include <sys/sockio.h>\n#include <sys/soundcard.h>\n#include <sys/stat.h>\n#include <sys/statvfs.h>\n#include <sys/time.h>\n#pragma clang diagnostic push\n#pragma clang diagnostic ignored \"-W#warnings\"\n#include <sys/timeb.h>\n#pragma clang diagnostic pop\n#include <sys/times.h>\n#include <sys/timespec.h>\n#include <sys/types.h>\n#include <sys/ucontext.h>\n#include <sys/utsname.h>\n//\n#include <arpa/inet.h>\n#include <net/ethernet.h>\n#include <net/if.h>\n#include <net/ppp_defs.h>\n#include <net/route.h>\n#include <netdb.h>\n#include <netinet/in.h>\n#include <netinet/ip_mroute.h>\n//\n#include <dirent.h>\n#include <dlfcn.h>\n#include <fstab.h>\n#include <fts.h>\n#include <glob.h>\n#include <grp.h>\n#include <ifaddrs.h>\n#include <limits.h>\n#include <poll.h>\n#include <pthread.h>\n#include <pwd.h>\n#include <regex.h>\n#include <semaphore.h>\n#include <signal.h>\n#include <stddef.h>\n#include <md5.h>\n#include <sha224.h>\n#include <sha256.h>\n#include 
<sha384.h>\n#include <sha512.h>\n#include <stdio.h>\n#include <stringlist.h>\n#include <term.h>\n#include <termios.h>\n#include <time.h>\n#include <ttyent.h>\n#include <utime.h>\n#include <utmpx.h>\n#include <vis.h>\n#include <wchar.h>\n#include <wordexp.h>\n\n#define _KERNEL  // to declare 'shminfo' structure\n#include <sys/shm.h>\n#undef _KERNEL\n\n#undef IOC_DIRMASK\n\n// Include these after system headers to avoid name clashes and ambiguities.\n#include \"sanitizer_internal_defs.h\"\n#include \"sanitizer_libc.h\"\n#include \"sanitizer_platform_limits_freebsd.h\"\n\nnamespace __sanitizer {\nvoid *__sanitizer_get_link_map_by_dlopen_handle(void *handle) {\n  void *p = nullptr;\n  return internal_dlinfo(handle, RTLD_DI_LINKMAP, &p) == 0 ? p : nullptr;\n}\n\nunsigned struct_cap_rights_sz = sizeof(cap_rights_t);\nunsigned struct_utsname_sz = sizeof(struct utsname);\nunsigned struct_stat_sz = sizeof(struct stat);\nunsigned struct_rusage_sz = sizeof(struct rusage);\nunsigned struct_tm_sz = sizeof(struct tm);\nunsigned struct_passwd_sz = sizeof(struct passwd);\nunsigned struct_group_sz = sizeof(struct group);\nunsigned siginfo_t_sz = sizeof(siginfo_t);\nunsigned struct_sigaction_sz = sizeof(struct sigaction);\nunsigned struct_stack_t_sz = sizeof(stack_t);\nunsigned struct_itimerval_sz = sizeof(struct itimerval);\nunsigned pthread_t_sz = sizeof(pthread_t);\nunsigned pthread_mutex_t_sz = sizeof(pthread_mutex_t);\nunsigned pthread_cond_t_sz = sizeof(pthread_cond_t);\nunsigned pid_t_sz = sizeof(pid_t);\nunsigned timeval_sz = sizeof(timeval);\nunsigned uid_t_sz = sizeof(uid_t);\nunsigned gid_t_sz = sizeof(gid_t);\nunsigned fpos_t_sz = sizeof(fpos_t);\nunsigned mbstate_t_sz = sizeof(mbstate_t);\nunsigned sigset_t_sz = sizeof(sigset_t);\nunsigned struct_timezone_sz = sizeof(struct timezone);\nunsigned struct_tms_sz = sizeof(struct tms);\nunsigned struct_sigevent_sz = sizeof(struct sigevent);\nunsigned struct_sched_param_sz = sizeof(struct sched_param);\nunsigned 
struct_statfs_sz = sizeof(struct statfs);\nunsigned struct_sockaddr_sz = sizeof(struct sockaddr);\nunsigned ucontext_t_sz(void *ctx) { return sizeof(ucontext_t); }\nunsigned struct_rlimit_sz = sizeof(struct rlimit);\nunsigned struct_timespec_sz = sizeof(struct timespec);\nunsigned struct_utimbuf_sz = sizeof(struct utimbuf);\nunsigned struct_itimerspec_sz = sizeof(struct itimerspec);\nunsigned struct_timeb_sz = sizeof(struct timeb);\nunsigned struct_msqid_ds_sz = sizeof(struct msqid_ds);\nunsigned struct_mq_attr_sz = sizeof(struct mq_attr);\nunsigned struct_statvfs_sz = sizeof(struct statvfs);\nunsigned struct_shminfo_sz = sizeof(struct shminfo);\nunsigned struct_shm_info_sz = sizeof(struct shm_info);\nunsigned struct_regmatch_sz = sizeof(regmatch_t);\nunsigned struct_regex_sz = sizeof(regex_t);\nunsigned struct_fstab_sz = sizeof(struct fstab);\nunsigned struct_FTS_sz = sizeof(FTS);\nunsigned struct_FTSENT_sz = sizeof(FTSENT);\nunsigned struct_StringList_sz = sizeof(StringList);\n\nconst uptr sig_ign = (uptr)SIG_IGN;\nconst uptr sig_dfl = (uptr)SIG_DFL;\nconst uptr sig_err = (uptr)SIG_ERR;\nconst uptr sa_siginfo = (uptr)SA_SIGINFO;\n\nint shmctl_ipc_stat = (int)IPC_STAT;\nint shmctl_ipc_info = (int)IPC_INFO;\nint shmctl_shm_info = (int)SHM_INFO;\nint shmctl_shm_stat = (int)SHM_STAT;\nunsigned struct_utmpx_sz = sizeof(struct utmpx);\n\nint map_fixed = MAP_FIXED;\n\nint af_inet = (int)AF_INET;\nint af_inet6 = (int)AF_INET6;\n\nuptr __sanitizer_in_addr_sz(int af) {\n  if (af == AF_INET)\n    return sizeof(struct in_addr);\n  else if (af == AF_INET6)\n    return sizeof(struct in6_addr);\n  else\n    return 0;\n}\n\nunsigned struct_ElfW_Phdr_sz = sizeof(Elf_Phdr);\nint glob_nomatch = GLOB_NOMATCH;\nint glob_altdirfunc = GLOB_ALTDIRFUNC;\nconst int wordexp_wrde_dooffs = WRDE_DOOFFS;\n\nunsigned path_max = PATH_MAX;\n\nint struct_ttyent_sz = sizeof(struct ttyent);\n\n// ioctl arguments\nunsigned struct_ifreq_sz = sizeof(struct ifreq);\nunsigned struct_termios_sz = 
sizeof(struct termios);\nunsigned struct_winsize_sz = sizeof(struct winsize);\n#if SOUND_VERSION >= 0x040000\nunsigned struct_copr_buffer_sz = 0;\nunsigned struct_copr_debug_buf_sz = 0;\nunsigned struct_copr_msg_sz = 0;\n#else\nunsigned struct_copr_buffer_sz = sizeof(struct copr_buffer);\nunsigned struct_copr_debug_buf_sz = sizeof(struct copr_debug_buf);\nunsigned struct_copr_msg_sz = sizeof(struct copr_msg);\n#endif\nunsigned struct_midi_info_sz = sizeof(struct midi_info);\nunsigned struct_mtget_sz = sizeof(struct mtget);\nunsigned struct_mtop_sz = sizeof(struct mtop);\nunsigned struct_sbi_instrument_sz = sizeof(struct sbi_instrument);\nunsigned struct_seq_event_rec_sz = sizeof(struct seq_event_rec);\nunsigned struct_synth_info_sz = sizeof(struct synth_info);\nunsigned struct_audio_buf_info_sz = sizeof(struct audio_buf_info);\nunsigned struct_ppp_stats_sz = sizeof(struct ppp_stats);\nunsigned struct_sioc_sg_req_sz = sizeof(struct sioc_sg_req);\nunsigned struct_sioc_vif_req_sz = sizeof(struct sioc_vif_req);\nconst unsigned long __sanitizer_bufsiz = BUFSIZ;\n\nconst unsigned IOCTL_NOT_PRESENT = 0;\n\nunsigned IOCTL_FIOASYNC = FIOASYNC;\nunsigned IOCTL_FIOCLEX = FIOCLEX;\nunsigned IOCTL_FIOGETOWN = FIOGETOWN;\nunsigned IOCTL_FIONBIO = FIONBIO;\nunsigned IOCTL_FIONCLEX = FIONCLEX;\nunsigned IOCTL_FIOSETOWN = FIOSETOWN;\nunsigned IOCTL_SIOCADDMULTI = SIOCADDMULTI;\nunsigned IOCTL_SIOCATMARK = SIOCATMARK;\nunsigned IOCTL_SIOCDELMULTI = SIOCDELMULTI;\nunsigned IOCTL_SIOCGIFADDR = SIOCGIFADDR;\nunsigned IOCTL_SIOCGIFBRDADDR = SIOCGIFBRDADDR;\nunsigned IOCTL_SIOCGIFCONF = SIOCGIFCONF;\nunsigned IOCTL_SIOCGIFDSTADDR = SIOCGIFDSTADDR;\nunsigned IOCTL_SIOCGIFFLAGS = SIOCGIFFLAGS;\nunsigned IOCTL_SIOCGIFMETRIC = SIOCGIFMETRIC;\nunsigned IOCTL_SIOCGIFMTU = SIOCGIFMTU;\nunsigned IOCTL_SIOCGIFNETMASK = SIOCGIFNETMASK;\nunsigned IOCTL_SIOCGPGRP = SIOCGPGRP;\nunsigned IOCTL_SIOCSIFADDR = SIOCSIFADDR;\nunsigned IOCTL_SIOCSIFBRDADDR = SIOCSIFBRDADDR;\nunsigned IOCTL_SIOCSIFDSTADDR = 
SIOCSIFDSTADDR;\nunsigned IOCTL_SIOCSIFFLAGS = SIOCSIFFLAGS;\nunsigned IOCTL_SIOCSIFMETRIC = SIOCSIFMETRIC;\nunsigned IOCTL_SIOCSIFMTU = SIOCSIFMTU;\nunsigned IOCTL_SIOCSIFNETMASK = SIOCSIFNETMASK;\nunsigned IOCTL_SIOCSPGRP = SIOCSPGRP;\nunsigned IOCTL_TIOCCONS = TIOCCONS;\nunsigned IOCTL_TIOCEXCL = TIOCEXCL;\nunsigned IOCTL_TIOCGETD = TIOCGETD;\nunsigned IOCTL_TIOCGPGRP = TIOCGPGRP;\nunsigned IOCTL_TIOCGWINSZ = TIOCGWINSZ;\nunsigned IOCTL_TIOCMBIC = TIOCMBIC;\nunsigned IOCTL_TIOCMBIS = TIOCMBIS;\nunsigned IOCTL_TIOCMGET = TIOCMGET;\nunsigned IOCTL_TIOCMSET = TIOCMSET;\nunsigned IOCTL_TIOCNOTTY = TIOCNOTTY;\nunsigned IOCTL_TIOCNXCL = TIOCNXCL;\nunsigned IOCTL_TIOCOUTQ = TIOCOUTQ;\nunsigned IOCTL_TIOCPKT = TIOCPKT;\nunsigned IOCTL_TIOCSCTTY = TIOCSCTTY;\nunsigned IOCTL_TIOCSETD = TIOCSETD;\nunsigned IOCTL_TIOCSPGRP = TIOCSPGRP;\nunsigned IOCTL_TIOCSTI = TIOCSTI;\nunsigned IOCTL_TIOCSWINSZ = TIOCSWINSZ;\nunsigned IOCTL_SIOCGETSGCNT = SIOCGETSGCNT;\nunsigned IOCTL_SIOCGETVIFCNT = SIOCGETVIFCNT;\nunsigned IOCTL_MTIOCGET = MTIOCGET;\nunsigned IOCTL_MTIOCTOP = MTIOCTOP;\nunsigned IOCTL_SNDCTL_DSP_GETBLKSIZE = SNDCTL_DSP_GETBLKSIZE;\nunsigned IOCTL_SNDCTL_DSP_GETFMTS = SNDCTL_DSP_GETFMTS;\nunsigned IOCTL_SNDCTL_DSP_NONBLOCK = SNDCTL_DSP_NONBLOCK;\nunsigned IOCTL_SNDCTL_DSP_POST = SNDCTL_DSP_POST;\nunsigned IOCTL_SNDCTL_DSP_RESET = SNDCTL_DSP_RESET;\nunsigned IOCTL_SNDCTL_DSP_SETFMT = SNDCTL_DSP_SETFMT;\nunsigned IOCTL_SNDCTL_DSP_SETFRAGMENT = SNDCTL_DSP_SETFRAGMENT;\nunsigned IOCTL_SNDCTL_DSP_SPEED = SNDCTL_DSP_SPEED;\nunsigned IOCTL_SNDCTL_DSP_STEREO = SNDCTL_DSP_STEREO;\nunsigned IOCTL_SNDCTL_DSP_SUBDIVIDE = SNDCTL_DSP_SUBDIVIDE;\nunsigned IOCTL_SNDCTL_DSP_SYNC = SNDCTL_DSP_SYNC;\nunsigned IOCTL_SNDCTL_FM_4OP_ENABLE = SNDCTL_FM_4OP_ENABLE;\nunsigned IOCTL_SNDCTL_FM_LOAD_INSTR = SNDCTL_FM_LOAD_INSTR;\nunsigned IOCTL_SNDCTL_MIDI_INFO = SNDCTL_MIDI_INFO;\nunsigned IOCTL_SNDCTL_MIDI_PRETIME = SNDCTL_MIDI_PRETIME;\nunsigned IOCTL_SNDCTL_SEQ_CTRLRATE = 
SNDCTL_SEQ_CTRLRATE;\nunsigned IOCTL_SNDCTL_SEQ_GETINCOUNT = SNDCTL_SEQ_GETINCOUNT;\nunsigned IOCTL_SNDCTL_SEQ_GETOUTCOUNT = SNDCTL_SEQ_GETOUTCOUNT;\nunsigned IOCTL_SNDCTL_SEQ_NRMIDIS = SNDCTL_SEQ_NRMIDIS;\nunsigned IOCTL_SNDCTL_SEQ_NRSYNTHS = SNDCTL_SEQ_NRSYNTHS;\nunsigned IOCTL_SNDCTL_SEQ_OUTOFBAND = SNDCTL_SEQ_OUTOFBAND;\nunsigned IOCTL_SNDCTL_SEQ_PANIC = SNDCTL_SEQ_PANIC;\nunsigned IOCTL_SNDCTL_SEQ_PERCMODE = SNDCTL_SEQ_PERCMODE;\nunsigned IOCTL_SNDCTL_SEQ_RESET = SNDCTL_SEQ_RESET;\nunsigned IOCTL_SNDCTL_SEQ_RESETSAMPLES = SNDCTL_SEQ_RESETSAMPLES;\nunsigned IOCTL_SNDCTL_SEQ_SYNC = SNDCTL_SEQ_SYNC;\nunsigned IOCTL_SNDCTL_SEQ_TESTMIDI = SNDCTL_SEQ_TESTMIDI;\nunsigned IOCTL_SNDCTL_SEQ_THRESHOLD = SNDCTL_SEQ_THRESHOLD;\nunsigned IOCTL_SNDCTL_SYNTH_INFO = SNDCTL_SYNTH_INFO;\nunsigned IOCTL_SNDCTL_SYNTH_MEMAVL = SNDCTL_SYNTH_MEMAVL;\nunsigned IOCTL_SNDCTL_TMR_CONTINUE = SNDCTL_TMR_CONTINUE;\nunsigned IOCTL_SNDCTL_TMR_METRONOME = SNDCTL_TMR_METRONOME;\nunsigned IOCTL_SNDCTL_TMR_SELECT = SNDCTL_TMR_SELECT;\nunsigned IOCTL_SNDCTL_TMR_SOURCE = SNDCTL_TMR_SOURCE;\nunsigned IOCTL_SNDCTL_TMR_START = SNDCTL_TMR_START;\nunsigned IOCTL_SNDCTL_TMR_STOP = SNDCTL_TMR_STOP;\nunsigned IOCTL_SNDCTL_TMR_TEMPO = SNDCTL_TMR_TEMPO;\nunsigned IOCTL_SNDCTL_TMR_TIMEBASE = SNDCTL_TMR_TIMEBASE;\nunsigned IOCTL_SOUND_MIXER_READ_ALTPCM = SOUND_MIXER_READ_ALTPCM;\nunsigned IOCTL_SOUND_MIXER_READ_BASS = SOUND_MIXER_READ_BASS;\nunsigned IOCTL_SOUND_MIXER_READ_CAPS = SOUND_MIXER_READ_CAPS;\nunsigned IOCTL_SOUND_MIXER_READ_CD = SOUND_MIXER_READ_CD;\nunsigned IOCTL_SOUND_MIXER_READ_DEVMASK = SOUND_MIXER_READ_DEVMASK;\nunsigned IOCTL_SOUND_MIXER_READ_ENHANCE = SOUND_MIXER_READ_ENHANCE;\nunsigned IOCTL_SOUND_MIXER_READ_IGAIN = SOUND_MIXER_READ_IGAIN;\nunsigned IOCTL_SOUND_MIXER_READ_IMIX = SOUND_MIXER_READ_IMIX;\nunsigned IOCTL_SOUND_MIXER_READ_LINE = SOUND_MIXER_READ_LINE;\nunsigned IOCTL_SOUND_MIXER_READ_LINE1 = SOUND_MIXER_READ_LINE1;\nunsigned IOCTL_SOUND_MIXER_READ_LINE2 = 
SOUND_MIXER_READ_LINE2;\nunsigned IOCTL_SOUND_MIXER_READ_LINE3 = SOUND_MIXER_READ_LINE3;\nunsigned IOCTL_SOUND_MIXER_READ_LOUD = SOUND_MIXER_READ_LOUD;\nunsigned IOCTL_SOUND_MIXER_READ_MIC = SOUND_MIXER_READ_MIC;\nunsigned IOCTL_SOUND_MIXER_READ_MUTE = SOUND_MIXER_READ_MUTE;\nunsigned IOCTL_SOUND_MIXER_READ_OGAIN = SOUND_MIXER_READ_OGAIN;\nunsigned IOCTL_SOUND_MIXER_READ_PCM = SOUND_MIXER_READ_PCM;\nunsigned IOCTL_SOUND_MIXER_READ_RECLEV = SOUND_MIXER_READ_RECLEV;\nunsigned IOCTL_SOUND_MIXER_READ_RECMASK = SOUND_MIXER_READ_RECMASK;\nunsigned IOCTL_SOUND_MIXER_READ_RECSRC = SOUND_MIXER_READ_RECSRC;\nunsigned IOCTL_SOUND_MIXER_READ_SPEAKER = SOUND_MIXER_READ_SPEAKER;\nunsigned IOCTL_SOUND_MIXER_READ_STEREODEVS = SOUND_MIXER_READ_STEREODEVS;\nunsigned IOCTL_SOUND_MIXER_READ_SYNTH = SOUND_MIXER_READ_SYNTH;\nunsigned IOCTL_SOUND_MIXER_READ_TREBLE = SOUND_MIXER_READ_TREBLE;\nunsigned IOCTL_SOUND_MIXER_READ_VOLUME = SOUND_MIXER_READ_VOLUME;\nunsigned IOCTL_SOUND_MIXER_WRITE_ALTPCM = SOUND_MIXER_WRITE_ALTPCM;\nunsigned IOCTL_SOUND_MIXER_WRITE_BASS = SOUND_MIXER_WRITE_BASS;\nunsigned IOCTL_SOUND_MIXER_WRITE_CD = SOUND_MIXER_WRITE_CD;\nunsigned IOCTL_SOUND_MIXER_WRITE_ENHANCE = SOUND_MIXER_WRITE_ENHANCE;\nunsigned IOCTL_SOUND_MIXER_WRITE_IGAIN = SOUND_MIXER_WRITE_IGAIN;\nunsigned IOCTL_SOUND_MIXER_WRITE_IMIX = SOUND_MIXER_WRITE_IMIX;\nunsigned IOCTL_SOUND_MIXER_WRITE_LINE = SOUND_MIXER_WRITE_LINE;\nunsigned IOCTL_SOUND_MIXER_WRITE_LINE1 = SOUND_MIXER_WRITE_LINE1;\nunsigned IOCTL_SOUND_MIXER_WRITE_LINE2 = SOUND_MIXER_WRITE_LINE2;\nunsigned IOCTL_SOUND_MIXER_WRITE_LINE3 = SOUND_MIXER_WRITE_LINE3;\nunsigned IOCTL_SOUND_MIXER_WRITE_LOUD = SOUND_MIXER_WRITE_LOUD;\nunsigned IOCTL_SOUND_MIXER_WRITE_MIC = SOUND_MIXER_WRITE_MIC;\nunsigned IOCTL_SOUND_MIXER_WRITE_MUTE = SOUND_MIXER_WRITE_MUTE;\nunsigned IOCTL_SOUND_MIXER_WRITE_OGAIN = SOUND_MIXER_WRITE_OGAIN;\nunsigned IOCTL_SOUND_MIXER_WRITE_PCM = SOUND_MIXER_WRITE_PCM;\nunsigned IOCTL_SOUND_MIXER_WRITE_RECLEV = 
SOUND_MIXER_WRITE_RECLEV;\nunsigned IOCTL_SOUND_MIXER_WRITE_RECSRC = SOUND_MIXER_WRITE_RECSRC;\nunsigned IOCTL_SOUND_MIXER_WRITE_SPEAKER = SOUND_MIXER_WRITE_SPEAKER;\nunsigned IOCTL_SOUND_MIXER_WRITE_SYNTH = SOUND_MIXER_WRITE_SYNTH;\nunsigned IOCTL_SOUND_MIXER_WRITE_TREBLE = SOUND_MIXER_WRITE_TREBLE;\nunsigned IOCTL_SOUND_MIXER_WRITE_VOLUME = SOUND_MIXER_WRITE_VOLUME;\nunsigned IOCTL_VT_ACTIVATE = VT_ACTIVATE;\nunsigned IOCTL_VT_GETMODE = VT_GETMODE;\nunsigned IOCTL_VT_OPENQRY = VT_OPENQRY;\nunsigned IOCTL_VT_RELDISP = VT_RELDISP;\nunsigned IOCTL_VT_SETMODE = VT_SETMODE;\nunsigned IOCTL_VT_WAITACTIVE = VT_WAITACTIVE;\nunsigned IOCTL_GIO_SCRNMAP = GIO_SCRNMAP;\nunsigned IOCTL_KDDISABIO = KDDISABIO;\nunsigned IOCTL_KDENABIO = KDENABIO;\nunsigned IOCTL_KDGETLED = KDGETLED;\nunsigned IOCTL_KDGETMODE = KDGETMODE;\nunsigned IOCTL_KDGKBMODE = KDGKBMODE;\nunsigned IOCTL_KDGKBTYPE = KDGKBTYPE;\nunsigned IOCTL_KDMKTONE = KDMKTONE;\nunsigned IOCTL_KDSETLED = KDSETLED;\nunsigned IOCTL_KDSETMODE = KDSETMODE;\nunsigned IOCTL_KDSKBMODE = KDSKBMODE;\nunsigned IOCTL_KIOCSOUND = KIOCSOUND;\nunsigned IOCTL_PIO_SCRNMAP = PIO_SCRNMAP;\nunsigned IOCTL_SNDCTL_DSP_GETISPACE = SNDCTL_DSP_GETISPACE;\n\nconst int si_SEGV_MAPERR = SEGV_MAPERR;\nconst int si_SEGV_ACCERR = SEGV_ACCERR;\nconst int unvis_valid = UNVIS_VALID;\nconst int unvis_validpush = UNVIS_VALIDPUSH;\n\nconst unsigned MD5_CTX_sz = sizeof(MD5_CTX);\nconst unsigned MD5_return_length = MD5_DIGEST_STRING_LENGTH;\n\n#define SHA2_CONST(LEN)                                                      \\\n  const unsigned SHA##LEN##_CTX_sz = sizeof(SHA##LEN##_CTX);                 \\\n  const unsigned SHA##LEN##_return_length = SHA##LEN##_DIGEST_STRING_LENGTH; \\\n  const unsigned SHA##LEN##_block_length = SHA##LEN##_BLOCK_LENGTH;          \\\n  const unsigned SHA##LEN##_digest_length = SHA##LEN##_DIGEST_LENGTH\n\nSHA2_CONST(224);\nSHA2_CONST(256);\nSHA2_CONST(384);\nSHA2_CONST(512);\n\n#undef SHA2_CONST\n}  // namespace __sanitizer\n\nusing 
namespace __sanitizer;\n\nCOMPILER_CHECK(sizeof(__sanitizer_pthread_attr_t) >= sizeof(pthread_attr_t));\n\nCOMPILER_CHECK(sizeof(socklen_t) == sizeof(unsigned));\nCHECK_TYPE_SIZE(pthread_key_t);\n\n// There are more undocumented fields in dl_phdr_info that we are not interested\n// in.\nCOMPILER_CHECK(sizeof(__sanitizer_dl_phdr_info) <= sizeof(dl_phdr_info));\nCHECK_SIZE_AND_OFFSET(dl_phdr_info, dlpi_addr);\nCHECK_SIZE_AND_OFFSET(dl_phdr_info, dlpi_name);\nCHECK_SIZE_AND_OFFSET(dl_phdr_info, dlpi_phdr);\nCHECK_SIZE_AND_OFFSET(dl_phdr_info, dlpi_phnum);\n\nCHECK_TYPE_SIZE(glob_t);\nCHECK_SIZE_AND_OFFSET(glob_t, gl_pathc);\nCHECK_SIZE_AND_OFFSET(glob_t, gl_pathv);\nCHECK_SIZE_AND_OFFSET(glob_t, gl_offs);\nCHECK_SIZE_AND_OFFSET(glob_t, gl_flags);\nCHECK_SIZE_AND_OFFSET(glob_t, gl_closedir);\nCHECK_SIZE_AND_OFFSET(glob_t, gl_readdir);\nCHECK_SIZE_AND_OFFSET(glob_t, gl_opendir);\nCHECK_SIZE_AND_OFFSET(glob_t, gl_lstat);\nCHECK_SIZE_AND_OFFSET(glob_t, gl_stat);\n\nCHECK_TYPE_SIZE(addrinfo);\nCHECK_SIZE_AND_OFFSET(addrinfo, ai_flags);\nCHECK_SIZE_AND_OFFSET(addrinfo, ai_family);\nCHECK_SIZE_AND_OFFSET(addrinfo, ai_socktype);\nCHECK_SIZE_AND_OFFSET(addrinfo, ai_protocol);\nCHECK_SIZE_AND_OFFSET(addrinfo, ai_protocol);\nCHECK_SIZE_AND_OFFSET(addrinfo, ai_addrlen);\nCHECK_SIZE_AND_OFFSET(addrinfo, ai_canonname);\nCHECK_SIZE_AND_OFFSET(addrinfo, ai_addr);\n\nCHECK_TYPE_SIZE(hostent);\nCHECK_SIZE_AND_OFFSET(hostent, h_name);\nCHECK_SIZE_AND_OFFSET(hostent, h_aliases);\nCHECK_SIZE_AND_OFFSET(hostent, h_addrtype);\nCHECK_SIZE_AND_OFFSET(hostent, h_length);\nCHECK_SIZE_AND_OFFSET(hostent, h_addr_list);\n\nCHECK_TYPE_SIZE(iovec);\nCHECK_SIZE_AND_OFFSET(iovec, iov_base);\nCHECK_SIZE_AND_OFFSET(iovec, iov_len);\n\nCHECK_TYPE_SIZE(msghdr);\nCHECK_SIZE_AND_OFFSET(msghdr, msg_name);\nCHECK_SIZE_AND_OFFSET(msghdr, msg_namelen);\nCHECK_SIZE_AND_OFFSET(msghdr, msg_iov);\nCHECK_SIZE_AND_OFFSET(msghdr, msg_iovlen);\nCHECK_SIZE_AND_OFFSET(msghdr, msg_control);\nCHECK_SIZE_AND_OFFSET(msghdr, 
msg_controllen);\nCHECK_SIZE_AND_OFFSET(msghdr, msg_flags);\n\nCHECK_TYPE_SIZE(cmsghdr);\nCHECK_SIZE_AND_OFFSET(cmsghdr, cmsg_len);\nCHECK_SIZE_AND_OFFSET(cmsghdr, cmsg_level);\nCHECK_SIZE_AND_OFFSET(cmsghdr, cmsg_type);\n\nCOMPILER_CHECK(sizeof(__sanitizer_dirent) <= sizeof(dirent));\nCHECK_SIZE_AND_OFFSET(dirent, d_ino);\nCHECK_SIZE_AND_OFFSET(dirent, d_reclen);\n\nCHECK_TYPE_SIZE(ifconf);\nCHECK_SIZE_AND_OFFSET(ifconf, ifc_len);\nCHECK_SIZE_AND_OFFSET(ifconf, ifc_ifcu);\n\nCHECK_TYPE_SIZE(pollfd);\nCHECK_SIZE_AND_OFFSET(pollfd, fd);\nCHECK_SIZE_AND_OFFSET(pollfd, events);\nCHECK_SIZE_AND_OFFSET(pollfd, revents);\n\nCHECK_TYPE_SIZE(nfds_t);\n\nCHECK_TYPE_SIZE(sigset_t);\n\nCOMPILER_CHECK(sizeof(__sanitizer_sigaction) == sizeof(struct sigaction));\n// Can't write checks for sa_handler and sa_sigaction due to them being\n// preprocessor macros.\nCHECK_STRUCT_SIZE_AND_OFFSET(sigaction, sa_mask);\n\nCHECK_TYPE_SIZE(wordexp_t);\nCHECK_SIZE_AND_OFFSET(wordexp_t, we_wordc);\nCHECK_SIZE_AND_OFFSET(wordexp_t, we_wordv);\nCHECK_SIZE_AND_OFFSET(wordexp_t, we_offs);\n\nCHECK_TYPE_SIZE(tm);\nCHECK_SIZE_AND_OFFSET(tm, tm_sec);\nCHECK_SIZE_AND_OFFSET(tm, tm_min);\nCHECK_SIZE_AND_OFFSET(tm, tm_hour);\nCHECK_SIZE_AND_OFFSET(tm, tm_mday);\nCHECK_SIZE_AND_OFFSET(tm, tm_mon);\nCHECK_SIZE_AND_OFFSET(tm, tm_year);\nCHECK_SIZE_AND_OFFSET(tm, tm_wday);\nCHECK_SIZE_AND_OFFSET(tm, tm_yday);\nCHECK_SIZE_AND_OFFSET(tm, tm_isdst);\nCHECK_SIZE_AND_OFFSET(tm, tm_gmtoff);\nCHECK_SIZE_AND_OFFSET(tm, tm_zone);\n\nCHECK_TYPE_SIZE(ether_addr);\n\nCHECK_TYPE_SIZE(ipc_perm);\nCHECK_SIZE_AND_OFFSET(ipc_perm, key);\nCHECK_SIZE_AND_OFFSET(ipc_perm, seq);\nCHECK_SIZE_AND_OFFSET(ipc_perm, uid);\nCHECK_SIZE_AND_OFFSET(ipc_perm, gid);\nCHECK_SIZE_AND_OFFSET(ipc_perm, cuid);\nCHECK_SIZE_AND_OFFSET(ipc_perm, cgid);\n\nCHECK_TYPE_SIZE(shmid_ds);\nCHECK_SIZE_AND_OFFSET(shmid_ds, shm_perm);\nCHECK_SIZE_AND_OFFSET(shmid_ds, shm_segsz);\nCHECK_SIZE_AND_OFFSET(shmid_ds, shm_atime);\nCHECK_SIZE_AND_OFFSET(shmid_ds, 
shm_dtime);\nCHECK_SIZE_AND_OFFSET(shmid_ds, shm_ctime);\nCHECK_SIZE_AND_OFFSET(shmid_ds, shm_cpid);\nCHECK_SIZE_AND_OFFSET(shmid_ds, shm_lpid);\nCHECK_SIZE_AND_OFFSET(shmid_ds, shm_nattch);\n\nCHECK_TYPE_SIZE(clock_t);\n\nCHECK_TYPE_SIZE(ifaddrs);\nCHECK_SIZE_AND_OFFSET(ifaddrs, ifa_next);\nCHECK_SIZE_AND_OFFSET(ifaddrs, ifa_name);\nCHECK_SIZE_AND_OFFSET(ifaddrs, ifa_addr);\nCHECK_SIZE_AND_OFFSET(ifaddrs, ifa_netmask);\n#undef ifa_dstaddr\nCHECK_SIZE_AND_OFFSET(ifaddrs, ifa_dstaddr);\nCHECK_SIZE_AND_OFFSET(ifaddrs, ifa_data);\n\nCHECK_TYPE_SIZE(timeb);\nCHECK_SIZE_AND_OFFSET(timeb, time);\nCHECK_SIZE_AND_OFFSET(timeb, millitm);\nCHECK_SIZE_AND_OFFSET(timeb, timezone);\nCHECK_SIZE_AND_OFFSET(timeb, dstflag);\n\nCHECK_TYPE_SIZE(passwd);\nCHECK_SIZE_AND_OFFSET(passwd, pw_name);\nCHECK_SIZE_AND_OFFSET(passwd, pw_passwd);\nCHECK_SIZE_AND_OFFSET(passwd, pw_uid);\nCHECK_SIZE_AND_OFFSET(passwd, pw_gid);\nCHECK_SIZE_AND_OFFSET(passwd, pw_dir);\nCHECK_SIZE_AND_OFFSET(passwd, pw_shell);\n\nCHECK_SIZE_AND_OFFSET(passwd, pw_gecos);\n\nCHECK_TYPE_SIZE(group);\nCHECK_SIZE_AND_OFFSET(group, gr_name);\nCHECK_SIZE_AND_OFFSET(group, gr_passwd);\nCHECK_SIZE_AND_OFFSET(group, gr_gid);\nCHECK_SIZE_AND_OFFSET(group, gr_mem);\n\n#if HAVE_RPC_XDR_H\nCHECK_TYPE_SIZE(XDR);\nCHECK_SIZE_AND_OFFSET(XDR, x_op);\nCHECK_SIZE_AND_OFFSET(XDR, x_ops);\nCHECK_SIZE_AND_OFFSET(XDR, x_public);\nCHECK_SIZE_AND_OFFSET(XDR, x_private);\nCHECK_SIZE_AND_OFFSET(XDR, x_base);\nCHECK_SIZE_AND_OFFSET(XDR, x_handy);\nCOMPILER_CHECK(__sanitizer_XDR_ENCODE == XDR_ENCODE);\nCOMPILER_CHECK(__sanitizer_XDR_DECODE == XDR_DECODE);\nCOMPILER_CHECK(__sanitizer_XDR_FREE == XDR_FREE);\n#endif\n\nCHECK_TYPE_SIZE(sem_t);\n\nCOMPILER_CHECK(sizeof(__sanitizer_cap_rights_t) >= sizeof(cap_rights_t));\n#endif  // SANITIZER_FREEBSD\n"
  },
  {
    "path": "runtime/sanitizer_common/sanitizer_platform_limits_freebsd.h",
    "content": "//===-- sanitizer_platform_limits_freebsd.h -------------------------------===//\n//\n// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n// See https://llvm.org/LICENSE.txt for license information.\n// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n//\n//===----------------------------------------------------------------------===//\n//\n// This file is a part of Sanitizer common code.\n//\n// Sizes and layouts of platform-specific FreeBSD data structures.\n//===----------------------------------------------------------------------===//\n\n#ifndef SANITIZER_PLATFORM_LIMITS_FREEBSD_H\n#define SANITIZER_PLATFORM_LIMITS_FREEBSD_H\n\n#if SANITIZER_FREEBSD\n\n#  include \"sanitizer_internal_defs.h\"\n#  include \"sanitizer_platform.h\"\n#  include \"sanitizer_platform_limits_posix.h\"\n\n// Get sys/_types.h, because that tells us whether 64-bit inodes are\n// used in struct dirent below.\n#  include <sys/_types.h>\n\nnamespace __sanitizer {\nvoid *__sanitizer_get_link_map_by_dlopen_handle(void *handle);\n#  define GET_LINK_MAP_BY_DLOPEN_HANDLE(handle) \\\n    (link_map *)__sanitizer_get_link_map_by_dlopen_handle(handle)\n\nextern unsigned struct_utsname_sz;\nextern unsigned struct_stat_sz;\n#  if defined(__powerpc64__)\nconst unsigned struct___old_kernel_stat_sz = 0;\n#  else\nconst unsigned struct___old_kernel_stat_sz = 32;\n#  endif\nextern unsigned struct_rusage_sz;\nextern unsigned siginfo_t_sz;\nextern unsigned struct_itimerval_sz;\nextern unsigned pthread_t_sz;\nextern unsigned pthread_mutex_t_sz;\nextern unsigned pthread_cond_t_sz;\nextern unsigned pid_t_sz;\nextern unsigned timeval_sz;\nextern unsigned uid_t_sz;\nextern unsigned gid_t_sz;\nextern unsigned fpos_t_sz;\nextern unsigned mbstate_t_sz;\nextern unsigned struct_timezone_sz;\nextern unsigned struct_tms_sz;\nextern unsigned struct_itimerspec_sz;\nextern unsigned struct_sigevent_sz;\nextern unsigned struct_stack_t_sz;\nextern unsigned 
struct_sched_param_sz;\nextern unsigned struct_statfs64_sz;\nextern unsigned struct_statfs_sz;\nextern unsigned struct_sockaddr_sz;\nunsigned ucontext_t_sz(void *ctx);\nextern unsigned struct_rlimit_sz;\nextern unsigned struct_utimbuf_sz;\nextern unsigned struct_timespec_sz;\nextern unsigned struct_regmatch_sz;\nextern unsigned struct_regex_sz;\nextern unsigned struct_FTS_sz;\nextern unsigned struct_FTSENT_sz;\nextern const int unvis_valid;\nextern const int unvis_validpush;\n\nstruct __sanitizer_iocb {\n  u64 aio_data;\n  u32 aio_key_or_aio_reserved1;  // Simply crazy.\n  u32 aio_reserved1_or_aio_key;  // Luckily, we don't need these.\n  u16 aio_lio_opcode;\n  s16 aio_reqprio;\n  u32 aio_fildes;\n  u64 aio_buf;\n  u64 aio_nbytes;\n  s64 aio_offset;\n  u64 aio_reserved2;\n  u64 aio_reserved3;\n};\n\nstruct __sanitizer_io_event {\n  u64 data;\n  u64 obj;\n  u64 res;\n  u64 res2;\n};\n\nconst unsigned iocb_cmd_pread = 0;\nconst unsigned iocb_cmd_pwrite = 1;\nconst unsigned iocb_cmd_preadv = 7;\nconst unsigned iocb_cmd_pwritev = 8;\n\nstruct __sanitizer___sysctl_args {\n  int *name;\n  int nlen;\n  void *oldval;\n  uptr *oldlenp;\n  void *newval;\n  uptr newlen;\n  unsigned long ___unused[4];\n};\n\nstruct __sanitizer_ipc_perm {\n  unsigned int cuid;\n  unsigned int cgid;\n  unsigned int uid;\n  unsigned int gid;\n  unsigned short mode;\n  unsigned short seq;\n  long key;\n};\n\nstruct __sanitizer_protoent {\n  char *p_name;\n  char **p_aliases;\n  int p_proto;\n};\n\nstruct __sanitizer_netent {\n  char *n_name;\n  char **n_aliases;\n  int n_addrtype;\n  u32 n_net;\n};\n\n#  if !defined(__i386__)\ntypedef long long __sanitizer_time_t;\n#  else\ntypedef long __sanitizer_time_t;\n#  endif\n\nstruct __sanitizer_shmid_ds {\n  __sanitizer_ipc_perm shm_perm;\n  unsigned long shm_segsz;\n  unsigned int shm_lpid;\n  unsigned int shm_cpid;\n  int shm_nattch;\n  __sanitizer_time_t shm_atime;\n  __sanitizer_time_t shm_dtime;\n  __sanitizer_time_t shm_ctime;\n};\n\nextern 
unsigned struct_msqid_ds_sz;\nextern unsigned struct_mq_attr_sz;\nextern unsigned struct_timeb_sz;\nextern unsigned struct_statvfs_sz;\n\nstruct __sanitizer_iovec {\n  void *iov_base;\n  uptr iov_len;\n};\n\nstruct __sanitizer_ifaddrs {\n  struct __sanitizer_ifaddrs *ifa_next;\n  char *ifa_name;\n  unsigned int ifa_flags;\n  void *ifa_addr;     // (struct sockaddr *)\n  void *ifa_netmask;  // (struct sockaddr *)\n#  undef ifa_dstaddr\n  void *ifa_dstaddr;  // (struct sockaddr *)\n  void *ifa_data;\n};\n\ntypedef unsigned __sanitizer_pthread_key_t;\n\nstruct __sanitizer_passwd {\n  char *pw_name;\n  char *pw_passwd;\n  int pw_uid;\n  int pw_gid;\n  __sanitizer_time_t pw_change;\n  char *pw_class;\n  char *pw_gecos;\n  char *pw_dir;\n  char *pw_shell;\n  __sanitizer_time_t pw_expire;\n  int pw_fields;\n};\n\nstruct __sanitizer_group {\n  char *gr_name;\n  char *gr_passwd;\n  int gr_gid;\n  char **gr_mem;\n};\n\ntypedef long __sanitizer_suseconds_t;\n\nstruct __sanitizer_timeval {\n  __sanitizer_time_t tv_sec;\n  __sanitizer_suseconds_t tv_usec;\n};\n\nstruct __sanitizer_itimerval {\n  struct __sanitizer_timeval it_interval;\n  struct __sanitizer_timeval it_value;\n};\n\nstruct __sanitizer_timeb {\n  __sanitizer_time_t time;\n  unsigned short millitm;\n  short timezone;\n  short dstflag;\n};\n\nstruct __sanitizer_ether_addr {\n  u8 octet[6];\n};\n\nstruct __sanitizer_tm {\n  int tm_sec;\n  int tm_min;\n  int tm_hour;\n  int tm_mday;\n  int tm_mon;\n  int tm_year;\n  int tm_wday;\n  int tm_yday;\n  int tm_isdst;\n  long int tm_gmtoff;\n  const char *tm_zone;\n};\n\nstruct __sanitizer_msghdr {\n  void *msg_name;\n  unsigned msg_namelen;\n  struct __sanitizer_iovec *msg_iov;\n  unsigned msg_iovlen;\n  void *msg_control;\n  unsigned msg_controllen;\n  int msg_flags;\n};\n\nstruct __sanitizer_cmsghdr {\n  unsigned cmsg_len;\n  int cmsg_level;\n  int cmsg_type;\n};\n\nstruct __sanitizer_dirent {\n#  if defined(__INO64)\n  unsigned long long d_fileno;\n  unsigned long long 
d_off;\n#  else\n  unsigned int d_fileno;\n#  endif\n  unsigned short d_reclen;\n  // more fields that we don't care about\n};\n\n// 'clock_t' is 32 bits wide on x64 FreeBSD\ntypedef int __sanitizer_clock_t;\ntypedef int __sanitizer_clockid_t;\n\n#  if defined(_LP64) || defined(__x86_64__) || defined(__powerpc__) || \\\n      defined(__mips__)\ntypedef unsigned __sanitizer___kernel_uid_t;\ntypedef unsigned __sanitizer___kernel_gid_t;\n#  else\ntypedef unsigned short __sanitizer___kernel_uid_t;\ntypedef unsigned short __sanitizer___kernel_gid_t;\n#  endif\ntypedef long long __sanitizer___kernel_off_t;\n\n#  if defined(__powerpc__) || defined(__mips__)\ntypedef unsigned int __sanitizer___kernel_old_uid_t;\ntypedef unsigned int __sanitizer___kernel_old_gid_t;\n#  else\ntypedef unsigned short __sanitizer___kernel_old_uid_t;\ntypedef unsigned short __sanitizer___kernel_old_gid_t;\n#  endif\n\ntypedef long long __sanitizer___kernel_loff_t;\ntypedef struct {\n  unsigned long fds_bits[1024 / (8 * sizeof(long))];\n} __sanitizer___kernel_fd_set;\n\n// This thing depends on the platform. We are only interested in the upper\n// limit. 
Verified with a compiler assert in .cpp.\nunion __sanitizer_pthread_attr_t {\n  char size[128];\n  void *align;\n};\n\nconst unsigned old_sigset_t_sz = sizeof(unsigned long);\n\nstruct __sanitizer_sigset_t {\n  // uint32_t * 4\n  unsigned int __bits[4];\n};\n\ntypedef __sanitizer_sigset_t __sanitizer_kernel_sigset_t;\n\nstruct __sanitizer_siginfo {\n  // The size is determined by looking at sizeof of real siginfo_t on linux.\n  u64 opaque[128 / sizeof(u64)];\n};\n\nusing __sanitizer_sighandler_ptr = void (*)(int sig);\nusing __sanitizer_sigactionhandler_ptr = void (*)(int sig,\n                                                  __sanitizer_siginfo *siginfo,\n                                                  void *uctx);\n\nstruct __sanitizer_sigaction {\n  union {\n    __sanitizer_sigactionhandler_ptr sigaction;\n    __sanitizer_sighandler_ptr handler;\n  };\n  int sa_flags;\n  __sanitizer_sigset_t sa_mask;\n};\n\nstruct __sanitizer_sem_t {\n  u32 data[4];\n};\n\nextern const uptr sig_ign;\nextern const uptr sig_dfl;\nextern const uptr sig_err;\nextern const uptr sa_siginfo;\n\nextern int af_inet;\nextern int af_inet6;\nuptr __sanitizer_in_addr_sz(int af);\n\nstruct __sanitizer_dl_phdr_info {\n  uptr dlpi_addr;\n  const char *dlpi_name;\n  const void *dlpi_phdr;\n  short dlpi_phnum;\n};\n\nextern unsigned struct_ElfW_Phdr_sz;\n\nstruct __sanitizer_addrinfo {\n  int ai_flags;\n  int ai_family;\n  int ai_socktype;\n  int ai_protocol;\n  unsigned ai_addrlen;\n  char *ai_canonname;\n  void *ai_addr;\n  struct __sanitizer_addrinfo *ai_next;\n};\n\nstruct __sanitizer_hostent {\n  char *h_name;\n  char **h_aliases;\n  int h_addrtype;\n  int h_length;\n  char **h_addr_list;\n};\n\nstruct __sanitizer_pollfd {\n  int fd;\n  short events;\n  short revents;\n};\n\ntypedef unsigned __sanitizer_nfds_t;\n\nstruct __sanitizer_glob_t {\n  uptr gl_pathc;\n  uptr gl_matchc;\n  uptr gl_offs;\n  int gl_flags;\n  char **gl_pathv;\n  int (*gl_errfunc)(const char *, int);\n  void 
(*gl_closedir)(void *dirp);\n  struct dirent *(*gl_readdir)(void *dirp);\n  void *(*gl_opendir)(const char *);\n  int (*gl_lstat)(const char *, void * /* struct stat* */);\n  int (*gl_stat)(const char *, void * /* struct stat* */);\n};\n\nextern int glob_nomatch;\nextern int glob_altdirfunc;\nextern const int wordexp_wrde_dooffs;\n\nextern unsigned path_max;\n\nextern int struct_ttyent_sz;\n\nstruct __sanitizer_wordexp_t {\n  uptr we_wordc;\n  char **we_wordv;\n  uptr we_offs;\n  char *we_strings;\n  uptr we_nbytes;\n};\n\ntypedef void __sanitizer_FILE;\n\nextern unsigned struct_shminfo_sz;\nextern unsigned struct_shm_info_sz;\nextern int shmctl_ipc_stat;\nextern int shmctl_ipc_info;\nextern int shmctl_shm_info;\nextern int shmctl_shm_stat;\n\nextern unsigned struct_utmpx_sz;\n\nextern int map_fixed;\n\n// ioctl arguments\nstruct __sanitizer_ifconf {\n  int ifc_len;\n  union {\n    void *ifcu_req;\n  } ifc_ifcu;\n};\n\nstruct __sanitizer__ttyent {\n  char *ty_name;\n  char *ty_getty;\n  char *ty_type;\n  int ty_status;\n  char *ty_window;\n  char *ty_comment;\n  char *ty_group;\n};\n\n#  define IOC_NRBITS 8\n#  define IOC_TYPEBITS 8\n#  if defined(__powerpc__) || defined(__powerpc64__) || defined(__mips__)\n#    define IOC_SIZEBITS 13\n#    define IOC_DIRBITS 3\n#    define IOC_NONE 1U\n#    define IOC_WRITE 4U\n#    define IOC_READ 2U\n#  else\n#    define IOC_SIZEBITS 14\n#    define IOC_DIRBITS 2\n#    define IOC_NONE 0U\n#    define IOC_WRITE 1U\n#    define IOC_READ 2U\n#  endif\n#  define IOC_NRMASK ((1 << IOC_NRBITS) - 1)\n#  define IOC_TYPEMASK ((1 << IOC_TYPEBITS) - 1)\n#  define IOC_SIZEMASK ((1 << IOC_SIZEBITS) - 1)\n#  if defined(IOC_DIRMASK)\n#    undef IOC_DIRMASK\n#  endif\n#  define IOC_DIRMASK ((1 << IOC_DIRBITS) - 1)\n#  define IOC_NRSHIFT 0\n#  define IOC_TYPESHIFT (IOC_NRSHIFT + IOC_NRBITS)\n#  define IOC_SIZESHIFT (IOC_TYPESHIFT + IOC_TYPEBITS)\n#  define IOC_DIRSHIFT (IOC_SIZESHIFT + IOC_SIZEBITS)\n#  define EVIOC_EV_MAX 0x1f\n#  define 
EVIOC_ABS_MAX 0x3f\n\n#  define IOC_DIR(nr) (((nr) >> IOC_DIRSHIFT) & IOC_DIRMASK)\n#  define IOC_TYPE(nr) (((nr) >> IOC_TYPESHIFT) & IOC_TYPEMASK)\n#  define IOC_NR(nr) (((nr) >> IOC_NRSHIFT) & IOC_NRMASK)\n#  define IOC_SIZE(nr) (((nr) >> IOC_SIZESHIFT) & IOC_SIZEMASK)\n\nextern unsigned struct_ifreq_sz;\nextern unsigned struct_termios_sz;\nextern unsigned struct_winsize_sz;\n\nextern unsigned struct_copr_buffer_sz;\nextern unsigned struct_copr_debug_buf_sz;\nextern unsigned struct_copr_msg_sz;\nextern unsigned struct_midi_info_sz;\nextern unsigned struct_mtget_sz;\nextern unsigned struct_mtop_sz;\nextern unsigned struct_rtentry_sz;\nextern unsigned struct_sbi_instrument_sz;\nextern unsigned struct_seq_event_rec_sz;\nextern unsigned struct_synth_info_sz;\nextern unsigned struct_vt_mode_sz;\n\nextern const unsigned long __sanitizer_bufsiz;\nextern unsigned struct_audio_buf_info_sz;\nextern unsigned struct_ppp_stats_sz;\nextern unsigned struct_sioc_sg_req_sz;\nextern unsigned struct_sioc_vif_req_sz;\n\n// ioctl request identifiers\n\n// A special value to mark ioctls that are not present on the target platform,\n// when it can not be determined without including any system headers.\nextern const unsigned IOCTL_NOT_PRESENT;\n\nextern unsigned IOCTL_FIOASYNC;\nextern unsigned IOCTL_FIOCLEX;\nextern unsigned IOCTL_FIOGETOWN;\nextern unsigned IOCTL_FIONBIO;\nextern unsigned IOCTL_FIONCLEX;\nextern unsigned IOCTL_FIOSETOWN;\nextern unsigned IOCTL_SIOCADDMULTI;\nextern unsigned IOCTL_SIOCATMARK;\nextern unsigned IOCTL_SIOCDELMULTI;\nextern unsigned IOCTL_SIOCGIFADDR;\nextern unsigned IOCTL_SIOCGIFBRDADDR;\nextern unsigned IOCTL_SIOCGIFCONF;\nextern unsigned IOCTL_SIOCGIFDSTADDR;\nextern unsigned IOCTL_SIOCGIFFLAGS;\nextern unsigned IOCTL_SIOCGIFMETRIC;\nextern unsigned IOCTL_SIOCGIFMTU;\nextern unsigned IOCTL_SIOCGIFNETMASK;\nextern unsigned IOCTL_SIOCGPGRP;\nextern unsigned IOCTL_SIOCSIFADDR;\nextern unsigned IOCTL_SIOCSIFBRDADDR;\nextern unsigned 
IOCTL_SIOCSIFDSTADDR;\nextern unsigned IOCTL_SIOCSIFFLAGS;\nextern unsigned IOCTL_SIOCSIFMETRIC;\nextern unsigned IOCTL_SIOCSIFMTU;\nextern unsigned IOCTL_SIOCSIFNETMASK;\nextern unsigned IOCTL_SIOCSPGRP;\nextern unsigned IOCTL_TIOCCONS;\nextern unsigned IOCTL_TIOCEXCL;\nextern unsigned IOCTL_TIOCGETD;\nextern unsigned IOCTL_TIOCGPGRP;\nextern unsigned IOCTL_TIOCGWINSZ;\nextern unsigned IOCTL_TIOCMBIC;\nextern unsigned IOCTL_TIOCMBIS;\nextern unsigned IOCTL_TIOCMGET;\nextern unsigned IOCTL_TIOCMSET;\nextern unsigned IOCTL_TIOCNOTTY;\nextern unsigned IOCTL_TIOCNXCL;\nextern unsigned IOCTL_TIOCOUTQ;\nextern unsigned IOCTL_TIOCPKT;\nextern unsigned IOCTL_TIOCSCTTY;\nextern unsigned IOCTL_TIOCSETD;\nextern unsigned IOCTL_TIOCSPGRP;\nextern unsigned IOCTL_TIOCSTI;\nextern unsigned IOCTL_TIOCSWINSZ;\nextern unsigned IOCTL_SIOCGETSGCNT;\nextern unsigned IOCTL_SIOCGETVIFCNT;\nextern unsigned IOCTL_MTIOCGET;\nextern unsigned IOCTL_MTIOCTOP;\nextern unsigned IOCTL_SIOCADDRT;\nextern unsigned IOCTL_SIOCDELRT;\nextern unsigned IOCTL_SNDCTL_DSP_GETBLKSIZE;\nextern unsigned IOCTL_SNDCTL_DSP_GETFMTS;\nextern unsigned IOCTL_SNDCTL_DSP_NONBLOCK;\nextern unsigned IOCTL_SNDCTL_DSP_POST;\nextern unsigned IOCTL_SNDCTL_DSP_RESET;\nextern unsigned IOCTL_SNDCTL_DSP_SETFMT;\nextern unsigned IOCTL_SNDCTL_DSP_SETFRAGMENT;\nextern unsigned IOCTL_SNDCTL_DSP_SPEED;\nextern unsigned IOCTL_SNDCTL_DSP_STEREO;\nextern unsigned IOCTL_SNDCTL_DSP_SUBDIVIDE;\nextern unsigned IOCTL_SNDCTL_DSP_SYNC;\nextern unsigned IOCTL_SNDCTL_FM_4OP_ENABLE;\nextern unsigned IOCTL_SNDCTL_FM_LOAD_INSTR;\nextern unsigned IOCTL_SNDCTL_MIDI_INFO;\nextern unsigned IOCTL_SNDCTL_MIDI_PRETIME;\nextern unsigned IOCTL_SNDCTL_SEQ_CTRLRATE;\nextern unsigned IOCTL_SNDCTL_SEQ_GETINCOUNT;\nextern unsigned IOCTL_SNDCTL_SEQ_GETOUTCOUNT;\nextern unsigned IOCTL_SNDCTL_SEQ_NRMIDIS;\nextern unsigned IOCTL_SNDCTL_SEQ_NRSYNTHS;\nextern unsigned IOCTL_SNDCTL_SEQ_OUTOFBAND;\nextern unsigned IOCTL_SNDCTL_SEQ_PANIC;\nextern unsigned 
IOCTL_SNDCTL_SEQ_PERCMODE;\nextern unsigned IOCTL_SNDCTL_SEQ_RESET;\nextern unsigned IOCTL_SNDCTL_SEQ_RESETSAMPLES;\nextern unsigned IOCTL_SNDCTL_SEQ_SYNC;\nextern unsigned IOCTL_SNDCTL_SEQ_TESTMIDI;\nextern unsigned IOCTL_SNDCTL_SEQ_THRESHOLD;\nextern unsigned IOCTL_SNDCTL_SYNTH_INFO;\nextern unsigned IOCTL_SNDCTL_SYNTH_MEMAVL;\nextern unsigned IOCTL_SNDCTL_TMR_CONTINUE;\nextern unsigned IOCTL_SNDCTL_TMR_METRONOME;\nextern unsigned IOCTL_SNDCTL_TMR_SELECT;\nextern unsigned IOCTL_SNDCTL_TMR_SOURCE;\nextern unsigned IOCTL_SNDCTL_TMR_START;\nextern unsigned IOCTL_SNDCTL_TMR_STOP;\nextern unsigned IOCTL_SNDCTL_TMR_TEMPO;\nextern unsigned IOCTL_SNDCTL_TMR_TIMEBASE;\nextern unsigned IOCTL_SOUND_MIXER_READ_ALTPCM;\nextern unsigned IOCTL_SOUND_MIXER_READ_BASS;\nextern unsigned IOCTL_SOUND_MIXER_READ_CAPS;\nextern unsigned IOCTL_SOUND_MIXER_READ_CD;\nextern unsigned IOCTL_SOUND_MIXER_READ_DEVMASK;\nextern unsigned IOCTL_SOUND_MIXER_READ_ENHANCE;\nextern unsigned IOCTL_SOUND_MIXER_READ_IGAIN;\nextern unsigned IOCTL_SOUND_MIXER_READ_IMIX;\nextern unsigned IOCTL_SOUND_MIXER_READ_LINE1;\nextern unsigned IOCTL_SOUND_MIXER_READ_LINE2;\nextern unsigned IOCTL_SOUND_MIXER_READ_LINE3;\nextern unsigned IOCTL_SOUND_MIXER_READ_LINE;\nextern unsigned IOCTL_SOUND_MIXER_READ_LOUD;\nextern unsigned IOCTL_SOUND_MIXER_READ_MIC;\nextern unsigned IOCTL_SOUND_MIXER_READ_MUTE;\nextern unsigned IOCTL_SOUND_MIXER_READ_OGAIN;\nextern unsigned IOCTL_SOUND_MIXER_READ_PCM;\nextern unsigned IOCTL_SOUND_MIXER_READ_RECLEV;\nextern unsigned IOCTL_SOUND_MIXER_READ_RECMASK;\nextern unsigned IOCTL_SOUND_MIXER_READ_RECSRC;\nextern unsigned IOCTL_SOUND_MIXER_READ_SPEAKER;\nextern unsigned IOCTL_SOUND_MIXER_READ_STEREODEVS;\nextern unsigned IOCTL_SOUND_MIXER_READ_SYNTH;\nextern unsigned IOCTL_SOUND_MIXER_READ_TREBLE;\nextern unsigned IOCTL_SOUND_MIXER_READ_VOLUME;\nextern unsigned IOCTL_SOUND_MIXER_WRITE_ALTPCM;\nextern unsigned IOCTL_SOUND_MIXER_WRITE_BASS;\nextern unsigned IOCTL_SOUND_MIXER_WRITE_CD;\nextern 
unsigned IOCTL_SOUND_MIXER_WRITE_ENHANCE;\nextern unsigned IOCTL_SOUND_MIXER_WRITE_IGAIN;\nextern unsigned IOCTL_SOUND_MIXER_WRITE_IMIX;\nextern unsigned IOCTL_SOUND_MIXER_WRITE_LINE1;\nextern unsigned IOCTL_SOUND_MIXER_WRITE_LINE2;\nextern unsigned IOCTL_SOUND_MIXER_WRITE_LINE3;\nextern unsigned IOCTL_SOUND_MIXER_WRITE_LINE;\nextern unsigned IOCTL_SOUND_MIXER_WRITE_LOUD;\nextern unsigned IOCTL_SOUND_MIXER_WRITE_MIC;\nextern unsigned IOCTL_SOUND_MIXER_WRITE_MUTE;\nextern unsigned IOCTL_SOUND_MIXER_WRITE_OGAIN;\nextern unsigned IOCTL_SOUND_MIXER_WRITE_PCM;\nextern unsigned IOCTL_SOUND_MIXER_WRITE_RECLEV;\nextern unsigned IOCTL_SOUND_MIXER_WRITE_RECSRC;\nextern unsigned IOCTL_SOUND_MIXER_WRITE_SPEAKER;\nextern unsigned IOCTL_SOUND_MIXER_WRITE_SYNTH;\nextern unsigned IOCTL_SOUND_MIXER_WRITE_TREBLE;\nextern unsigned IOCTL_SOUND_MIXER_WRITE_VOLUME;\nextern unsigned IOCTL_SOUND_PCM_READ_BITS;\nextern unsigned IOCTL_SOUND_PCM_READ_CHANNELS;\nextern unsigned IOCTL_SOUND_PCM_READ_FILTER;\nextern unsigned IOCTL_SOUND_PCM_READ_RATE;\nextern unsigned IOCTL_SOUND_PCM_WRITE_CHANNELS;\nextern unsigned IOCTL_SOUND_PCM_WRITE_FILTER;\nextern unsigned IOCTL_VT_ACTIVATE;\nextern unsigned IOCTL_VT_GETMODE;\nextern unsigned IOCTL_VT_OPENQRY;\nextern unsigned IOCTL_VT_RELDISP;\nextern unsigned IOCTL_VT_SETMODE;\nextern unsigned IOCTL_VT_WAITACTIVE;\nextern unsigned IOCTL_GIO_SCRNMAP;\nextern unsigned IOCTL_KDDISABIO;\nextern unsigned IOCTL_KDENABIO;\nextern unsigned IOCTL_KDGETLED;\nextern unsigned IOCTL_KDGETMODE;\nextern unsigned IOCTL_KDGKBMODE;\nextern unsigned IOCTL_KDGKBTYPE;\nextern unsigned IOCTL_KDMKTONE;\nextern unsigned IOCTL_KDSETLED;\nextern unsigned IOCTL_KDSETMODE;\nextern unsigned IOCTL_KDSKBMODE;\n\nextern const int si_SEGV_MAPERR;\nextern const int si_SEGV_ACCERR;\n\nextern const unsigned MD5_CTX_sz;\nextern const unsigned MD5_return_length;\n\n#define SHA2_EXTERN(LEN)                          \\\n  extern const unsigned SHA##LEN##_CTX_sz;        \\\n  extern const 
unsigned SHA##LEN##_return_length; \\\n  extern const unsigned SHA##LEN##_block_length;  \\\n  extern const unsigned SHA##LEN##_digest_length\n\nSHA2_EXTERN(224);\nSHA2_EXTERN(256);\nSHA2_EXTERN(384);\nSHA2_EXTERN(512);\n\n#undef SHA2_EXTERN\n\nstruct __sanitizer_cap_rights {\n  u64 cr_rights[2];\n};\n\ntypedef struct __sanitizer_cap_rights __sanitizer_cap_rights_t;\nextern unsigned struct_cap_rights_sz;\n\nextern unsigned struct_fstab_sz;\nextern unsigned struct_StringList_sz;\n}  // namespace __sanitizer\n\n#  define CHECK_TYPE_SIZE(TYPE) \\\n    COMPILER_CHECK(sizeof(__sanitizer_##TYPE) == sizeof(TYPE))\n\n#  define CHECK_SIZE_AND_OFFSET(CLASS, MEMBER)                      \\\n    COMPILER_CHECK(sizeof(((__sanitizer_##CLASS *)NULL)->MEMBER) == \\\n                   sizeof(((CLASS *)NULL)->MEMBER));                \\\n    COMPILER_CHECK(offsetof(__sanitizer_##CLASS, MEMBER) ==         \\\n                   offsetof(CLASS, MEMBER))\n\n// For sigaction, which is a function and struct at the same time,\n// and thus requires explicit \"struct\" in sizeof() expression.\n#  define CHECK_STRUCT_SIZE_AND_OFFSET(CLASS, MEMBER)                      \\\n    COMPILER_CHECK(sizeof(((struct __sanitizer_##CLASS *)NULL)->MEMBER) == \\\n                   sizeof(((struct CLASS *)NULL)->MEMBER));                \\\n    COMPILER_CHECK(offsetof(struct __sanitizer_##CLASS, MEMBER) ==         \\\n                   offsetof(struct CLASS, MEMBER))\n\n#  define SIGACTION_SYMNAME sigaction\n\n#endif\n\n#endif  // SANITIZER_FREEBSD\n"
  },
  {
    "path": "runtime/sanitizer_common/sanitizer_platform_limits_linux.cpp",
    "content": "//===-- sanitizer_platform_limits_linux.cpp -------------------------------===//\n//\n// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n// See https://llvm.org/LICENSE.txt for license information.\n// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n//\n//===----------------------------------------------------------------------===//\n//\n// This file is a part of Sanitizer common code.\n//\n// Sizes and layouts of linux kernel data structures.\n//===----------------------------------------------------------------------===//\n\n// This is a separate compilation unit for linux headers that conflict with\n// userspace headers.\n// Most \"normal\" includes go in sanitizer_platform_limits_posix.cpp\n\n#include \"sanitizer_platform.h\"\n#if SANITIZER_LINUX\n\n#include \"sanitizer_internal_defs.h\"\n#include \"sanitizer_platform_limits_posix.h\"\n\n// For offsetof -> __builtin_offsetof definition.\n#include <stddef.h>\n\n// With old kernels (and even new kernels on powerpc) asm/stat.h uses types that\n// are not defined anywhere in userspace headers. Fake them. 
This seems to work\n// fine with newer headers, too.\n#include <linux/posix_types.h>\n#  if defined(__x86_64__) || defined(__mips__) || defined(__hexagon__)\n#    include <sys/stat.h>\n#  else\n#    define ino_t __kernel_ino_t\n#    define mode_t __kernel_mode_t\n#    define nlink_t __kernel_nlink_t\n#    define uid_t __kernel_uid_t\n#    define gid_t __kernel_gid_t\n#    define off_t __kernel_off_t\n#    define time_t __kernel_time_t\n// This header seems to contain the definitions of _kernel_ stat* structs.\n#    include <asm/stat.h>\n#    undef ino_t\n#    undef mode_t\n#    undef nlink_t\n#    undef uid_t\n#    undef gid_t\n#    undef off_t\n#  endif\n\n#  include <linux/aio_abi.h>\n\n#  if !SANITIZER_ANDROID\n#    include <sys/statfs.h>\n#    include <linux/perf_event.h>\n#  endif\n\nusing namespace __sanitizer;\n\nnamespace __sanitizer {\n#if !SANITIZER_ANDROID\n  unsigned struct_statfs64_sz = sizeof(struct statfs64);\n#endif\n}  // namespace __sanitizer\n\n#  if !defined(__powerpc64__) && !defined(__x86_64__) &&                   \\\n      !defined(__aarch64__) && !defined(__mips__) && !defined(__s390__) && \\\n      !defined(__sparc__) && !defined(__riscv) && !defined(__hexagon__)\nCOMPILER_CHECK(struct___old_kernel_stat_sz == sizeof(struct __old_kernel_stat));\n#endif\n\nCOMPILER_CHECK(struct_kernel_stat_sz == sizeof(struct stat));\n\n#if defined(__i386__)\nCOMPILER_CHECK(struct_kernel_stat64_sz == sizeof(struct stat64));\n#endif\n\nCHECK_TYPE_SIZE(io_event);\nCHECK_SIZE_AND_OFFSET(io_event, data);\nCHECK_SIZE_AND_OFFSET(io_event, obj);\nCHECK_SIZE_AND_OFFSET(io_event, res);\nCHECK_SIZE_AND_OFFSET(io_event, res2);\n\n#if !SANITIZER_ANDROID\nCOMPILER_CHECK(sizeof(struct __sanitizer_perf_event_attr) <=\n               sizeof(struct perf_event_attr));\nCHECK_SIZE_AND_OFFSET(perf_event_attr, type);\nCHECK_SIZE_AND_OFFSET(perf_event_attr, size);\n#endif\n\nCOMPILER_CHECK(iocb_cmd_pread == IOCB_CMD_PREAD);\nCOMPILER_CHECK(iocb_cmd_pwrite == 
IOCB_CMD_PWRITE);\n#if !SANITIZER_ANDROID\nCOMPILER_CHECK(iocb_cmd_preadv == IOCB_CMD_PREADV);\nCOMPILER_CHECK(iocb_cmd_pwritev == IOCB_CMD_PWRITEV);\n#endif\n\nCHECK_TYPE_SIZE(iocb);\nCHECK_SIZE_AND_OFFSET(iocb, aio_data);\n// Skip aio_key, it's weird.\nCHECK_SIZE_AND_OFFSET(iocb, aio_lio_opcode);\nCHECK_SIZE_AND_OFFSET(iocb, aio_reqprio);\nCHECK_SIZE_AND_OFFSET(iocb, aio_fildes);\nCHECK_SIZE_AND_OFFSET(iocb, aio_buf);\nCHECK_SIZE_AND_OFFSET(iocb, aio_nbytes);\nCHECK_SIZE_AND_OFFSET(iocb, aio_offset);\n\n#endif  // SANITIZER_LINUX\n"
  },
  {
    "path": "runtime/sanitizer_common/sanitizer_platform_limits_netbsd.cpp",
    "content": "//===-- sanitizer_platform_limits_netbsd.cpp ------------------------------===//\n//\n// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n// See https://llvm.org/LICENSE.txt for license information.\n// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n//\n//===----------------------------------------------------------------------===//\n//\n// This file is a part of Sanitizer common code.\n//\n// Sizes and layouts of platform-specific NetBSD data structures.\n//===----------------------------------------------------------------------===//\n\n#include \"sanitizer_platform.h\"\n\n#if SANITIZER_NETBSD\n\n#define _KMEMUSER\n#define RAY_DO_SIGLEV\n#define __LEGACY_PT_LWPINFO\n\n// clang-format off\n#include <sys/param.h>\n#include <sys/types.h>\n#include <sys/sysctl.h>\n#include <sys/disk.h>\n#include <sys/disklabel.h>\n#include <sys/mount.h>\n#include <sys/agpio.h>\n#include <sys/ataio.h>\n#include <sys/audioio.h>\n#include <sys/cdbr.h>\n#include <sys/cdio.h>\n#include <sys/chio.h>\n#include <sys/clockctl.h>\n#include <sys/cpuio.h>\n#include <sys/dkbad.h>\n#include <sys/dkio.h>\n#include <sys/drvctlio.h>\n#include <sys/dvdio.h>\n#include <sys/envsys.h>\n#include <sys/event.h>\n#include <sys/fdio.h>\n#include <sys/filio.h>\n#include <sys/gpio.h>\n#include <sys/ioctl.h>\n#include <sys/ioctl_compat.h>\n#include <sys/joystick.h>\n#include <sys/ksyms.h>\n#include <sys/lua.h>\n#include <sys/midiio.h>\n#include <sys/mtio.h>\n#include <sys/power.h>\n#include <sys/radioio.h>\n#include <sys/rndio.h>\n#include <sys/scanio.h>\n#include <sys/scsiio.h>\n#include <sys/sockio.h>\n#include <sys/timepps.h>\n#include <sys/ttycom.h>\n#include <sys/verified_exec.h>\n#include <sys/videoio.h>\n#include <sys/wdog.h>\n#include <sys/event.h>\n#include <sys/filio.h>\n#include <sys/ipc.h>\n#include <sys/ipmi.h>\n#include <sys/kcov.h>\n#include <sys/mman.h>\n#include <sys/module.h>\n#include <sys/mount.h>\n#include <sys/mqueue.h>\n#include 
<sys/msg.h>\n#include <sys/mtio.h>\n#include <sys/ptrace.h>\n\n// Compat for NetBSD < 9.99.30.\n#ifndef PT_LWPSTATUS\n#define PT_LWPSTATUS 24\n#endif\n#ifndef PT_LWPNEXT\n#define PT_LWPNEXT 25\n#endif\n\n#include <sys/resource.h>\n#include <sys/sem.h>\n#include <sys/scsiio.h>\n#include <sys/sha1.h>\n#include <sys/sha2.h>\n#include <sys/shm.h>\n#include <sys/signal.h>\n#include <sys/socket.h>\n#include <sys/sockio.h>\n#include <sys/soundcard.h>\n#include <sys/stat.h>\n#include <sys/statvfs.h>\n#include <sys/time.h>\n#include <sys/timeb.h>\n#include <sys/times.h>\n#include <sys/timespec.h>\n#include <sys/timex.h>\n#include <sys/types.h>\n#include <sys/ucontext.h>\n#include <sys/utsname.h>\n#include <altq/altq.h>\n#include <altq/altq_afmap.h>\n#include <altq/altq_blue.h>\n#include <altq/altq_cbq.h>\n#include <altq/altq_cdnr.h>\n#include <altq/altq_fifoq.h>\n#include <altq/altq_hfsc.h>\n#include <altq/altq_jobs.h>\n#include <altq/altq_priq.h>\n#include <altq/altq_red.h>\n#include <altq/altq_rio.h>\n#include <altq/altq_wfq.h>\n#include <arpa/inet.h>\n#include <crypto/cryptodev.h>\n#include <dev/apm/apmio.h>\n#include <dev/dm/netbsd-dm.h>\n#include <dev/dmover/dmover_io.h>\n#include <dev/dtv/dtvio_demux.h>\n#include <dev/dtv/dtvio_frontend.h>\n#if !__NetBSD_Prereq__(9, 99, 26)\n#include <dev/filemon/filemon.h>\n#else\n#define FILEMON_SET_FD          _IOWR('S', 1, int)\n#define FILEMON_SET_PID         _IOWR('S', 2, pid_t)\n#endif\n#include <dev/hdaudio/hdaudioio.h>\n#include <dev/hdmicec/hdmicecio.h>\n#include <dev/hpc/hpcfbio.h>\n#include <dev/i2o/iopio.h>\n#include <dev/ic/athioctl.h>\n#include <dev/ic/bt8xx.h>\n#include <dev/ic/icp_ioctl.h>\n#include <dev/ic/isp_ioctl.h>\n#include <dev/ic/mlxio.h>\n#include <dev/ic/qemufwcfgio.h>\n#include <dev/ic/nvmeio.h>\n#include <dev/ir/irdaio.h>\n#include <dev/isa/isvio.h>\n#include <dev/isa/wtreg.h>\n#if __has_include(<dev/iscsi/iscsi_ioctl.h>)\n#include <dev/iscsi/iscsi_ioctl.h>\n#else\n/* Fallback for MKISCSI=no */\n\ntypedef 
struct {\n  uint32_t status;\n  uint32_t session_id;\n  uint32_t connection_id;\n} iscsi_conn_status_parameters_t;\n\ntypedef struct {\n  uint32_t status;\n  uint16_t interface_version;\n  uint16_t major;\n  uint16_t minor;\n  uint8_t version_string[224];\n} iscsi_get_version_parameters_t;\n\ntypedef struct {\n  uint32_t status;\n  uint32_t session_id;\n  uint32_t connection_id;\n  struct {\n    unsigned int immediate : 1;\n  } options;\n  uint64_t lun;\n  scsireq_t req; /* from <sys/scsiio.h> */\n} iscsi_iocommand_parameters_t;\n\ntypedef enum {\n  ISCSI_AUTH_None = 0,\n  ISCSI_AUTH_CHAP = 1,\n  ISCSI_AUTH_KRB5 = 2,\n  ISCSI_AUTH_SRP = 3\n} iscsi_auth_types_t;\n\ntypedef enum {\n  ISCSI_LOGINTYPE_DISCOVERY = 0,\n  ISCSI_LOGINTYPE_NOMAP = 1,\n  ISCSI_LOGINTYPE_MAP = 2\n} iscsi_login_session_type_t;\n\ntypedef enum { ISCSI_DIGEST_None = 0, ISCSI_DIGEST_CRC32C = 1 } iscsi_digest_t;\n\ntypedef enum {\n  ISCSI_SESSION_TERMINATED = 1,\n  ISCSI_CONNECTION_TERMINATED,\n  ISCSI_RECOVER_CONNECTION,\n  ISCSI_DRIVER_TERMINATING\n} iscsi_event_t;\n\ntypedef struct {\n  unsigned int mutual_auth : 1;\n  unsigned int is_secure : 1;\n  unsigned int auth_number : 4;\n  iscsi_auth_types_t auth_type[4];\n} iscsi_auth_info_t;\n\ntypedef struct {\n  uint32_t status;\n  int socket;\n  struct {\n    unsigned int HeaderDigest : 1;\n    unsigned int DataDigest : 1;\n    unsigned int MaxConnections : 1;\n    unsigned int DefaultTime2Wait : 1;\n    unsigned int DefaultTime2Retain : 1;\n    unsigned int MaxRecvDataSegmentLength : 1;\n    unsigned int auth_info : 1;\n    unsigned int user_name : 1;\n    unsigned int password : 1;\n    unsigned int target_password : 1;\n    unsigned int TargetName : 1;\n    unsigned int TargetAlias : 1;\n    unsigned int ErrorRecoveryLevel : 1;\n  } is_present;\n  iscsi_auth_info_t auth_info;\n  iscsi_login_session_type_t login_type;\n  iscsi_digest_t HeaderDigest;\n  iscsi_digest_t DataDigest;\n  uint32_t session_id;\n  uint32_t connection_id;\n  uint32_t 
MaxRecvDataSegmentLength;\n  uint16_t MaxConnections;\n  uint16_t DefaultTime2Wait;\n  uint16_t DefaultTime2Retain;\n  uint16_t ErrorRecoveryLevel;\n  void *user_name;\n  void *password;\n  void *target_password;\n  void *TargetName;\n  void *TargetAlias;\n} iscsi_login_parameters_t;\n\ntypedef struct {\n  uint32_t status;\n  uint32_t session_id;\n} iscsi_logout_parameters_t;\n\ntypedef struct {\n  uint32_t status;\n  uint32_t event_id;\n} iscsi_register_event_parameters_t;\n\ntypedef struct {\n  uint32_t status;\n  uint32_t session_id;\n  uint32_t connection_id;\n} iscsi_remove_parameters_t;\n\ntypedef struct {\n  uint32_t status;\n  uint32_t session_id;\n  void *response_buffer;\n  uint32_t response_size;\n  uint32_t response_used;\n  uint32_t response_total;\n  uint8_t key[224];\n} iscsi_send_targets_parameters_t;\n\ntypedef struct {\n  uint32_t status;\n  uint8_t InitiatorName[224];\n  uint8_t InitiatorAlias[224];\n  uint8_t ISID[6];\n} iscsi_set_node_name_parameters_t;\n\ntypedef struct {\n  uint32_t status;\n  uint32_t event_id;\n  iscsi_event_t event_kind;\n  uint32_t session_id;\n  uint32_t connection_id;\n  uint32_t reason;\n} iscsi_wait_event_parameters_t;\n\n#define ISCSI_GET_VERSION _IOWR(0, 1, iscsi_get_version_parameters_t)\n#define ISCSI_LOGIN _IOWR(0, 2, iscsi_login_parameters_t)\n#define ISCSI_LOGOUT _IOWR(0, 3, iscsi_logout_parameters_t)\n#define ISCSI_ADD_CONNECTION _IOWR(0, 4, iscsi_login_parameters_t)\n#define ISCSI_RESTORE_CONNECTION _IOWR(0, 5, iscsi_login_parameters_t)\n#define ISCSI_REMOVE_CONNECTION _IOWR(0, 6, iscsi_remove_parameters_t)\n#define ISCSI_CONNECTION_STATUS _IOWR(0, 7, iscsi_conn_status_parameters_t)\n#define ISCSI_SEND_TARGETS _IOWR(0, 8, iscsi_send_targets_parameters_t)\n#define ISCSI_SET_NODE_NAME _IOWR(0, 9, iscsi_set_node_name_parameters_t)\n#define ISCSI_IO_COMMAND _IOWR(0, 10, iscsi_iocommand_parameters_t)\n#define ISCSI_REGISTER_EVENT _IOWR(0, 11, iscsi_register_event_parameters_t)\n#define ISCSI_DEREGISTER_EVENT 
_IOWR(0, 12, iscsi_register_event_parameters_t)\n#define ISCSI_WAIT_EVENT _IOWR(0, 13, iscsi_wait_event_parameters_t)\n#define ISCSI_POLL_EVENT _IOWR(0, 14, iscsi_wait_event_parameters_t)\n#endif\n#include <dev/ofw/openfirmio.h>\n#include <dev/pci/amrio.h>\n#include <dev/pci/mlyreg.h>\n#include <dev/pci/mlyio.h>\n#include <dev/pci/pciio.h>\n#include <dev/pci/tweio.h>\n#include <dev/pcmcia/if_cnwioctl.h>\n#include <net/bpf.h>\n#include <net/if_gre.h>\n#include <net/ppp_defs.h>\n#include <net/if_ppp.h>\n#include <net/if_pppoe.h>\n#include <net/if_sppp.h>\n#include <net/if_srt.h>\n#include <net/if_tap.h>\n#include <net/if_tun.h>\n#include <net/npf.h>\n#include <net/pfvar.h>\n#include <net/slip.h>\n#include <netbt/hci.h>\n#include <netinet/ip_compat.h>\n#if __has_include(<netinet/ip_fil.h>)\n#include <netinet/ip_fil.h>\n#include <netinet/ip_nat.h>\n#include <netinet/ip_proxy.h>\n#else\n/* Fallback for MKIPFILTER=no */\n\ntypedef struct ap_control {\n  char apc_label[16];\n  char apc_config[16];\n  unsigned char apc_p;\n  unsigned long apc_cmd;\n  unsigned long apc_arg;\n  void *apc_data;\n  size_t apc_dsize;\n} ap_ctl_t;\n\ntypedef struct ipftq {\n  ipfmutex_t ifq_lock;\n  unsigned int ifq_ttl;\n  void *ifq_head;\n  void **ifq_tail;\n  void *ifq_next;\n  void **ifq_pnext;\n  int ifq_ref;\n  unsigned int ifq_flags;\n} ipftq_t;\n\ntypedef struct ipfobj {\n  uint32_t ipfo_rev;\n  uint32_t ipfo_size;\n  void *ipfo_ptr;\n  int ipfo_type;\n  int ipfo_offset;\n  int ipfo_retval;\n  unsigned char ipfo_xxxpad[28];\n} ipfobj_t;\n\n#define SIOCADNAT _IOW('r', 60, struct ipfobj)\n#define SIOCRMNAT _IOW('r', 61, struct ipfobj)\n#define SIOCGNATS _IOWR('r', 62, struct ipfobj)\n#define SIOCGNATL _IOWR('r', 63, struct ipfobj)\n#define SIOCPURGENAT _IOWR('r', 100, struct ipfobj)\n#endif\n#include <netinet6/in6_var.h>\n#include <netinet6/nd6.h>\n#if !__NetBSD_Prereq__(9, 99, 51)\n#include <netsmb/smb_dev.h>\n#else\nstruct smbioc_flags {\n  int ioc_level;\n  int ioc_mask;\n  int 
ioc_flags;\n};\nstruct smbioc_oshare {\n  int ioc_opt;\n  int ioc_stype;\n  char ioc_share[129];\n  char ioc_password[129];\n  uid_t ioc_owner;\n  gid_t ioc_group;\n  mode_t ioc_mode;\n  mode_t ioc_rights;\n};\nstruct smbioc_ossn {\n  int ioc_opt;\n  uint32_t ioc_svlen;\n  struct sockaddr *ioc_server;\n  uint32_t ioc_lolen;\n  struct sockaddr *ioc_local;\n  char ioc_srvname[16];\n  int ioc_timeout;\n  int ioc_retrycount;\n  char ioc_localcs[16];\n  char ioc_servercs[16];\n  char ioc_user[129];\n  char ioc_workgroup[129];\n  char ioc_password[129];\n  uid_t ioc_owner;\n  gid_t ioc_group;\n  mode_t ioc_mode;\n  mode_t ioc_rights;\n};\nstruct smbioc_lookup {\n  int ioc_level;\n  int ioc_flags;\n  struct smbioc_ossn ioc_ssn;\n  struct smbioc_oshare ioc_sh;\n};\nstruct smbioc_rq {\n  u_char ioc_cmd;\n  u_char ioc_twc;\n  void *ioc_twords;\n  u_short ioc_tbc;\n  void *ioc_tbytes;\n  int ioc_rpbufsz;\n  char *ioc_rpbuf;\n  u_char ioc_rwc;\n  u_short ioc_rbc;\n};\nstruct smbioc_rw {\n  u_int16_t ioc_fh;\n  char *ioc_base;\n  off_t ioc_offset;\n  int ioc_cnt;\n};\n#define SMBIOC_OPENSESSION _IOW('n', 100, struct smbioc_ossn)\n#define SMBIOC_OPENSHARE _IOW('n', 101, struct smbioc_oshare)\n#define SMBIOC_REQUEST _IOWR('n', 102, struct smbioc_rq)\n#define SMBIOC_T2RQ _IOWR('n', 103, struct smbioc_t2rq)\n#define SMBIOC_SETFLAGS _IOW('n', 104, struct smbioc_flags)\n#define SMBIOC_LOOKUP _IOW('n', 106, struct smbioc_lookup)\n#define SMBIOC_READ _IOWR('n', 107, struct smbioc_rw)\n#define SMBIOC_WRITE _IOWR('n', 108, struct smbioc_rw)\n#endif\n#include <dev/biovar.h>\n#include <dev/bluetooth/btdev.h>\n#include <dev/bluetooth/btsco.h>\n#include <dev/ccdvar.h>\n#include <dev/cgdvar.h>\n#include <dev/fssvar.h>\n#include <dev/kttcpio.h>\n#include <dev/lockstat.h>\n#include <dev/md.h>\n#include <net/if_ether.h>\n#include <dev/pcmcia/if_rayreg.h>\n#include <stdio.h>\n#include <dev/raidframe/raidframeio.h>\n#include <dev/sbus/mbppio.h>\n#include <dev/scsipi/ses.h>\n#include 
<dev/spi/spi_io.h>\n#include <dev/spkrio.h>\n#include <dev/sun/disklabel.h>\n#include <dev/sun/fbio.h>\n#include <dev/sun/kbio.h>\n#include <dev/sun/vuid_event.h>\n#include <dev/tc/sticio.h>\n#include <dev/usb/ukyopon.h>\n#if !__NetBSD_Prereq__(9, 99, 44)\n#include <dev/usb/urio.h>\n#else\nstruct urio_command {\n  unsigned short length;\n  int request;\n  int requesttype;\n  int value;\n  int index;\n  void *buffer;\n  int timeout;\n};\n#define URIO_SEND_COMMAND      _IOWR('U', 200, struct urio_command)\n#define URIO_RECV_COMMAND      _IOWR('U', 201, struct urio_command)\n#endif\n#include <dev/usb/usb.h>\n#include <dev/usb/utoppy.h>\n#include <dev/vme/xio.h>\n#include <dev/vndvar.h>\n#include <dev/wscons/wsconsio.h>\n#include <dev/wscons/wsdisplay_usl_io.h>\n#include <fs/autofs/autofs_ioctl.h>\n#include <dirent.h>\n#include <dlfcn.h>\n#include <glob.h>\n#include <grp.h>\n#include <ifaddrs.h>\n#include <limits.h>\n#include <link_elf.h>\n#include <net/if.h>\n#include <net/route.h>\n#include <netdb.h>\n#include <netinet/in.h>\n#include <netinet/ip_mroute.h>\n#include <netinet/sctp_uio.h>\n#include <poll.h>\n#include <pthread.h>\n#include <pwd.h>\n#include <semaphore.h>\n#include <signal.h>\n#include <stddef.h>\n#include <md2.h>\n#include <md4.h>\n#include <md5.h>\n#include <rmd160.h>\n#include <soundcard.h>\n#include <term.h>\n#include <termios.h>\n#include <time.h>\n#include <ttyent.h>\n#include <utime.h>\n#include <utmp.h>\n#include <utmpx.h>\n#include <vis.h>\n#include <wchar.h>\n#include <wordexp.h>\n#include <ttyent.h>\n#include <fts.h>\n#include <regex.h>\n#include <fstab.h>\n#include <stringlist.h>\n\n#if defined(__x86_64__)\n#include <nvmm.h>\n#endif\n// clang-format on\n\n// Include these after system headers to avoid name clashes and ambiguities.\n#include \"sanitizer_internal_defs.h\"\n#include \"sanitizer_libc.h\"\n#include \"sanitizer_platform_limits_netbsd.h\"\n\nnamespace __sanitizer {\nvoid *__sanitizer_get_link_map_by_dlopen_handle(void *handle) {\n  
void *p = nullptr;\n  return internal_dlinfo(handle, RTLD_DI_LINKMAP, &p) == 0 ? p : nullptr;\n}\n\nunsigned struct_utsname_sz = sizeof(struct utsname);\nunsigned struct_stat_sz = sizeof(struct stat);\nunsigned struct_rusage_sz = sizeof(struct rusage);\nunsigned struct_tm_sz = sizeof(struct tm);\nunsigned struct_passwd_sz = sizeof(struct passwd);\nunsigned struct_group_sz = sizeof(struct group);\nunsigned siginfo_t_sz = sizeof(siginfo_t);\nunsigned struct_sigaction_sz = sizeof(struct sigaction);\nunsigned struct_stack_t_sz = sizeof(stack_t);\nunsigned struct_itimerval_sz = sizeof(struct itimerval);\nunsigned pthread_t_sz = sizeof(pthread_t);\nunsigned pthread_mutex_t_sz = sizeof(pthread_mutex_t);\nunsigned pthread_cond_t_sz = sizeof(pthread_cond_t);\nunsigned pid_t_sz = sizeof(pid_t);\nunsigned timeval_sz = sizeof(timeval);\nunsigned uid_t_sz = sizeof(uid_t);\nunsigned gid_t_sz = sizeof(gid_t);\nunsigned mbstate_t_sz = sizeof(mbstate_t);\nunsigned sigset_t_sz = sizeof(sigset_t);\nunsigned struct_timezone_sz = sizeof(struct timezone);\nunsigned struct_tms_sz = sizeof(struct tms);\nunsigned struct_sigevent_sz = sizeof(struct sigevent);\nunsigned struct_sched_param_sz = sizeof(struct sched_param);\nunsigned struct_sockaddr_sz = sizeof(struct sockaddr);\nunsigned ucontext_t_sz(void *ctx) { return sizeof(ucontext_t); }\nunsigned struct_rlimit_sz = sizeof(struct rlimit);\nunsigned struct_timespec_sz = sizeof(struct timespec);\nunsigned struct_sembuf_sz = sizeof(struct sembuf);\nunsigned struct_kevent_sz = sizeof(struct kevent);\nunsigned struct_FTS_sz = sizeof(FTS);\nunsigned struct_FTSENT_sz = sizeof(FTSENT);\nunsigned struct_regex_sz = sizeof(regex_t);\nunsigned struct_regmatch_sz = sizeof(regmatch_t);\nunsigned struct_fstab_sz = sizeof(struct fstab);\nunsigned struct_utimbuf_sz = sizeof(struct utimbuf);\nunsigned struct_itimerspec_sz = sizeof(struct itimerspec);\nunsigned struct_timex_sz = sizeof(struct timex);\nunsigned struct_msqid_ds_sz = sizeof(struct 
msqid_ds);\nunsigned struct_mq_attr_sz = sizeof(struct mq_attr);\nunsigned struct_statvfs_sz = sizeof(struct statvfs);\nunsigned struct_sigaltstack_sz = sizeof(stack_t);\n\nconst uptr sig_ign = (uptr)SIG_IGN;\nconst uptr sig_dfl = (uptr)SIG_DFL;\nconst uptr sig_err = (uptr)SIG_ERR;\nconst uptr sa_siginfo = (uptr)SA_SIGINFO;\n\nconst unsigned long __sanitizer_bufsiz = BUFSIZ;\n\nint ptrace_pt_io = PT_IO;\nint ptrace_pt_lwpinfo = PT_LWPINFO;\nint ptrace_pt_set_event_mask = PT_SET_EVENT_MASK;\nint ptrace_pt_get_event_mask = PT_GET_EVENT_MASK;\nint ptrace_pt_get_process_state = PT_GET_PROCESS_STATE;\nint ptrace_pt_set_siginfo = PT_SET_SIGINFO;\nint ptrace_pt_get_siginfo = PT_GET_SIGINFO;\nint ptrace_pt_lwpstatus = PT_LWPSTATUS;\nint ptrace_pt_lwpnext = PT_LWPNEXT;\nint ptrace_piod_read_d = PIOD_READ_D;\nint ptrace_piod_write_d = PIOD_WRITE_D;\nint ptrace_piod_read_i = PIOD_READ_I;\nint ptrace_piod_write_i = PIOD_WRITE_I;\nint ptrace_piod_read_auxv = PIOD_READ_AUXV;\n\n#if defined(PT_SETREGS) && defined(PT_GETREGS)\nint ptrace_pt_setregs = PT_SETREGS;\nint ptrace_pt_getregs = PT_GETREGS;\n#else\nint ptrace_pt_setregs = -1;\nint ptrace_pt_getregs = -1;\n#endif\n\n#if defined(PT_SETFPREGS) && defined(PT_GETFPREGS)\nint ptrace_pt_setfpregs = PT_SETFPREGS;\nint ptrace_pt_getfpregs = PT_GETFPREGS;\n#else\nint ptrace_pt_setfpregs = -1;\nint ptrace_pt_getfpregs = -1;\n#endif\n\n#if defined(PT_SETDBREGS) && defined(PT_GETDBREGS)\nint ptrace_pt_setdbregs = PT_SETDBREGS;\nint ptrace_pt_getdbregs = PT_GETDBREGS;\n#else\nint ptrace_pt_setdbregs = -1;\nint ptrace_pt_getdbregs = -1;\n#endif\n\nunsigned struct_ptrace_ptrace_io_desc_struct_sz = sizeof(struct ptrace_io_desc);\nunsigned struct_ptrace_ptrace_lwpinfo_struct_sz = sizeof(struct ptrace_lwpinfo);\nunsigned struct_ptrace_ptrace_lwpstatus_struct_sz =\n    sizeof(struct __sanitizer_ptrace_lwpstatus);\nunsigned struct_ptrace_ptrace_event_struct_sz = sizeof(ptrace_event_t);\nunsigned struct_ptrace_ptrace_siginfo_struct_sz = 
sizeof(ptrace_siginfo_t);\n\n#if defined(PT_SETREGS)\nunsigned struct_ptrace_reg_struct_sz = sizeof(struct reg);\n#else\nunsigned struct_ptrace_reg_struct_sz = -1;\n#endif\n\n#if defined(PT_SETFPREGS)\nunsigned struct_ptrace_fpreg_struct_sz = sizeof(struct fpreg);\n#else\nunsigned struct_ptrace_fpreg_struct_sz = -1;\n#endif\n\n#if defined(PT_SETDBREGS)\nunsigned struct_ptrace_dbreg_struct_sz = sizeof(struct dbreg);\n#else\nunsigned struct_ptrace_dbreg_struct_sz = -1;\n#endif\n\nint shmctl_ipc_stat = (int)IPC_STAT;\n\nunsigned struct_utmp_sz = sizeof(struct utmp);\nunsigned struct_utmpx_sz = sizeof(struct utmpx);\n\nint map_fixed = MAP_FIXED;\n\nint af_inet = (int)AF_INET;\nint af_inet6 = (int)AF_INET6;\n\nuptr __sanitizer_in_addr_sz(int af) {\n  if (af == AF_INET)\n    return sizeof(struct in_addr);\n  else if (af == AF_INET6)\n    return sizeof(struct in6_addr);\n  else\n    return 0;\n}\n\nunsigned struct_ElfW_Phdr_sz = sizeof(Elf_Phdr);\n\nint glob_nomatch = GLOB_NOMATCH;\nint glob_altdirfunc = GLOB_ALTDIRFUNC;\nconst int wordexp_wrde_dooffs = WRDE_DOOFFS;\n\nunsigned path_max = PATH_MAX;\n\nint struct_ttyent_sz = sizeof(struct ttyent);\n\nstruct __sanitizer_nvlist_ref_t {\n  void *buf;\n  uptr len;\n  int flags;\n};\n\ntypedef __sanitizer_nvlist_ref_t nvlist_ref_t;\n\n// ioctl arguments\nunsigned struct_altqreq_sz = sizeof(altqreq);\nunsigned struct_amr_user_ioctl_sz = sizeof(amr_user_ioctl);\nunsigned struct_ap_control_sz = sizeof(ap_control);\nunsigned struct_apm_ctl_sz = sizeof(apm_ctl);\nunsigned struct_apm_event_info_sz = sizeof(apm_event_info);\nunsigned struct_apm_power_info_sz = sizeof(apm_power_info);\nunsigned struct_atabusiodetach_args_sz = sizeof(atabusiodetach_args);\nunsigned struct_atabusioscan_args_sz = sizeof(atabusioscan_args);\nunsigned struct_ath_diag_sz = sizeof(ath_diag);\nunsigned struct_atm_flowmap_sz = sizeof(atm_flowmap);\nunsigned struct_audio_buf_info_sz = sizeof(audio_buf_info);\nunsigned struct_audio_device_sz = 
sizeof(audio_device);\nunsigned struct_audio_encoding_sz = sizeof(audio_encoding);\nunsigned struct_audio_info_sz = sizeof(audio_info);\nunsigned struct_audio_offset_sz = sizeof(audio_offset);\nunsigned struct_bio_locate_sz = sizeof(bio_locate);\nunsigned struct_bioc_alarm_sz = sizeof(bioc_alarm);\nunsigned struct_bioc_blink_sz = sizeof(bioc_blink);\nunsigned struct_bioc_disk_sz = sizeof(bioc_disk);\nunsigned struct_bioc_inq_sz = sizeof(bioc_inq);\nunsigned struct_bioc_setstate_sz = sizeof(bioc_setstate);\nunsigned struct_bioc_vol_sz = sizeof(bioc_vol);\nunsigned struct_bioc_volops_sz = sizeof(bioc_volops);\nunsigned struct_bktr_chnlset_sz = sizeof(bktr_chnlset);\nunsigned struct_bktr_remote_sz = sizeof(bktr_remote);\nunsigned struct_blue_conf_sz = sizeof(blue_conf);\nunsigned struct_blue_interface_sz = sizeof(blue_interface);\nunsigned struct_blue_stats_sz = sizeof(blue_stats);\nunsigned struct_bpf_dltlist_sz = sizeof(bpf_dltlist);\nunsigned struct_bpf_program_sz = sizeof(bpf_program);\nunsigned struct_bpf_stat_old_sz = sizeof(bpf_stat_old);\nunsigned struct_bpf_stat_sz = sizeof(bpf_stat);\nunsigned struct_bpf_version_sz = sizeof(bpf_version);\nunsigned struct_btreq_sz = sizeof(btreq);\nunsigned struct_btsco_info_sz = sizeof(btsco_info);\nunsigned struct_buffmem_desc_sz = sizeof(buffmem_desc);\nunsigned struct_cbq_add_class_sz = sizeof(cbq_add_class);\nunsigned struct_cbq_add_filter_sz = sizeof(cbq_add_filter);\nunsigned struct_cbq_delete_class_sz = sizeof(cbq_delete_class);\nunsigned struct_cbq_delete_filter_sz = sizeof(cbq_delete_filter);\nunsigned struct_cbq_getstats_sz = sizeof(cbq_getstats);\nunsigned struct_cbq_interface_sz = sizeof(cbq_interface);\nunsigned struct_cbq_modify_class_sz = sizeof(cbq_modify_class);\nunsigned struct_ccd_ioctl_sz = sizeof(ccd_ioctl);\nunsigned struct_cdnr_add_element_sz = sizeof(cdnr_add_element);\nunsigned struct_cdnr_add_filter_sz = sizeof(cdnr_add_filter);\nunsigned struct_cdnr_add_tbmeter_sz = 
sizeof(cdnr_add_tbmeter);\nunsigned struct_cdnr_add_trtcm_sz = sizeof(cdnr_add_trtcm);\nunsigned struct_cdnr_add_tswtcm_sz = sizeof(cdnr_add_tswtcm);\nunsigned struct_cdnr_delete_element_sz = sizeof(cdnr_delete_element);\nunsigned struct_cdnr_delete_filter_sz = sizeof(cdnr_delete_filter);\nunsigned struct_cdnr_get_stats_sz = sizeof(cdnr_get_stats);\nunsigned struct_cdnr_interface_sz = sizeof(cdnr_interface);\nunsigned struct_cdnr_modify_tbmeter_sz = sizeof(cdnr_modify_tbmeter);\nunsigned struct_cdnr_modify_trtcm_sz = sizeof(cdnr_modify_trtcm);\nunsigned struct_cdnr_modify_tswtcm_sz = sizeof(cdnr_modify_tswtcm);\nunsigned struct_cdnr_tbmeter_stats_sz = sizeof(cdnr_tbmeter_stats);\nunsigned struct_cdnr_tcm_stats_sz = sizeof(cdnr_tcm_stats);\nunsigned struct_cgd_ioctl_sz = sizeof(cgd_ioctl);\nunsigned struct_cgd_user_sz = sizeof(cgd_user);\nunsigned struct_changer_element_status_request_sz =\n    sizeof(changer_element_status_request);\nunsigned struct_changer_exchange_request_sz = sizeof(changer_exchange_request);\nunsigned struct_changer_move_request_sz = sizeof(changer_move_request);\nunsigned struct_changer_params_sz = sizeof(changer_params);\nunsigned struct_changer_position_request_sz = sizeof(changer_position_request);\nunsigned struct_changer_set_voltag_request_sz =\n    sizeof(changer_set_voltag_request);\nunsigned struct_clockctl_adjtime_sz = sizeof(clockctl_adjtime);\nunsigned struct_clockctl_clock_settime_sz = sizeof(clockctl_clock_settime);\nunsigned struct_clockctl_ntp_adjtime_sz = sizeof(clockctl_ntp_adjtime);\nunsigned struct_clockctl_settimeofday_sz = sizeof(clockctl_settimeofday);\nunsigned struct_cnwistats_sz = sizeof(cnwistats);\nunsigned struct_cnwitrail_sz = sizeof(cnwitrail);\nunsigned struct_cnwstatus_sz = sizeof(cnwstatus);\nunsigned struct_count_info_sz = sizeof(count_info);\nunsigned struct_cpu_ucode_sz = sizeof(cpu_ucode);\nunsigned struct_cpu_ucode_version_sz = sizeof(cpu_ucode_version);\nunsigned struct_crypt_kop_sz = 
sizeof(crypt_kop);\nunsigned struct_crypt_mkop_sz = sizeof(crypt_mkop);\nunsigned struct_crypt_mop_sz = sizeof(crypt_mop);\nunsigned struct_crypt_op_sz = sizeof(crypt_op);\nunsigned struct_crypt_result_sz = sizeof(crypt_result);\nunsigned struct_crypt_sfop_sz = sizeof(crypt_sfop);\nunsigned struct_crypt_sgop_sz = sizeof(crypt_sgop);\nunsigned struct_cryptret_sz = sizeof(cryptret);\nunsigned struct_devdetachargs_sz = sizeof(devdetachargs);\nunsigned struct_devlistargs_sz = sizeof(devlistargs);\nunsigned struct_devpmargs_sz = sizeof(devpmargs);\nunsigned struct_devrescanargs_sz = sizeof(devrescanargs);\nunsigned struct_disk_badsecinfo_sz = sizeof(disk_badsecinfo);\nunsigned struct_disk_strategy_sz = sizeof(disk_strategy);\nunsigned struct_disklabel_sz = sizeof(disklabel);\nunsigned struct_dkbad_sz = sizeof(dkbad);\nunsigned struct_dkwedge_info_sz = sizeof(dkwedge_info);\nunsigned struct_dkwedge_list_sz = sizeof(dkwedge_list);\nunsigned struct_dmio_setfunc_sz = sizeof(dmio_setfunc);\nunsigned struct_dmx_pes_filter_params_sz = sizeof(dmx_pes_filter_params);\nunsigned struct_dmx_sct_filter_params_sz = sizeof(dmx_sct_filter_params);\nunsigned struct_dmx_stc_sz = sizeof(dmx_stc);\nunsigned struct_dvb_diseqc_master_cmd_sz = sizeof(dvb_diseqc_master_cmd);\nunsigned struct_dvb_diseqc_slave_reply_sz = sizeof(dvb_diseqc_slave_reply);\nunsigned struct_dvb_frontend_event_sz = sizeof(dvb_frontend_event);\nunsigned struct_dvb_frontend_info_sz = sizeof(dvb_frontend_info);\nunsigned struct_dvb_frontend_parameters_sz = sizeof(dvb_frontend_parameters);\nunsigned struct_eccapreq_sz = sizeof(eccapreq);\nunsigned struct_fbcmap_sz = sizeof(fbcmap);\nunsigned struct_fbcurpos_sz = sizeof(fbcurpos);\nunsigned struct_fbcursor_sz = sizeof(fbcursor);\nunsigned struct_fbgattr_sz = sizeof(fbgattr);\nunsigned struct_fbsattr_sz = sizeof(fbsattr);\nunsigned struct_fbtype_sz = sizeof(fbtype);\nunsigned struct_fdformat_cmd_sz = sizeof(fdformat_cmd);\nunsigned struct_fdformat_parms_sz = 
sizeof(fdformat_parms);\nunsigned struct_fifoq_conf_sz = sizeof(fifoq_conf);\nunsigned struct_fifoq_getstats_sz = sizeof(fifoq_getstats);\nunsigned struct_fifoq_interface_sz = sizeof(fifoq_interface);\nunsigned struct_format_op_sz = sizeof(format_op);\nunsigned struct_fss_get_sz = sizeof(fss_get);\nunsigned struct_fss_set_sz = sizeof(fss_set);\nunsigned struct_gpio_attach_sz = sizeof(gpio_attach);\nunsigned struct_gpio_info_sz = sizeof(gpio_info);\nunsigned struct_gpio_req_sz = sizeof(gpio_req);\nunsigned struct_gpio_set_sz = sizeof(gpio_set);\nunsigned struct_hfsc_add_class_sz = sizeof(hfsc_add_class);\nunsigned struct_hfsc_add_filter_sz = sizeof(hfsc_add_filter);\nunsigned struct_hfsc_attach_sz = sizeof(hfsc_attach);\nunsigned struct_hfsc_class_stats_sz = sizeof(hfsc_class_stats);\nunsigned struct_hfsc_delete_class_sz = sizeof(hfsc_delete_class);\nunsigned struct_hfsc_delete_filter_sz = sizeof(hfsc_delete_filter);\nunsigned struct_hfsc_interface_sz = sizeof(hfsc_interface);\nunsigned struct_hfsc_modify_class_sz = sizeof(hfsc_modify_class);\nunsigned struct_hpcfb_dsp_op_sz = sizeof(hpcfb_dsp_op);\nunsigned struct_hpcfb_dspconf_sz = sizeof(hpcfb_dspconf);\nunsigned struct_hpcfb_fbconf_sz = sizeof(hpcfb_fbconf);\nunsigned struct_if_addrprefreq_sz = sizeof(if_addrprefreq);\nunsigned struct_if_clonereq_sz = sizeof(if_clonereq);\nunsigned struct_if_laddrreq_sz = sizeof(if_laddrreq);\nunsigned struct_ifaddr_sz = sizeof(ifaddr);\nunsigned struct_ifaliasreq_sz = sizeof(ifaliasreq);\nunsigned struct_ifcapreq_sz = sizeof(ifcapreq);\nunsigned struct_ifconf_sz = sizeof(ifconf);\nunsigned struct_ifdatareq_sz = sizeof(ifdatareq);\nunsigned struct_ifdrv_sz = sizeof(ifdrv);\nunsigned struct_ifmediareq_sz = sizeof(ifmediareq);\nunsigned struct_ifpppcstatsreq_sz = sizeof(ifpppcstatsreq);\nunsigned struct_ifpppstatsreq_sz = sizeof(ifpppstatsreq);\nunsigned struct_ifreq_sz = sizeof(ifreq);\nunsigned struct_in6_addrpolicy_sz = sizeof(in6_addrpolicy);\nunsigned struct_in6_ndireq_sz = 
sizeof(in6_ndireq);\nunsigned struct_ioc_load_unload_sz = sizeof(ioc_load_unload);\nunsigned struct_ioc_patch_sz = sizeof(ioc_patch);\nunsigned struct_ioc_play_blocks_sz = sizeof(ioc_play_blocks);\nunsigned struct_ioc_play_msf_sz = sizeof(ioc_play_msf);\nunsigned struct_ioc_play_track_sz = sizeof(ioc_play_track);\nunsigned struct_ioc_read_subchannel_sz = sizeof(ioc_read_subchannel);\nunsigned struct_ioc_read_toc_entry_sz = sizeof(ioc_read_toc_entry);\nunsigned struct_ioc_toc_header_sz = sizeof(ioc_toc_header);\nunsigned struct_ioc_vol_sz = sizeof(ioc_vol);\nunsigned struct_ioctl_pt_sz = sizeof(ioctl_pt);\nunsigned struct_ioppt_sz = sizeof(ioppt);\nunsigned struct_iovec_sz = sizeof(iovec);\nunsigned struct_ipfobj_sz = sizeof(ipfobj);\nunsigned struct_irda_params_sz = sizeof(irda_params);\nunsigned struct_isp_fc_device_sz = sizeof(isp_fc_device);\nunsigned struct_isp_fc_tsk_mgmt_sz = sizeof(isp_fc_tsk_mgmt);\nunsigned struct_isp_hba_device_sz = sizeof(isp_hba_device);\nunsigned struct_isv_cmd_sz = sizeof(isv_cmd);\nunsigned struct_jobs_add_class_sz = sizeof(jobs_add_class);\nunsigned struct_jobs_add_filter_sz = sizeof(jobs_add_filter);\nunsigned struct_jobs_attach_sz = sizeof(jobs_attach);\nunsigned struct_jobs_class_stats_sz = sizeof(jobs_class_stats);\nunsigned struct_jobs_delete_class_sz = sizeof(jobs_delete_class);\nunsigned struct_jobs_delete_filter_sz = sizeof(jobs_delete_filter);\nunsigned struct_jobs_interface_sz = sizeof(jobs_interface);\nunsigned struct_jobs_modify_class_sz = sizeof(jobs_modify_class);\nunsigned struct_kbentry_sz = sizeof(kbentry);\nunsigned struct_kfilter_mapping_sz = sizeof(kfilter_mapping);\nunsigned struct_kiockeymap_sz = sizeof(kiockeymap);\nunsigned struct_ksyms_gsymbol_sz = sizeof(ksyms_gsymbol);\nunsigned struct_ksyms_gvalue_sz = sizeof(ksyms_gvalue);\nunsigned struct_ksyms_ogsymbol_sz = sizeof(ksyms_ogsymbol);\nunsigned struct_kttcp_io_args_sz = sizeof(kttcp_io_args);\nunsigned struct_ltchars_sz = sizeof(ltchars);\nunsigned 
struct_lua_create_sz = sizeof(struct lua_create);\nunsigned struct_lua_info_sz = sizeof(struct lua_info);\nunsigned struct_lua_load_sz = sizeof(struct lua_load);\nunsigned struct_lua_require_sz = sizeof(lua_require);\nunsigned struct_mbpp_param_sz = sizeof(mbpp_param);\nunsigned struct_md_conf_sz = sizeof(md_conf);\nunsigned struct_meteor_capframe_sz = sizeof(meteor_capframe);\nunsigned struct_meteor_counts_sz = sizeof(meteor_counts);\nunsigned struct_meteor_geomet_sz = sizeof(meteor_geomet);\nunsigned struct_meteor_pixfmt_sz = sizeof(meteor_pixfmt);\nunsigned struct_meteor_video_sz = sizeof(meteor_video);\nunsigned struct_mlx_cinfo_sz = sizeof(mlx_cinfo);\nunsigned struct_mlx_pause_sz = sizeof(mlx_pause);\nunsigned struct_mlx_rebuild_request_sz = sizeof(mlx_rebuild_request);\nunsigned struct_mlx_rebuild_status_sz = sizeof(mlx_rebuild_status);\nunsigned struct_mlx_usercommand_sz = sizeof(mlx_usercommand);\nunsigned struct_mly_user_command_sz = sizeof(mly_user_command);\nunsigned struct_mly_user_health_sz = sizeof(mly_user_health);\nunsigned struct_mtget_sz = sizeof(mtget);\nunsigned struct_mtop_sz = sizeof(mtop);\nunsigned struct_npf_ioctl_table_sz = sizeof(npf_ioctl_table);\nunsigned struct_npioctl_sz = sizeof(npioctl);\nunsigned struct_nvme_pt_command_sz = sizeof(nvme_pt_command);\nunsigned struct_ochanger_element_status_request_sz =\n    sizeof(ochanger_element_status_request);\nunsigned struct_ofiocdesc_sz = sizeof(ofiocdesc);\nunsigned struct_okiockey_sz = sizeof(okiockey);\nunsigned struct_ortentry_sz = sizeof(ortentry);\nunsigned struct_oscsi_addr_sz = sizeof(oscsi_addr);\nunsigned struct_oss_audioinfo_sz = sizeof(oss_audioinfo);\nunsigned struct_oss_sysinfo_sz = sizeof(oss_sysinfo);\nunsigned struct_pciio_bdf_cfgreg_sz = sizeof(pciio_bdf_cfgreg);\nunsigned struct_pciio_businfo_sz = sizeof(pciio_businfo);\nunsigned struct_pciio_cfgreg_sz = sizeof(pciio_cfgreg);\nunsigned struct_pciio_drvname_sz = sizeof(pciio_drvname);\nunsigned struct_pciio_drvnameonbus_sz 
= sizeof(pciio_drvnameonbus);\nunsigned struct_pcvtid_sz = sizeof(pcvtid);\nunsigned struct_pf_osfp_ioctl_sz = sizeof(pf_osfp_ioctl);\nunsigned struct_pf_status_sz = sizeof(pf_status);\nunsigned struct_pfioc_altq_sz = sizeof(pfioc_altq);\nunsigned struct_pfioc_if_sz = sizeof(pfioc_if);\nunsigned struct_pfioc_iface_sz = sizeof(pfioc_iface);\nunsigned struct_pfioc_limit_sz = sizeof(pfioc_limit);\nunsigned struct_pfioc_natlook_sz = sizeof(pfioc_natlook);\nunsigned struct_pfioc_pooladdr_sz = sizeof(pfioc_pooladdr);\nunsigned struct_pfioc_qstats_sz = sizeof(pfioc_qstats);\nunsigned struct_pfioc_rule_sz = sizeof(pfioc_rule);\nunsigned struct_pfioc_ruleset_sz = sizeof(pfioc_ruleset);\nunsigned struct_pfioc_src_node_kill_sz = sizeof(pfioc_src_node_kill);\nunsigned struct_pfioc_src_nodes_sz = sizeof(pfioc_src_nodes);\nunsigned struct_pfioc_state_kill_sz = sizeof(pfioc_state_kill);\nunsigned struct_pfioc_state_sz = sizeof(pfioc_state);\nunsigned struct_pfioc_states_sz = sizeof(pfioc_states);\nunsigned struct_pfioc_table_sz = sizeof(pfioc_table);\nunsigned struct_pfioc_tm_sz = sizeof(pfioc_tm);\nunsigned struct_pfioc_trans_sz = sizeof(pfioc_trans);\nunsigned struct_plistref_sz = sizeof(plistref);\nunsigned struct_power_type_sz = sizeof(power_type);\nunsigned struct_ppp_idle_sz = sizeof(ppp_idle);\nunsigned struct_ppp_option_data_sz = sizeof(ppp_option_data);\nunsigned struct_ppp_rawin_sz = sizeof(ppp_rawin);\nunsigned struct_pppoeconnectionstate_sz = sizeof(pppoeconnectionstate);\nunsigned struct_pppoediscparms_sz = sizeof(pppoediscparms);\nunsigned struct_priq_add_class_sz = sizeof(priq_add_class);\nunsigned struct_priq_add_filter_sz = sizeof(priq_add_filter);\nunsigned struct_priq_class_stats_sz = sizeof(priq_class_stats);\nunsigned struct_priq_delete_class_sz = sizeof(priq_delete_class);\nunsigned struct_priq_delete_filter_sz = sizeof(priq_delete_filter);\nunsigned struct_priq_interface_sz = sizeof(priq_interface);\nunsigned struct_priq_modify_class_sz = 
sizeof(priq_modify_class);\nunsigned struct_ptmget_sz = sizeof(ptmget);\nunsigned struct_radio_info_sz = sizeof(radio_info);\nunsigned struct_red_conf_sz = sizeof(red_conf);\nunsigned struct_red_interface_sz = sizeof(red_interface);\nunsigned struct_red_stats_sz = sizeof(red_stats);\nunsigned struct_redparams_sz = sizeof(redparams);\nunsigned struct_rf_pmparams_sz = sizeof(rf_pmparams);\nunsigned struct_rf_pmstat_sz = sizeof(rf_pmstat);\nunsigned struct_rf_recon_req_sz = sizeof(rf_recon_req);\nunsigned struct_rio_conf_sz = sizeof(rio_conf);\nunsigned struct_rio_interface_sz = sizeof(rio_interface);\nunsigned struct_rio_stats_sz = sizeof(rio_stats);\nunsigned struct_scan_io_sz = sizeof(scan_io);\nunsigned struct_scbusaccel_args_sz = sizeof(scbusaccel_args);\nunsigned struct_scbusiodetach_args_sz = sizeof(scbusiodetach_args);\nunsigned struct_scbusioscan_args_sz = sizeof(scbusioscan_args);\nunsigned struct_scsi_addr_sz = sizeof(scsi_addr);\nunsigned struct_seq_event_rec_sz = sizeof(seq_event_rec);\nunsigned struct_session_op_sz = sizeof(session_op);\nunsigned struct_sgttyb_sz = sizeof(sgttyb);\nunsigned struct_sioc_sg_req_sz = sizeof(sioc_sg_req);\nunsigned struct_sioc_vif_req_sz = sizeof(sioc_vif_req);\nunsigned struct_smbioc_flags_sz = sizeof(smbioc_flags);\nunsigned struct_smbioc_lookup_sz = sizeof(smbioc_lookup);\nunsigned struct_smbioc_oshare_sz = sizeof(smbioc_oshare);\nunsigned struct_smbioc_ossn_sz = sizeof(smbioc_ossn);\nunsigned struct_smbioc_rq_sz = sizeof(smbioc_rq);\nunsigned struct_smbioc_rw_sz = sizeof(smbioc_rw);\nunsigned struct_spppauthcfg_sz = sizeof(spppauthcfg);\nunsigned struct_spppauthfailuresettings_sz = sizeof(spppauthfailuresettings);\nunsigned struct_spppauthfailurestats_sz = sizeof(spppauthfailurestats);\nunsigned struct_spppdnsaddrs_sz = sizeof(spppdnsaddrs);\nunsigned struct_spppdnssettings_sz = sizeof(spppdnssettings);\nunsigned struct_spppidletimeout_sz = sizeof(spppidletimeout);\nunsigned struct_spppkeepalivesettings_sz = 
sizeof(spppkeepalivesettings);\nunsigned struct_sppplcpcfg_sz = sizeof(sppplcpcfg);\nunsigned struct_spppstatus_sz = sizeof(spppstatus);\nunsigned struct_spppstatusncp_sz = sizeof(spppstatusncp);\nunsigned struct_srt_rt_sz = sizeof(srt_rt);\nunsigned struct_stic_xinfo_sz = sizeof(stic_xinfo);\nunsigned struct_sun_dkctlr_sz = sizeof(sun_dkctlr);\nunsigned struct_sun_dkgeom_sz = sizeof(sun_dkgeom);\nunsigned struct_sun_dkpart_sz = sizeof(sun_dkpart);\nunsigned struct_synth_info_sz = sizeof(synth_info);\nunsigned struct_tbrreq_sz = sizeof(tbrreq);\nunsigned struct_tchars_sz = sizeof(tchars);\nunsigned struct_termios_sz = sizeof(termios);\nunsigned struct_timeval_sz = sizeof(timeval);\nunsigned struct_twe_drivecommand_sz = sizeof(twe_drivecommand);\nunsigned struct_twe_paramcommand_sz = sizeof(twe_paramcommand);\nunsigned struct_twe_usercommand_sz = sizeof(twe_usercommand);\nunsigned struct_ukyopon_identify_sz = sizeof(ukyopon_identify);\nunsigned struct_urio_command_sz = sizeof(urio_command);\nunsigned struct_usb_alt_interface_sz = sizeof(usb_alt_interface);\nunsigned struct_usb_bulk_ra_wb_opt_sz = sizeof(usb_bulk_ra_wb_opt);\nunsigned struct_usb_config_desc_sz = sizeof(usb_config_desc);\nunsigned struct_usb_ctl_report_desc_sz = sizeof(usb_ctl_report_desc);\nunsigned struct_usb_ctl_report_sz = sizeof(usb_ctl_report);\nunsigned struct_usb_ctl_request_sz = sizeof(usb_ctl_request);\n#if defined(__x86_64__)\nunsigned struct_nvmm_ioc_capability_sz = sizeof(nvmm_ioc_capability);\nunsigned struct_nvmm_ioc_machine_create_sz = sizeof(nvmm_ioc_machine_create);\nunsigned struct_nvmm_ioc_machine_destroy_sz = sizeof(nvmm_ioc_machine_destroy);\nunsigned struct_nvmm_ioc_machine_configure_sz =\n    sizeof(nvmm_ioc_machine_configure);\nunsigned struct_nvmm_ioc_vcpu_create_sz = sizeof(nvmm_ioc_vcpu_create);\nunsigned struct_nvmm_ioc_vcpu_destroy_sz = sizeof(nvmm_ioc_vcpu_destroy);\nunsigned struct_nvmm_ioc_vcpu_configure_sz = sizeof(nvmm_ioc_vcpu_configure);\nunsigned 
struct_nvmm_ioc_vcpu_setstate_sz = sizeof(nvmm_ioc_vcpu_setstate);\nunsigned struct_nvmm_ioc_vcpu_getstate_sz = sizeof(nvmm_ioc_vcpu_getstate);\nunsigned struct_nvmm_ioc_vcpu_inject_sz = sizeof(nvmm_ioc_vcpu_inject);\nunsigned struct_nvmm_ioc_vcpu_run_sz = sizeof(nvmm_ioc_vcpu_run);\nunsigned struct_nvmm_ioc_gpa_map_sz = sizeof(nvmm_ioc_gpa_map);\nunsigned struct_nvmm_ioc_gpa_unmap_sz = sizeof(nvmm_ioc_gpa_unmap);\nunsigned struct_nvmm_ioc_hva_map_sz = sizeof(nvmm_ioc_hva_map);\nunsigned struct_nvmm_ioc_hva_unmap_sz = sizeof(nvmm_ioc_hva_unmap);\nunsigned struct_nvmm_ioc_ctl_sz = sizeof(nvmm_ioc_ctl);\n#endif\nunsigned struct_spi_ioctl_configure_sz = sizeof(spi_ioctl_configure);\nunsigned struct_spi_ioctl_transfer_sz = sizeof(spi_ioctl_transfer);\nunsigned struct_autofs_daemon_request_sz = sizeof(autofs_daemon_request);\nunsigned struct_autofs_daemon_done_sz = sizeof(autofs_daemon_done);\nunsigned struct_sctp_connectx_addrs_sz = sizeof(sctp_connectx_addrs);\nunsigned struct_usb_device_info_old_sz = sizeof(usb_device_info_old);\nunsigned struct_usb_device_info_sz = sizeof(usb_device_info);\nunsigned struct_usb_device_stats_sz = sizeof(usb_device_stats);\nunsigned struct_usb_endpoint_desc_sz = sizeof(usb_endpoint_desc);\nunsigned struct_usb_full_desc_sz = sizeof(usb_full_desc);\nunsigned struct_usb_interface_desc_sz = sizeof(usb_interface_desc);\nunsigned struct_usb_string_desc_sz = sizeof(usb_string_desc);\nunsigned struct_utoppy_readfile_sz = sizeof(utoppy_readfile);\nunsigned struct_utoppy_rename_sz = sizeof(utoppy_rename);\nunsigned struct_utoppy_stats_sz = sizeof(utoppy_stats);\nunsigned struct_utoppy_writefile_sz = sizeof(utoppy_writefile);\nunsigned struct_v4l2_audio_sz = sizeof(v4l2_audio);\nunsigned struct_v4l2_audioout_sz = sizeof(v4l2_audioout);\nunsigned struct_v4l2_buffer_sz = sizeof(v4l2_buffer);\nunsigned struct_v4l2_capability_sz = sizeof(v4l2_capability);\nunsigned struct_v4l2_control_sz = sizeof(v4l2_control);\nunsigned struct_v4l2_crop_sz = 
sizeof(v4l2_crop);\nunsigned struct_v4l2_cropcap_sz = sizeof(v4l2_cropcap);\nunsigned struct_v4l2_fmtdesc_sz = sizeof(v4l2_fmtdesc);\nunsigned struct_v4l2_format_sz = sizeof(v4l2_format);\nunsigned struct_v4l2_framebuffer_sz = sizeof(v4l2_framebuffer);\nunsigned struct_v4l2_frequency_sz = sizeof(v4l2_frequency);\nunsigned struct_v4l2_frmivalenum_sz = sizeof(v4l2_frmivalenum);\nunsigned struct_v4l2_frmsizeenum_sz = sizeof(v4l2_frmsizeenum);\nunsigned struct_v4l2_input_sz = sizeof(v4l2_input);\nunsigned struct_v4l2_jpegcompression_sz = sizeof(v4l2_jpegcompression);\nunsigned struct_v4l2_modulator_sz = sizeof(v4l2_modulator);\nunsigned struct_v4l2_output_sz = sizeof(v4l2_output);\nunsigned struct_v4l2_queryctrl_sz = sizeof(v4l2_queryctrl);\nunsigned struct_v4l2_querymenu_sz = sizeof(v4l2_querymenu);\nunsigned struct_v4l2_requestbuffers_sz = sizeof(v4l2_requestbuffers);\nunsigned struct_v4l2_standard_sz = sizeof(v4l2_standard);\nunsigned struct_v4l2_streamparm_sz = sizeof(v4l2_streamparm);\nunsigned struct_v4l2_tuner_sz = sizeof(v4l2_tuner);\nunsigned struct_vnd_ioctl_sz = sizeof(vnd_ioctl);\nunsigned struct_vnd_user_sz = sizeof(vnd_user);\nunsigned struct_vt_stat_sz = sizeof(vt_stat);\nunsigned struct_wdog_conf_sz = sizeof(wdog_conf);\nunsigned struct_wdog_mode_sz = sizeof(wdog_mode);\nunsigned struct_ipmi_recv_sz = sizeof(ipmi_recv);\nunsigned struct_ipmi_req_sz = sizeof(ipmi_req);\nunsigned struct_ipmi_cmdspec_sz = sizeof(ipmi_cmdspec);\nunsigned struct_wfq_conf_sz = sizeof(wfq_conf);\nunsigned struct_wfq_getqid_sz = sizeof(wfq_getqid);\nunsigned struct_wfq_getstats_sz = sizeof(wfq_getstats);\nunsigned struct_wfq_interface_sz = sizeof(wfq_interface);\nunsigned struct_wfq_setweight_sz = sizeof(wfq_setweight);\nunsigned struct_winsize_sz = sizeof(winsize);\nunsigned struct_wscons_event_sz = sizeof(wscons_event);\nunsigned struct_wsdisplay_addscreendata_sz = sizeof(wsdisplay_addscreendata);\nunsigned struct_wsdisplay_char_sz = sizeof(wsdisplay_char);\nunsigned 
struct_wsdisplay_cmap_sz = sizeof(wsdisplay_cmap);\nunsigned struct_wsdisplay_curpos_sz = sizeof(wsdisplay_curpos);\nunsigned struct_wsdisplay_cursor_sz = sizeof(wsdisplay_cursor);\nunsigned struct_wsdisplay_delscreendata_sz = sizeof(wsdisplay_delscreendata);\nunsigned struct_wsdisplay_fbinfo_sz = sizeof(wsdisplay_fbinfo);\nunsigned struct_wsdisplay_font_sz = sizeof(wsdisplay_font);\nunsigned struct_wsdisplay_kbddata_sz = sizeof(wsdisplay_kbddata);\nunsigned struct_wsdisplay_msgattrs_sz = sizeof(wsdisplay_msgattrs);\nunsigned struct_wsdisplay_param_sz = sizeof(wsdisplay_param);\nunsigned struct_wsdisplay_scroll_data_sz = sizeof(wsdisplay_scroll_data);\nunsigned struct_wsdisplay_usefontdata_sz = sizeof(wsdisplay_usefontdata);\nunsigned struct_wsdisplayio_blit_sz = sizeof(wsdisplayio_blit);\nunsigned struct_wsdisplayio_bus_id_sz = sizeof(wsdisplayio_bus_id);\nunsigned struct_wsdisplayio_edid_info_sz = sizeof(wsdisplayio_edid_info);\nunsigned struct_wsdisplayio_fbinfo_sz = sizeof(wsdisplayio_fbinfo);\nunsigned struct_wskbd_bell_data_sz = sizeof(wskbd_bell_data);\nunsigned struct_wskbd_keyrepeat_data_sz = sizeof(wskbd_keyrepeat_data);\nunsigned struct_wskbd_map_data_sz = sizeof(wskbd_map_data);\nunsigned struct_wskbd_scroll_data_sz = sizeof(wskbd_scroll_data);\nunsigned struct_wsmouse_calibcoords_sz = sizeof(wsmouse_calibcoords);\nunsigned struct_wsmouse_id_sz = sizeof(wsmouse_id);\nunsigned struct_wsmouse_repeat_sz = sizeof(wsmouse_repeat);\nunsigned struct_wsmux_device_list_sz = sizeof(wsmux_device_list);\nunsigned struct_wsmux_device_sz = sizeof(wsmux_device);\nunsigned struct_xd_iocmd_sz = sizeof(xd_iocmd);\n\nunsigned struct_scsireq_sz = sizeof(struct scsireq);\nunsigned struct_tone_sz = sizeof(tone_t);\nunsigned union_twe_statrequest_sz = sizeof(union twe_statrequest);\nunsigned struct_usb_device_descriptor_sz = sizeof(usb_device_descriptor_t);\nunsigned struct_vt_mode_sz = sizeof(struct vt_mode);\nunsigned struct__old_mixer_info_sz = sizeof(struct 
_old_mixer_info);\nunsigned struct__agp_allocate_sz = sizeof(struct _agp_allocate);\nunsigned struct__agp_bind_sz = sizeof(struct _agp_bind);\nunsigned struct__agp_info_sz = sizeof(struct _agp_info);\nunsigned struct__agp_setup_sz = sizeof(struct _agp_setup);\nunsigned struct__agp_unbind_sz = sizeof(struct _agp_unbind);\nunsigned struct_atareq_sz = sizeof(struct atareq);\nunsigned struct_cpustate_sz = sizeof(struct cpustate);\nunsigned struct_dmx_caps_sz = sizeof(struct dmx_caps);\nunsigned enum_dmx_source_sz = sizeof(dmx_source_t);\nunsigned union_dvd_authinfo_sz = sizeof(dvd_authinfo);\nunsigned union_dvd_struct_sz = sizeof(dvd_struct);\nunsigned enum_v4l2_priority_sz = sizeof(enum v4l2_priority);\nunsigned struct_envsys_basic_info_sz = sizeof(struct envsys_basic_info);\nunsigned struct_envsys_tre_data_sz = sizeof(struct envsys_tre_data);\nunsigned enum_fe_sec_mini_cmd_sz = sizeof(enum fe_sec_mini_cmd);\nunsigned enum_fe_sec_tone_mode_sz = sizeof(enum fe_sec_tone_mode);\nunsigned enum_fe_sec_voltage_sz = sizeof(enum fe_sec_voltage);\nunsigned enum_fe_status_sz = sizeof(enum fe_status);\nunsigned struct_gdt_ctrt_sz = sizeof(struct gdt_ctrt);\nunsigned struct_gdt_event_sz = sizeof(struct gdt_event);\nunsigned struct_gdt_osv_sz = sizeof(struct gdt_osv);\nunsigned struct_gdt_rescan_sz = sizeof(struct gdt_rescan);\nunsigned struct_gdt_statist_sz = sizeof(struct gdt_statist);\nunsigned struct_gdt_ucmd_sz = sizeof(struct gdt_ucmd);\nunsigned struct_iscsi_conn_status_parameters_sz =\n    sizeof(iscsi_conn_status_parameters_t);\nunsigned struct_iscsi_get_version_parameters_sz =\n    sizeof(iscsi_get_version_parameters_t);\nunsigned struct_iscsi_iocommand_parameters_sz =\n    sizeof(iscsi_iocommand_parameters_t);\nunsigned struct_iscsi_login_parameters_sz = sizeof(iscsi_login_parameters_t);\nunsigned struct_iscsi_logout_parameters_sz = sizeof(iscsi_logout_parameters_t);\nunsigned struct_iscsi_register_event_parameters_sz =\n    
sizeof(iscsi_register_event_parameters_t);\nunsigned struct_iscsi_remove_parameters_sz = sizeof(iscsi_remove_parameters_t);\nunsigned struct_iscsi_send_targets_parameters_sz =\n    sizeof(iscsi_send_targets_parameters_t);\nunsigned struct_iscsi_set_node_name_parameters_sz =\n    sizeof(iscsi_set_node_name_parameters_t);\nunsigned struct_iscsi_wait_event_parameters_sz =\n    sizeof(iscsi_wait_event_parameters_t);\nunsigned struct_isp_stats_sz = sizeof(isp_stats_t);\nunsigned struct_lsenable_sz = sizeof(struct lsenable);\nunsigned struct_lsdisable_sz = sizeof(struct lsdisable);\nunsigned struct_audio_format_query_sz = sizeof(audio_format_query);\nunsigned struct_mixer_ctrl_sz = sizeof(struct mixer_ctrl);\nunsigned struct_mixer_devinfo_sz = sizeof(struct mixer_devinfo);\nunsigned struct_mpu_command_rec_sz = sizeof(mpu_command_rec);\nunsigned struct_rndstat_sz = sizeof(rndstat_t);\nunsigned struct_rndstat_name_sz = sizeof(rndstat_name_t);\nunsigned struct_rndctl_sz = sizeof(rndctl_t);\nunsigned struct_rnddata_sz = sizeof(rnddata_t);\nunsigned struct_rndpoolstat_sz = sizeof(rndpoolstat_t);\nunsigned struct_rndstat_est_sz = sizeof(rndstat_est_t);\nunsigned struct_rndstat_est_name_sz = sizeof(rndstat_est_name_t);\nunsigned struct_pps_params_sz = sizeof(pps_params_t);\nunsigned struct_pps_info_sz = sizeof(pps_info_t);\nunsigned struct_mixer_info_sz = sizeof(struct mixer_info);\nunsigned struct_RF_SparetWait_sz = sizeof(RF_SparetWait_t);\nunsigned struct_RF_ComponentLabel_sz = sizeof(RF_ComponentLabel_t);\nunsigned struct_RF_SingleComponent_sz = sizeof(RF_SingleComponent_t);\nunsigned struct_RF_ProgressInfo_sz = sizeof(RF_ProgressInfo_t);\nunsigned struct_nvlist_ref_sz = sizeof(struct __sanitizer_nvlist_ref_t);\nunsigned struct_StringList_sz = sizeof(StringList);\n\nconst unsigned IOCTL_NOT_PRESENT = 0;\n\nunsigned IOCTL_AFM_ADDFMAP = AFM_ADDFMAP;\nunsigned IOCTL_AFM_DELFMAP = AFM_DELFMAP;\nunsigned IOCTL_AFM_CLEANFMAP = AFM_CLEANFMAP;\nunsigned IOCTL_AFM_GETFMAP = 
AFM_GETFMAP;\nunsigned IOCTL_ALTQGTYPE = ALTQGTYPE;\nunsigned IOCTL_ALTQTBRSET = ALTQTBRSET;\nunsigned IOCTL_ALTQTBRGET = ALTQTBRGET;\nunsigned IOCTL_BLUE_IF_ATTACH = BLUE_IF_ATTACH;\nunsigned IOCTL_BLUE_IF_DETACH = BLUE_IF_DETACH;\nunsigned IOCTL_BLUE_ENABLE = BLUE_ENABLE;\nunsigned IOCTL_BLUE_DISABLE = BLUE_DISABLE;\nunsigned IOCTL_BLUE_CONFIG = BLUE_CONFIG;\nunsigned IOCTL_BLUE_GETSTATS = BLUE_GETSTATS;\nunsigned IOCTL_CBQ_IF_ATTACH = CBQ_IF_ATTACH;\nunsigned IOCTL_CBQ_IF_DETACH = CBQ_IF_DETACH;\nunsigned IOCTL_CBQ_ENABLE = CBQ_ENABLE;\nunsigned IOCTL_CBQ_DISABLE = CBQ_DISABLE;\nunsigned IOCTL_CBQ_CLEAR_HIERARCHY = CBQ_CLEAR_HIERARCHY;\nunsigned IOCTL_CBQ_ADD_CLASS = CBQ_ADD_CLASS;\nunsigned IOCTL_CBQ_DEL_CLASS = CBQ_DEL_CLASS;\nunsigned IOCTL_CBQ_MODIFY_CLASS = CBQ_MODIFY_CLASS;\nunsigned IOCTL_CBQ_ADD_FILTER = CBQ_ADD_FILTER;\nunsigned IOCTL_CBQ_DEL_FILTER = CBQ_DEL_FILTER;\nunsigned IOCTL_CBQ_GETSTATS = CBQ_GETSTATS;\nunsigned IOCTL_CDNR_IF_ATTACH = CDNR_IF_ATTACH;\nunsigned IOCTL_CDNR_IF_DETACH = CDNR_IF_DETACH;\nunsigned IOCTL_CDNR_ENABLE = CDNR_ENABLE;\nunsigned IOCTL_CDNR_DISABLE = CDNR_DISABLE;\nunsigned IOCTL_CDNR_ADD_FILTER = CDNR_ADD_FILTER;\nunsigned IOCTL_CDNR_DEL_FILTER = CDNR_DEL_FILTER;\nunsigned IOCTL_CDNR_GETSTATS = CDNR_GETSTATS;\nunsigned IOCTL_CDNR_ADD_ELEM = CDNR_ADD_ELEM;\nunsigned IOCTL_CDNR_DEL_ELEM = CDNR_DEL_ELEM;\nunsigned IOCTL_CDNR_ADD_TBM = CDNR_ADD_TBM;\nunsigned IOCTL_CDNR_MOD_TBM = CDNR_MOD_TBM;\nunsigned IOCTL_CDNR_TBM_STATS = CDNR_TBM_STATS;\nunsigned IOCTL_CDNR_ADD_TCM = CDNR_ADD_TCM;\nunsigned IOCTL_CDNR_MOD_TCM = CDNR_MOD_TCM;\nunsigned IOCTL_CDNR_TCM_STATS = CDNR_TCM_STATS;\nunsigned IOCTL_CDNR_ADD_TSW = CDNR_ADD_TSW;\nunsigned IOCTL_CDNR_MOD_TSW = CDNR_MOD_TSW;\nunsigned IOCTL_FIFOQ_IF_ATTACH = FIFOQ_IF_ATTACH;\nunsigned IOCTL_FIFOQ_IF_DETACH = FIFOQ_IF_DETACH;\nunsigned IOCTL_FIFOQ_ENABLE = FIFOQ_ENABLE;\nunsigned IOCTL_FIFOQ_DISABLE = FIFOQ_DISABLE;\nunsigned IOCTL_FIFOQ_CONFIG = FIFOQ_CONFIG;\nunsigned 
IOCTL_FIFOQ_GETSTATS = FIFOQ_GETSTATS;\nunsigned IOCTL_HFSC_IF_ATTACH = HFSC_IF_ATTACH;\nunsigned IOCTL_HFSC_IF_DETACH = HFSC_IF_DETACH;\nunsigned IOCTL_HFSC_ENABLE = HFSC_ENABLE;\nunsigned IOCTL_HFSC_DISABLE = HFSC_DISABLE;\nunsigned IOCTL_HFSC_CLEAR_HIERARCHY = HFSC_CLEAR_HIERARCHY;\nunsigned IOCTL_HFSC_ADD_CLASS = HFSC_ADD_CLASS;\nunsigned IOCTL_HFSC_DEL_CLASS = HFSC_DEL_CLASS;\nunsigned IOCTL_HFSC_MOD_CLASS = HFSC_MOD_CLASS;\nunsigned IOCTL_HFSC_ADD_FILTER = HFSC_ADD_FILTER;\nunsigned IOCTL_HFSC_DEL_FILTER = HFSC_DEL_FILTER;\nunsigned IOCTL_HFSC_GETSTATS = HFSC_GETSTATS;\nunsigned IOCTL_JOBS_IF_ATTACH = JOBS_IF_ATTACH;\nunsigned IOCTL_JOBS_IF_DETACH = JOBS_IF_DETACH;\nunsigned IOCTL_JOBS_ENABLE = JOBS_ENABLE;\nunsigned IOCTL_JOBS_DISABLE = JOBS_DISABLE;\nunsigned IOCTL_JOBS_CLEAR = JOBS_CLEAR;\nunsigned IOCTL_JOBS_ADD_CLASS = JOBS_ADD_CLASS;\nunsigned IOCTL_JOBS_DEL_CLASS = JOBS_DEL_CLASS;\nunsigned IOCTL_JOBS_MOD_CLASS = JOBS_MOD_CLASS;\nunsigned IOCTL_JOBS_ADD_FILTER = JOBS_ADD_FILTER;\nunsigned IOCTL_JOBS_DEL_FILTER = JOBS_DEL_FILTER;\nunsigned IOCTL_JOBS_GETSTATS = JOBS_GETSTATS;\nunsigned IOCTL_PRIQ_IF_ATTACH = PRIQ_IF_ATTACH;\nunsigned IOCTL_PRIQ_IF_DETACH = PRIQ_IF_DETACH;\nunsigned IOCTL_PRIQ_ENABLE = PRIQ_ENABLE;\nunsigned IOCTL_PRIQ_DISABLE = PRIQ_DISABLE;\nunsigned IOCTL_PRIQ_CLEAR = PRIQ_CLEAR;\nunsigned IOCTL_PRIQ_ADD_CLASS = PRIQ_ADD_CLASS;\nunsigned IOCTL_PRIQ_DEL_CLASS = PRIQ_DEL_CLASS;\nunsigned IOCTL_PRIQ_MOD_CLASS = PRIQ_MOD_CLASS;\nunsigned IOCTL_PRIQ_ADD_FILTER = PRIQ_ADD_FILTER;\nunsigned IOCTL_PRIQ_DEL_FILTER = PRIQ_DEL_FILTER;\nunsigned IOCTL_PRIQ_GETSTATS = PRIQ_GETSTATS;\nunsigned IOCTL_RED_IF_ATTACH = RED_IF_ATTACH;\nunsigned IOCTL_RED_IF_DETACH = RED_IF_DETACH;\nunsigned IOCTL_RED_ENABLE = RED_ENABLE;\nunsigned IOCTL_RED_DISABLE = RED_DISABLE;\nunsigned IOCTL_RED_CONFIG = RED_CONFIG;\nunsigned IOCTL_RED_GETSTATS = RED_GETSTATS;\nunsigned IOCTL_RED_SETDEFAULTS = RED_SETDEFAULTS;\nunsigned IOCTL_RIO_IF_ATTACH = RIO_IF_ATTACH;\nunsigned 
IOCTL_RIO_IF_DETACH = RIO_IF_DETACH;\nunsigned IOCTL_RIO_ENABLE = RIO_ENABLE;\nunsigned IOCTL_RIO_DISABLE = RIO_DISABLE;\nunsigned IOCTL_RIO_CONFIG = RIO_CONFIG;\nunsigned IOCTL_RIO_GETSTATS = RIO_GETSTATS;\nunsigned IOCTL_RIO_SETDEFAULTS = RIO_SETDEFAULTS;\nunsigned IOCTL_WFQ_IF_ATTACH = WFQ_IF_ATTACH;\nunsigned IOCTL_WFQ_IF_DETACH = WFQ_IF_DETACH;\nunsigned IOCTL_WFQ_ENABLE = WFQ_ENABLE;\nunsigned IOCTL_WFQ_DISABLE = WFQ_DISABLE;\nunsigned IOCTL_WFQ_CONFIG = WFQ_CONFIG;\nunsigned IOCTL_WFQ_GET_STATS = WFQ_GET_STATS;\nunsigned IOCTL_WFQ_GET_QID = WFQ_GET_QID;\nunsigned IOCTL_WFQ_SET_WEIGHT = WFQ_SET_WEIGHT;\nunsigned IOCTL_CRIOGET = CRIOGET;\nunsigned IOCTL_CIOCFSESSION = CIOCFSESSION;\nunsigned IOCTL_CIOCKEY = CIOCKEY;\nunsigned IOCTL_CIOCNFKEYM = CIOCNFKEYM;\nunsigned IOCTL_CIOCNFSESSION = CIOCNFSESSION;\nunsigned IOCTL_CIOCNCRYPTRETM = CIOCNCRYPTRETM;\nunsigned IOCTL_CIOCNCRYPTRET = CIOCNCRYPTRET;\nunsigned IOCTL_CIOCGSESSION = CIOCGSESSION;\nunsigned IOCTL_CIOCNGSESSION = CIOCNGSESSION;\nunsigned IOCTL_CIOCCRYPT = CIOCCRYPT;\nunsigned IOCTL_CIOCNCRYPTM = CIOCNCRYPTM;\nunsigned IOCTL_CIOCASYMFEAT = CIOCASYMFEAT;\nunsigned IOCTL_APM_IOC_REJECT = APM_IOC_REJECT;\nunsigned IOCTL_APM_IOC_STANDBY = APM_IOC_STANDBY;\nunsigned IOCTL_APM_IOC_SUSPEND = APM_IOC_SUSPEND;\nunsigned IOCTL_OAPM_IOC_GETPOWER = OAPM_IOC_GETPOWER;\nunsigned IOCTL_APM_IOC_GETPOWER = APM_IOC_GETPOWER;\nunsigned IOCTL_APM_IOC_NEXTEVENT = APM_IOC_NEXTEVENT;\nunsigned IOCTL_APM_IOC_DEV_CTL = APM_IOC_DEV_CTL;\nunsigned IOCTL_NETBSD_DM_IOCTL = NETBSD_DM_IOCTL;\nunsigned IOCTL_DMIO_SETFUNC = DMIO_SETFUNC;\nunsigned IOCTL_DMX_START = DMX_START;\nunsigned IOCTL_DMX_STOP = DMX_STOP;\nunsigned IOCTL_DMX_SET_FILTER = DMX_SET_FILTER;\nunsigned IOCTL_DMX_SET_PES_FILTER = DMX_SET_PES_FILTER;\nunsigned IOCTL_DMX_SET_BUFFER_SIZE = DMX_SET_BUFFER_SIZE;\nunsigned IOCTL_DMX_GET_STC = DMX_GET_STC;\nunsigned IOCTL_DMX_ADD_PID = DMX_ADD_PID;\nunsigned IOCTL_DMX_REMOVE_PID = DMX_REMOVE_PID;\nunsigned IOCTL_DMX_GET_CAPS 
= DMX_GET_CAPS;\nunsigned IOCTL_DMX_SET_SOURCE = DMX_SET_SOURCE;\nunsigned IOCTL_FE_READ_STATUS = FE_READ_STATUS;\nunsigned IOCTL_FE_READ_BER = FE_READ_BER;\nunsigned IOCTL_FE_READ_SNR = FE_READ_SNR;\nunsigned IOCTL_FE_READ_SIGNAL_STRENGTH = FE_READ_SIGNAL_STRENGTH;\nunsigned IOCTL_FE_READ_UNCORRECTED_BLOCKS = FE_READ_UNCORRECTED_BLOCKS;\nunsigned IOCTL_FE_SET_FRONTEND = FE_SET_FRONTEND;\nunsigned IOCTL_FE_GET_FRONTEND = FE_GET_FRONTEND;\nunsigned IOCTL_FE_GET_EVENT = FE_GET_EVENT;\nunsigned IOCTL_FE_GET_INFO = FE_GET_INFO;\nunsigned IOCTL_FE_DISEQC_RESET_OVERLOAD = FE_DISEQC_RESET_OVERLOAD;\nunsigned IOCTL_FE_DISEQC_SEND_MASTER_CMD = FE_DISEQC_SEND_MASTER_CMD;\nunsigned IOCTL_FE_DISEQC_RECV_SLAVE_REPLY = FE_DISEQC_RECV_SLAVE_REPLY;\nunsigned IOCTL_FE_DISEQC_SEND_BURST = FE_DISEQC_SEND_BURST;\nunsigned IOCTL_FE_SET_TONE = FE_SET_TONE;\nunsigned IOCTL_FE_SET_VOLTAGE = FE_SET_VOLTAGE;\nunsigned IOCTL_FE_ENABLE_HIGH_LNB_VOLTAGE = FE_ENABLE_HIGH_LNB_VOLTAGE;\nunsigned IOCTL_FE_SET_FRONTEND_TUNE_MODE = FE_SET_FRONTEND_TUNE_MODE;\nunsigned IOCTL_FE_DISHNETWORK_SEND_LEGACY_CMD = FE_DISHNETWORK_SEND_LEGACY_CMD;\nunsigned IOCTL_FILEMON_SET_FD = FILEMON_SET_FD;\nunsigned IOCTL_FILEMON_SET_PID = FILEMON_SET_PID;\nunsigned IOCTL_HDAUDIO_FGRP_INFO = HDAUDIO_FGRP_INFO;\nunsigned IOCTL_HDAUDIO_FGRP_GETCONFIG = HDAUDIO_FGRP_GETCONFIG;\nunsigned IOCTL_HDAUDIO_FGRP_SETCONFIG = HDAUDIO_FGRP_SETCONFIG;\nunsigned IOCTL_HDAUDIO_FGRP_WIDGET_INFO = HDAUDIO_FGRP_WIDGET_INFO;\nunsigned IOCTL_HDAUDIO_FGRP_CODEC_INFO = HDAUDIO_FGRP_CODEC_INFO;\nunsigned IOCTL_HDAUDIO_AFG_WIDGET_INFO = HDAUDIO_AFG_WIDGET_INFO;\nunsigned IOCTL_HDAUDIO_AFG_CODEC_INFO = HDAUDIO_AFG_CODEC_INFO;\nunsigned IOCTL_CEC_GET_PHYS_ADDR = CEC_GET_PHYS_ADDR;\nunsigned IOCTL_CEC_GET_LOG_ADDRS = CEC_GET_LOG_ADDRS;\nunsigned IOCTL_CEC_SET_LOG_ADDRS = CEC_SET_LOG_ADDRS;\nunsigned IOCTL_CEC_GET_VENDOR_ID = CEC_GET_VENDOR_ID;\nunsigned IOCTL_HPCFBIO_GCONF = HPCFBIO_GCONF;\nunsigned IOCTL_HPCFBIO_SCONF = HPCFBIO_SCONF;\nunsigned 
IOCTL_HPCFBIO_GDSPCONF = HPCFBIO_GDSPCONF;\nunsigned IOCTL_HPCFBIO_SDSPCONF = HPCFBIO_SDSPCONF;\nunsigned IOCTL_HPCFBIO_GOP = HPCFBIO_GOP;\nunsigned IOCTL_HPCFBIO_SOP = HPCFBIO_SOP;\nunsigned IOCTL_IOPIOCPT = IOPIOCPT;\nunsigned IOCTL_IOPIOCGLCT = IOPIOCGLCT;\nunsigned IOCTL_IOPIOCGSTATUS = IOPIOCGSTATUS;\nunsigned IOCTL_IOPIOCRECONFIG = IOPIOCRECONFIG;\nunsigned IOCTL_IOPIOCGTIDMAP = IOPIOCGTIDMAP;\nunsigned IOCTL_SIOCGATHSTATS = SIOCGATHSTATS;\nunsigned IOCTL_SIOCGATHDIAG = SIOCGATHDIAG;\nunsigned IOCTL_METEORCAPTUR = METEORCAPTUR;\nunsigned IOCTL_METEORCAPFRM = METEORCAPFRM;\nunsigned IOCTL_METEORSETGEO = METEORSETGEO;\nunsigned IOCTL_METEORGETGEO = METEORGETGEO;\nunsigned IOCTL_METEORSTATUS = METEORSTATUS;\nunsigned IOCTL_METEORSHUE = METEORSHUE;\nunsigned IOCTL_METEORGHUE = METEORGHUE;\nunsigned IOCTL_METEORSFMT = METEORSFMT;\nunsigned IOCTL_METEORGFMT = METEORGFMT;\nunsigned IOCTL_METEORSINPUT = METEORSINPUT;\nunsigned IOCTL_METEORGINPUT = METEORGINPUT;\nunsigned IOCTL_METEORSCHCV = METEORSCHCV;\nunsigned IOCTL_METEORGCHCV = METEORGCHCV;\nunsigned IOCTL_METEORSCOUNT = METEORSCOUNT;\nunsigned IOCTL_METEORGCOUNT = METEORGCOUNT;\nunsigned IOCTL_METEORSFPS = METEORSFPS;\nunsigned IOCTL_METEORGFPS = METEORGFPS;\nunsigned IOCTL_METEORSSIGNAL = METEORSSIGNAL;\nunsigned IOCTL_METEORGSIGNAL = METEORGSIGNAL;\nunsigned IOCTL_METEORSVIDEO = METEORSVIDEO;\nunsigned IOCTL_METEORGVIDEO = METEORGVIDEO;\nunsigned IOCTL_METEORSBRIG = METEORSBRIG;\nunsigned IOCTL_METEORGBRIG = METEORGBRIG;\nunsigned IOCTL_METEORSCSAT = METEORSCSAT;\nunsigned IOCTL_METEORGCSAT = METEORGCSAT;\nunsigned IOCTL_METEORSCONT = METEORSCONT;\nunsigned IOCTL_METEORGCONT = METEORGCONT;\nunsigned IOCTL_METEORSHWS = METEORSHWS;\nunsigned IOCTL_METEORGHWS = METEORGHWS;\nunsigned IOCTL_METEORSVWS = METEORSVWS;\nunsigned IOCTL_METEORGVWS = METEORGVWS;\nunsigned IOCTL_METEORSTS = METEORSTS;\nunsigned IOCTL_METEORGTS = METEORGTS;\nunsigned IOCTL_TVTUNER_SETCHNL = TVTUNER_SETCHNL;\nunsigned IOCTL_TVTUNER_GETCHNL 
= TVTUNER_GETCHNL;\nunsigned IOCTL_TVTUNER_SETTYPE = TVTUNER_SETTYPE;\nunsigned IOCTL_TVTUNER_GETTYPE = TVTUNER_GETTYPE;\nunsigned IOCTL_TVTUNER_GETSTATUS = TVTUNER_GETSTATUS;\nunsigned IOCTL_TVTUNER_SETFREQ = TVTUNER_SETFREQ;\nunsigned IOCTL_TVTUNER_GETFREQ = TVTUNER_GETFREQ;\nunsigned IOCTL_TVTUNER_SETAFC = TVTUNER_SETAFC;\nunsigned IOCTL_TVTUNER_GETAFC = TVTUNER_GETAFC;\nunsigned IOCTL_RADIO_SETMODE = RADIO_SETMODE;\nunsigned IOCTL_RADIO_GETMODE = RADIO_GETMODE;\nunsigned IOCTL_RADIO_SETFREQ = RADIO_SETFREQ;\nunsigned IOCTL_RADIO_GETFREQ = RADIO_GETFREQ;\nunsigned IOCTL_METEORSACTPIXFMT = METEORSACTPIXFMT;\nunsigned IOCTL_METEORGACTPIXFMT = METEORGACTPIXFMT;\nunsigned IOCTL_METEORGSUPPIXFMT = METEORGSUPPIXFMT;\nunsigned IOCTL_TVTUNER_GETCHNLSET = TVTUNER_GETCHNLSET;\nunsigned IOCTL_REMOTE_GETKEY = REMOTE_GETKEY;\nunsigned IOCTL_GDT_IOCTL_GENERAL = GDT_IOCTL_GENERAL;\nunsigned IOCTL_GDT_IOCTL_DRVERS = GDT_IOCTL_DRVERS;\nunsigned IOCTL_GDT_IOCTL_CTRTYPE = GDT_IOCTL_CTRTYPE;\nunsigned IOCTL_GDT_IOCTL_OSVERS = GDT_IOCTL_OSVERS;\nunsigned IOCTL_GDT_IOCTL_CTRCNT = GDT_IOCTL_CTRCNT;\nunsigned IOCTL_GDT_IOCTL_EVENT = GDT_IOCTL_EVENT;\nunsigned IOCTL_GDT_IOCTL_STATIST = GDT_IOCTL_STATIST;\nunsigned IOCTL_GDT_IOCTL_RESCAN = GDT_IOCTL_RESCAN;\nunsigned IOCTL_ISP_SDBLEV = ISP_SDBLEV;\nunsigned IOCTL_ISP_RESETHBA = ISP_RESETHBA;\nunsigned IOCTL_ISP_RESCAN = ISP_RESCAN;\nunsigned IOCTL_ISP_SETROLE = ISP_SETROLE;\nunsigned IOCTL_ISP_GETROLE = ISP_GETROLE;\nunsigned IOCTL_ISP_GET_STATS = ISP_GET_STATS;\nunsigned IOCTL_ISP_CLR_STATS = ISP_CLR_STATS;\nunsigned IOCTL_ISP_FC_LIP = ISP_FC_LIP;\nunsigned IOCTL_ISP_FC_GETDINFO = ISP_FC_GETDINFO;\nunsigned IOCTL_ISP_GET_FW_CRASH_DUMP = ISP_GET_FW_CRASH_DUMP;\nunsigned IOCTL_ISP_FORCE_CRASH_DUMP = ISP_FORCE_CRASH_DUMP;\nunsigned IOCTL_ISP_FC_GETHINFO = ISP_FC_GETHINFO;\nunsigned IOCTL_ISP_TSK_MGMT = ISP_TSK_MGMT;\nunsigned IOCTL_ISP_FC_GETDLIST = ISP_FC_GETDLIST;\nunsigned IOCTL_MLXD_STATUS = MLXD_STATUS;\nunsigned IOCTL_MLXD_CHECKASYNC 
= MLXD_CHECKASYNC;\nunsigned IOCTL_MLXD_DETACH = MLXD_DETACH;\nunsigned IOCTL_MLX_RESCAN_DRIVES = MLX_RESCAN_DRIVES;\nunsigned IOCTL_MLX_PAUSE_CHANNEL = MLX_PAUSE_CHANNEL;\nunsigned IOCTL_MLX_COMMAND = MLX_COMMAND;\nunsigned IOCTL_MLX_REBUILDASYNC = MLX_REBUILDASYNC;\nunsigned IOCTL_MLX_REBUILDSTAT = MLX_REBUILDSTAT;\nunsigned IOCTL_MLX_GET_SYSDRIVE = MLX_GET_SYSDRIVE;\nunsigned IOCTL_MLX_GET_CINFO = MLX_GET_CINFO;\nunsigned IOCTL_NVME_PASSTHROUGH_CMD = NVME_PASSTHROUGH_CMD;\nunsigned IOCTL_FWCFGIO_SET_INDEX = FWCFGIO_SET_INDEX;\nunsigned IOCTL_IRDA_RESET_PARAMS = IRDA_RESET_PARAMS;\nunsigned IOCTL_IRDA_SET_PARAMS = IRDA_SET_PARAMS;\nunsigned IOCTL_IRDA_GET_SPEEDMASK = IRDA_GET_SPEEDMASK;\nunsigned IOCTL_IRDA_GET_TURNAROUNDMASK = IRDA_GET_TURNAROUNDMASK;\nunsigned IOCTL_IRFRAMETTY_GET_DEVICE = IRFRAMETTY_GET_DEVICE;\nunsigned IOCTL_IRFRAMETTY_GET_DONGLE = IRFRAMETTY_GET_DONGLE;\nunsigned IOCTL_IRFRAMETTY_SET_DONGLE = IRFRAMETTY_SET_DONGLE;\nunsigned IOCTL_ISV_CMD = ISV_CMD;\nunsigned IOCTL_WTQICMD = WTQICMD;\nunsigned IOCTL_ISCSI_GET_VERSION = ISCSI_GET_VERSION;\nunsigned IOCTL_ISCSI_LOGIN = ISCSI_LOGIN;\nunsigned IOCTL_ISCSI_LOGOUT = ISCSI_LOGOUT;\nunsigned IOCTL_ISCSI_ADD_CONNECTION = ISCSI_ADD_CONNECTION;\nunsigned IOCTL_ISCSI_RESTORE_CONNECTION = ISCSI_RESTORE_CONNECTION;\nunsigned IOCTL_ISCSI_REMOVE_CONNECTION = ISCSI_REMOVE_CONNECTION;\nunsigned IOCTL_ISCSI_CONNECTION_STATUS = ISCSI_CONNECTION_STATUS;\nunsigned IOCTL_ISCSI_SEND_TARGETS = ISCSI_SEND_TARGETS;\nunsigned IOCTL_ISCSI_SET_NODE_NAME = ISCSI_SET_NODE_NAME;\nunsigned IOCTL_ISCSI_IO_COMMAND = ISCSI_IO_COMMAND;\nunsigned IOCTL_ISCSI_REGISTER_EVENT = ISCSI_REGISTER_EVENT;\nunsigned IOCTL_ISCSI_DEREGISTER_EVENT = ISCSI_DEREGISTER_EVENT;\nunsigned IOCTL_ISCSI_WAIT_EVENT = ISCSI_WAIT_EVENT;\nunsigned IOCTL_ISCSI_POLL_EVENT = ISCSI_POLL_EVENT;\nunsigned IOCTL_OFIOCGET = OFIOCGET;\nunsigned IOCTL_OFIOCSET = OFIOCSET;\nunsigned IOCTL_OFIOCNEXTPROP = OFIOCNEXTPROP;\nunsigned IOCTL_OFIOCGETOPTNODE = 
OFIOCGETOPTNODE;\nunsigned IOCTL_OFIOCGETNEXT = OFIOCGETNEXT;\nunsigned IOCTL_OFIOCGETCHILD = OFIOCGETCHILD;\nunsigned IOCTL_OFIOCFINDDEVICE = OFIOCFINDDEVICE;\nunsigned IOCTL_AMR_IO_VERSION = AMR_IO_VERSION;\nunsigned IOCTL_AMR_IO_COMMAND = AMR_IO_COMMAND;\nunsigned IOCTL_MLYIO_COMMAND = MLYIO_COMMAND;\nunsigned IOCTL_MLYIO_HEALTH = MLYIO_HEALTH;\nunsigned IOCTL_PCI_IOC_CFGREAD = PCI_IOC_CFGREAD;\nunsigned IOCTL_PCI_IOC_CFGWRITE = PCI_IOC_CFGWRITE;\nunsigned IOCTL_PCI_IOC_BDF_CFGREAD = PCI_IOC_BDF_CFGREAD;\nunsigned IOCTL_PCI_IOC_BDF_CFGWRITE = PCI_IOC_BDF_CFGWRITE;\nunsigned IOCTL_PCI_IOC_BUSINFO = PCI_IOC_BUSINFO;\nunsigned IOCTL_PCI_IOC_DRVNAME = PCI_IOC_DRVNAME;\nunsigned IOCTL_PCI_IOC_DRVNAMEONBUS = PCI_IOC_DRVNAMEONBUS;\nunsigned IOCTL_TWEIO_COMMAND = TWEIO_COMMAND;\nunsigned IOCTL_TWEIO_STATS = TWEIO_STATS;\nunsigned IOCTL_TWEIO_AEN_POLL = TWEIO_AEN_POLL;\nunsigned IOCTL_TWEIO_AEN_WAIT = TWEIO_AEN_WAIT;\nunsigned IOCTL_TWEIO_SET_PARAM = TWEIO_SET_PARAM;\nunsigned IOCTL_TWEIO_GET_PARAM = TWEIO_GET_PARAM;\nunsigned IOCTL_TWEIO_RESET = TWEIO_RESET;\nunsigned IOCTL_TWEIO_ADD_UNIT = TWEIO_ADD_UNIT;\nunsigned IOCTL_TWEIO_DEL_UNIT = TWEIO_DEL_UNIT;\nunsigned IOCTL_SIOCSCNWDOMAIN = SIOCSCNWDOMAIN;\nunsigned IOCTL_SIOCGCNWDOMAIN = SIOCGCNWDOMAIN;\nunsigned IOCTL_SIOCSCNWKEY = SIOCSCNWKEY;\nunsigned IOCTL_SIOCGCNWSTATUS = SIOCGCNWSTATUS;\nunsigned IOCTL_SIOCGCNWSTATS = SIOCGCNWSTATS;\nunsigned IOCTL_SIOCGCNWTRAIL = SIOCGCNWTRAIL;\nunsigned IOCTL_SIOCGRAYSIGLEV = SIOCGRAYSIGLEV;\nunsigned IOCTL_RAIDFRAME_SHUTDOWN = RAIDFRAME_SHUTDOWN;\nunsigned IOCTL_RAIDFRAME_TUR = RAIDFRAME_TUR;\nunsigned IOCTL_RAIDFRAME_FAIL_DISK = RAIDFRAME_FAIL_DISK;\nunsigned IOCTL_RAIDFRAME_CHECK_RECON_STATUS = RAIDFRAME_CHECK_RECON_STATUS;\nunsigned IOCTL_RAIDFRAME_REWRITEPARITY = RAIDFRAME_REWRITEPARITY;\nunsigned IOCTL_RAIDFRAME_COPYBACK = RAIDFRAME_COPYBACK;\nunsigned IOCTL_RAIDFRAME_SPARET_WAIT = RAIDFRAME_SPARET_WAIT;\nunsigned IOCTL_RAIDFRAME_SEND_SPARET = 
RAIDFRAME_SEND_SPARET;\nunsigned IOCTL_RAIDFRAME_ABORT_SPARET_WAIT = RAIDFRAME_ABORT_SPARET_WAIT;\nunsigned IOCTL_RAIDFRAME_START_ATRACE = RAIDFRAME_START_ATRACE;\nunsigned IOCTL_RAIDFRAME_STOP_ATRACE = RAIDFRAME_STOP_ATRACE;\nunsigned IOCTL_RAIDFRAME_GET_SIZE = RAIDFRAME_GET_SIZE;\nunsigned IOCTL_RAIDFRAME_RESET_ACCTOTALS = RAIDFRAME_RESET_ACCTOTALS;\nunsigned IOCTL_RAIDFRAME_KEEP_ACCTOTALS = RAIDFRAME_KEEP_ACCTOTALS;\nunsigned IOCTL_RAIDFRAME_GET_COMPONENT_LABEL = RAIDFRAME_GET_COMPONENT_LABEL;\nunsigned IOCTL_RAIDFRAME_SET_COMPONENT_LABEL = RAIDFRAME_SET_COMPONENT_LABEL;\nunsigned IOCTL_RAIDFRAME_INIT_LABELS = RAIDFRAME_INIT_LABELS;\nunsigned IOCTL_RAIDFRAME_ADD_HOT_SPARE = RAIDFRAME_ADD_HOT_SPARE;\nunsigned IOCTL_RAIDFRAME_REMOVE_HOT_SPARE = RAIDFRAME_REMOVE_HOT_SPARE;\nunsigned IOCTL_RAIDFRAME_REBUILD_IN_PLACE = RAIDFRAME_REBUILD_IN_PLACE;\nunsigned IOCTL_RAIDFRAME_CHECK_PARITY = RAIDFRAME_CHECK_PARITY;\nunsigned IOCTL_RAIDFRAME_CHECK_PARITYREWRITE_STATUS =\n    RAIDFRAME_CHECK_PARITYREWRITE_STATUS;\nunsigned IOCTL_RAIDFRAME_CHECK_COPYBACK_STATUS =\n    RAIDFRAME_CHECK_COPYBACK_STATUS;\nunsigned IOCTL_RAIDFRAME_SET_AUTOCONFIG = RAIDFRAME_SET_AUTOCONFIG;\nunsigned IOCTL_RAIDFRAME_SET_ROOT = RAIDFRAME_SET_ROOT;\nunsigned IOCTL_RAIDFRAME_DELETE_COMPONENT = RAIDFRAME_DELETE_COMPONENT;\nunsigned IOCTL_RAIDFRAME_INCORPORATE_HOT_SPARE =\n    RAIDFRAME_INCORPORATE_HOT_SPARE;\nunsigned IOCTL_RAIDFRAME_CHECK_RECON_STATUS_EXT =\n    RAIDFRAME_CHECK_RECON_STATUS_EXT;\nunsigned IOCTL_RAIDFRAME_CHECK_PARITYREWRITE_STATUS_EXT =\n    RAIDFRAME_CHECK_PARITYREWRITE_STATUS_EXT;\nunsigned IOCTL_RAIDFRAME_CHECK_COPYBACK_STATUS_EXT =\n    RAIDFRAME_CHECK_COPYBACK_STATUS_EXT;\nunsigned IOCTL_RAIDFRAME_CONFIGURE = RAIDFRAME_CONFIGURE;\nunsigned IOCTL_RAIDFRAME_GET_INFO = RAIDFRAME_GET_INFO;\nunsigned IOCTL_RAIDFRAME_PARITYMAP_STATUS = RAIDFRAME_PARITYMAP_STATUS;\nunsigned IOCTL_RAIDFRAME_PARITYMAP_GET_DISABLE =\n    RAIDFRAME_PARITYMAP_GET_DISABLE;\nunsigned 
IOCTL_RAIDFRAME_PARITYMAP_SET_DISABLE =\n    RAIDFRAME_PARITYMAP_SET_DISABLE;\nunsigned IOCTL_RAIDFRAME_PARITYMAP_SET_PARAMS = RAIDFRAME_PARITYMAP_SET_PARAMS;\nunsigned IOCTL_RAIDFRAME_SET_LAST_UNIT = RAIDFRAME_SET_LAST_UNIT;\nunsigned IOCTL_MBPPIOCSPARAM = MBPPIOCSPARAM;\nunsigned IOCTL_MBPPIOCGPARAM = MBPPIOCGPARAM;\nunsigned IOCTL_MBPPIOCGSTAT = MBPPIOCGSTAT;\nunsigned IOCTL_SESIOC_GETNOBJ = SESIOC_GETNOBJ;\nunsigned IOCTL_SESIOC_GETOBJMAP = SESIOC_GETOBJMAP;\nunsigned IOCTL_SESIOC_GETENCSTAT = SESIOC_GETENCSTAT;\nunsigned IOCTL_SESIOC_SETENCSTAT = SESIOC_SETENCSTAT;\nunsigned IOCTL_SESIOC_GETOBJSTAT = SESIOC_GETOBJSTAT;\nunsigned IOCTL_SESIOC_SETOBJSTAT = SESIOC_SETOBJSTAT;\nunsigned IOCTL_SESIOC_GETTEXT = SESIOC_GETTEXT;\nunsigned IOCTL_SESIOC_INIT = SESIOC_INIT;\nunsigned IOCTL_SUN_DKIOCGGEOM = SUN_DKIOCGGEOM;\nunsigned IOCTL_SUN_DKIOCINFO = SUN_DKIOCINFO;\nunsigned IOCTL_SUN_DKIOCGPART = SUN_DKIOCGPART;\nunsigned IOCTL_FBIOGTYPE = FBIOGTYPE;\nunsigned IOCTL_FBIOPUTCMAP = FBIOPUTCMAP;\nunsigned IOCTL_FBIOGETCMAP = FBIOGETCMAP;\nunsigned IOCTL_FBIOGATTR = FBIOGATTR;\nunsigned IOCTL_FBIOSVIDEO = FBIOSVIDEO;\nunsigned IOCTL_FBIOGVIDEO = FBIOGVIDEO;\nunsigned IOCTL_FBIOSCURSOR = FBIOSCURSOR;\nunsigned IOCTL_FBIOGCURSOR = FBIOGCURSOR;\nunsigned IOCTL_FBIOSCURPOS = FBIOSCURPOS;\nunsigned IOCTL_FBIOGCURPOS = FBIOGCURPOS;\nunsigned IOCTL_FBIOGCURMAX = FBIOGCURMAX;\nunsigned IOCTL_KIOCTRANS = KIOCTRANS;\nunsigned IOCTL_KIOCSETKEY = KIOCSETKEY;\nunsigned IOCTL_KIOCGETKEY = KIOCGETKEY;\nunsigned IOCTL_KIOCGTRANS = KIOCGTRANS;\nunsigned IOCTL_KIOCCMD = KIOCCMD;\nunsigned IOCTL_KIOCTYPE = KIOCTYPE;\nunsigned IOCTL_KIOCSDIRECT = KIOCSDIRECT;\nunsigned IOCTL_KIOCSKEY = KIOCSKEY;\nunsigned IOCTL_KIOCGKEY = KIOCGKEY;\nunsigned IOCTL_KIOCSLED = KIOCSLED;\nunsigned IOCTL_KIOCGLED = KIOCGLED;\nunsigned IOCTL_KIOCLAYOUT = KIOCLAYOUT;\nunsigned IOCTL_VUIDSFORMAT = VUIDSFORMAT;\nunsigned IOCTL_VUIDGFORMAT = VUIDGFORMAT;\nunsigned IOCTL_STICIO_GXINFO = STICIO_GXINFO;\nunsigned 
IOCTL_STICIO_RESET = STICIO_RESET;\nunsigned IOCTL_STICIO_STARTQ = STICIO_STARTQ;\nunsigned IOCTL_STICIO_STOPQ = STICIO_STOPQ;\nunsigned IOCTL_UKYOPON_IDENTIFY = UKYOPON_IDENTIFY;\nunsigned IOCTL_URIO_SEND_COMMAND = URIO_SEND_COMMAND;\nunsigned IOCTL_URIO_RECV_COMMAND = URIO_RECV_COMMAND;\nunsigned IOCTL_USB_REQUEST = USB_REQUEST;\nunsigned IOCTL_USB_SETDEBUG = USB_SETDEBUG;\nunsigned IOCTL_USB_DISCOVER = USB_DISCOVER;\nunsigned IOCTL_USB_DEVICEINFO = USB_DEVICEINFO;\nunsigned IOCTL_USB_DEVICEINFO_OLD = USB_DEVICEINFO_OLD;\nunsigned IOCTL_USB_DEVICESTATS = USB_DEVICESTATS;\nunsigned IOCTL_USB_GET_REPORT_DESC = USB_GET_REPORT_DESC;\nunsigned IOCTL_USB_SET_IMMED = USB_SET_IMMED;\nunsigned IOCTL_USB_GET_REPORT = USB_GET_REPORT;\nunsigned IOCTL_USB_SET_REPORT = USB_SET_REPORT;\nunsigned IOCTL_USB_GET_REPORT_ID = USB_GET_REPORT_ID;\nunsigned IOCTL_USB_GET_CONFIG = USB_GET_CONFIG;\nunsigned IOCTL_USB_SET_CONFIG = USB_SET_CONFIG;\nunsigned IOCTL_USB_GET_ALTINTERFACE = USB_GET_ALTINTERFACE;\nunsigned IOCTL_USB_SET_ALTINTERFACE = USB_SET_ALTINTERFACE;\nunsigned IOCTL_USB_GET_NO_ALT = USB_GET_NO_ALT;\nunsigned IOCTL_USB_GET_DEVICE_DESC = USB_GET_DEVICE_DESC;\nunsigned IOCTL_USB_GET_CONFIG_DESC = USB_GET_CONFIG_DESC;\nunsigned IOCTL_USB_GET_INTERFACE_DESC = USB_GET_INTERFACE_DESC;\nunsigned IOCTL_USB_GET_ENDPOINT_DESC = USB_GET_ENDPOINT_DESC;\nunsigned IOCTL_USB_GET_FULL_DESC = USB_GET_FULL_DESC;\nunsigned IOCTL_USB_GET_STRING_DESC = USB_GET_STRING_DESC;\nunsigned IOCTL_USB_DO_REQUEST = USB_DO_REQUEST;\nunsigned IOCTL_USB_GET_DEVICEINFO = USB_GET_DEVICEINFO;\nunsigned IOCTL_USB_GET_DEVICEINFO_OLD = USB_GET_DEVICEINFO_OLD;\nunsigned IOCTL_USB_SET_SHORT_XFER = USB_SET_SHORT_XFER;\nunsigned IOCTL_USB_SET_TIMEOUT = USB_SET_TIMEOUT;\nunsigned IOCTL_USB_SET_BULK_RA = USB_SET_BULK_RA;\nunsigned IOCTL_USB_SET_BULK_WB = USB_SET_BULK_WB;\nunsigned IOCTL_USB_SET_BULK_RA_OPT = USB_SET_BULK_RA_OPT;\nunsigned IOCTL_USB_SET_BULK_WB_OPT = USB_SET_BULK_WB_OPT;\nunsigned 
IOCTL_USB_GET_CM_OVER_DATA = USB_GET_CM_OVER_DATA;\nunsigned IOCTL_USB_SET_CM_OVER_DATA = USB_SET_CM_OVER_DATA;\nunsigned IOCTL_UTOPPYIOTURBO = UTOPPYIOTURBO;\nunsigned IOCTL_UTOPPYIOCANCEL = UTOPPYIOCANCEL;\nunsigned IOCTL_UTOPPYIOREBOOT = UTOPPYIOREBOOT;\nunsigned IOCTL_UTOPPYIOSTATS = UTOPPYIOSTATS;\nunsigned IOCTL_UTOPPYIORENAME = UTOPPYIORENAME;\nunsigned IOCTL_UTOPPYIOMKDIR = UTOPPYIOMKDIR;\nunsigned IOCTL_UTOPPYIODELETE = UTOPPYIODELETE;\nunsigned IOCTL_UTOPPYIOREADDIR = UTOPPYIOREADDIR;\nunsigned IOCTL_UTOPPYIOREADFILE = UTOPPYIOREADFILE;\nunsigned IOCTL_UTOPPYIOWRITEFILE = UTOPPYIOWRITEFILE;\nunsigned IOCTL_DIOSXDCMD = DIOSXDCMD;\nunsigned IOCTL_VT_OPENQRY = VT_OPENQRY;\nunsigned IOCTL_VT_SETMODE = VT_SETMODE;\nunsigned IOCTL_VT_GETMODE = VT_GETMODE;\nunsigned IOCTL_VT_RELDISP = VT_RELDISP;\nunsigned IOCTL_VT_ACTIVATE = VT_ACTIVATE;\nunsigned IOCTL_VT_WAITACTIVE = VT_WAITACTIVE;\nunsigned IOCTL_VT_GETACTIVE = VT_GETACTIVE;\nunsigned IOCTL_VT_GETSTATE = VT_GETSTATE;\nunsigned IOCTL_KDGETKBENT = KDGETKBENT;\nunsigned IOCTL_KDGKBMODE = KDGKBMODE;\nunsigned IOCTL_KDSKBMODE = KDSKBMODE;\nunsigned IOCTL_KDMKTONE = KDMKTONE;\nunsigned IOCTL_KDSETMODE = KDSETMODE;\nunsigned IOCTL_KDENABIO = KDENABIO;\nunsigned IOCTL_KDDISABIO = KDDISABIO;\nunsigned IOCTL_KDGKBTYPE = KDGKBTYPE;\nunsigned IOCTL_KDGETLED = KDGETLED;\nunsigned IOCTL_KDSETLED = KDSETLED;\nunsigned IOCTL_KDSETRAD = KDSETRAD;\nunsigned IOCTL_VGAPCVTID = VGAPCVTID;\nunsigned IOCTL_CONS_GETVERS = CONS_GETVERS;\nunsigned IOCTL_WSKBDIO_GTYPE = WSKBDIO_GTYPE;\nunsigned IOCTL_WSKBDIO_BELL = WSKBDIO_BELL;\nunsigned IOCTL_WSKBDIO_COMPLEXBELL = WSKBDIO_COMPLEXBELL;\nunsigned IOCTL_WSKBDIO_SETBELL = WSKBDIO_SETBELL;\nunsigned IOCTL_WSKBDIO_GETBELL = WSKBDIO_GETBELL;\nunsigned IOCTL_WSKBDIO_SETDEFAULTBELL = WSKBDIO_SETDEFAULTBELL;\nunsigned IOCTL_WSKBDIO_GETDEFAULTBELL = WSKBDIO_GETDEFAULTBELL;\nunsigned IOCTL_WSKBDIO_SETKEYREPEAT = WSKBDIO_SETKEYREPEAT;\nunsigned IOCTL_WSKBDIO_GETKEYREPEAT = 
WSKBDIO_GETKEYREPEAT;\nunsigned IOCTL_WSKBDIO_SETDEFAULTKEYREPEAT = WSKBDIO_SETDEFAULTKEYREPEAT;\nunsigned IOCTL_WSKBDIO_GETDEFAULTKEYREPEAT = WSKBDIO_GETDEFAULTKEYREPEAT;\nunsigned IOCTL_WSKBDIO_SETLEDS = WSKBDIO_SETLEDS;\nunsigned IOCTL_WSKBDIO_GETLEDS = WSKBDIO_GETLEDS;\nunsigned IOCTL_WSKBDIO_GETMAP = WSKBDIO_GETMAP;\nunsigned IOCTL_WSKBDIO_SETMAP = WSKBDIO_SETMAP;\nunsigned IOCTL_WSKBDIO_GETENCODING = WSKBDIO_GETENCODING;\nunsigned IOCTL_WSKBDIO_SETENCODING = WSKBDIO_SETENCODING;\nunsigned IOCTL_WSKBDIO_SETMODE = WSKBDIO_SETMODE;\nunsigned IOCTL_WSKBDIO_GETMODE = WSKBDIO_GETMODE;\nunsigned IOCTL_WSKBDIO_SETKEYCLICK = WSKBDIO_SETKEYCLICK;\nunsigned IOCTL_WSKBDIO_GETKEYCLICK = WSKBDIO_GETKEYCLICK;\nunsigned IOCTL_WSKBDIO_GETSCROLL = WSKBDIO_GETSCROLL;\nunsigned IOCTL_WSKBDIO_SETSCROLL = WSKBDIO_SETSCROLL;\nunsigned IOCTL_WSKBDIO_SETVERSION = WSKBDIO_SETVERSION;\nunsigned IOCTL_WSMOUSEIO_GTYPE = WSMOUSEIO_GTYPE;\nunsigned IOCTL_WSMOUSEIO_SRES = WSMOUSEIO_SRES;\nunsigned IOCTL_WSMOUSEIO_SSCALE = WSMOUSEIO_SSCALE;\nunsigned IOCTL_WSMOUSEIO_SRATE = WSMOUSEIO_SRATE;\nunsigned IOCTL_WSMOUSEIO_SCALIBCOORDS = WSMOUSEIO_SCALIBCOORDS;\nunsigned IOCTL_WSMOUSEIO_GCALIBCOORDS = WSMOUSEIO_GCALIBCOORDS;\nunsigned IOCTL_WSMOUSEIO_GETID = WSMOUSEIO_GETID;\nunsigned IOCTL_WSMOUSEIO_GETREPEAT = WSMOUSEIO_GETREPEAT;\nunsigned IOCTL_WSMOUSEIO_SETREPEAT = WSMOUSEIO_SETREPEAT;\nunsigned IOCTL_WSMOUSEIO_SETVERSION = WSMOUSEIO_SETVERSION;\nunsigned IOCTL_WSDISPLAYIO_GTYPE = WSDISPLAYIO_GTYPE;\nunsigned IOCTL_WSDISPLAYIO_GINFO = WSDISPLAYIO_GINFO;\nunsigned IOCTL_WSDISPLAYIO_GETCMAP = WSDISPLAYIO_GETCMAP;\nunsigned IOCTL_WSDISPLAYIO_PUTCMAP = WSDISPLAYIO_PUTCMAP;\nunsigned IOCTL_WSDISPLAYIO_GVIDEO = WSDISPLAYIO_GVIDEO;\nunsigned IOCTL_WSDISPLAYIO_SVIDEO = WSDISPLAYIO_SVIDEO;\nunsigned IOCTL_WSDISPLAYIO_GCURPOS = WSDISPLAYIO_GCURPOS;\nunsigned IOCTL_WSDISPLAYIO_SCURPOS = WSDISPLAYIO_SCURPOS;\nunsigned IOCTL_WSDISPLAYIO_GCURMAX = WSDISPLAYIO_GCURMAX;\nunsigned IOCTL_WSDISPLAYIO_GCURSOR = 
WSDISPLAYIO_GCURSOR;\nunsigned IOCTL_WSDISPLAYIO_SCURSOR = WSDISPLAYIO_SCURSOR;\nunsigned IOCTL_WSDISPLAYIO_GMODE = WSDISPLAYIO_GMODE;\nunsigned IOCTL_WSDISPLAYIO_SMODE = WSDISPLAYIO_SMODE;\nunsigned IOCTL_WSDISPLAYIO_LDFONT = WSDISPLAYIO_LDFONT;\nunsigned IOCTL_WSDISPLAYIO_ADDSCREEN = WSDISPLAYIO_ADDSCREEN;\nunsigned IOCTL_WSDISPLAYIO_DELSCREEN = WSDISPLAYIO_DELSCREEN;\nunsigned IOCTL_WSDISPLAYIO_SFONT = WSDISPLAYIO_SFONT;\nunsigned IOCTL__O_WSDISPLAYIO_SETKEYBOARD = _O_WSDISPLAYIO_SETKEYBOARD;\nunsigned IOCTL_WSDISPLAYIO_GETPARAM = WSDISPLAYIO_GETPARAM;\nunsigned IOCTL_WSDISPLAYIO_SETPARAM = WSDISPLAYIO_SETPARAM;\nunsigned IOCTL_WSDISPLAYIO_GETACTIVESCREEN = WSDISPLAYIO_GETACTIVESCREEN;\nunsigned IOCTL_WSDISPLAYIO_GETWSCHAR = WSDISPLAYIO_GETWSCHAR;\nunsigned IOCTL_WSDISPLAYIO_PUTWSCHAR = WSDISPLAYIO_PUTWSCHAR;\nunsigned IOCTL_WSDISPLAYIO_DGSCROLL = WSDISPLAYIO_DGSCROLL;\nunsigned IOCTL_WSDISPLAYIO_DSSCROLL = WSDISPLAYIO_DSSCROLL;\nunsigned IOCTL_WSDISPLAYIO_GMSGATTRS = WSDISPLAYIO_GMSGATTRS;\nunsigned IOCTL_WSDISPLAYIO_SMSGATTRS = WSDISPLAYIO_SMSGATTRS;\nunsigned IOCTL_WSDISPLAYIO_GBORDER = WSDISPLAYIO_GBORDER;\nunsigned IOCTL_WSDISPLAYIO_SBORDER = WSDISPLAYIO_SBORDER;\nunsigned IOCTL_WSDISPLAYIO_SSPLASH = WSDISPLAYIO_SSPLASH;\nunsigned IOCTL_WSDISPLAYIO_SPROGRESS = WSDISPLAYIO_SPROGRESS;\nunsigned IOCTL_WSDISPLAYIO_LINEBYTES = WSDISPLAYIO_LINEBYTES;\nunsigned IOCTL_WSDISPLAYIO_SETVERSION = WSDISPLAYIO_SETVERSION;\nunsigned IOCTL_WSMUXIO_ADD_DEVICE = WSMUXIO_ADD_DEVICE;\nunsigned IOCTL_WSMUXIO_REMOVE_DEVICE = WSMUXIO_REMOVE_DEVICE;\nunsigned IOCTL_WSMUXIO_LIST_DEVICES = WSMUXIO_LIST_DEVICES;\nunsigned IOCTL_WSMUXIO_INJECTEVENT = WSMUXIO_INJECTEVENT;\nunsigned IOCTL_WSDISPLAYIO_GET_BUSID = WSDISPLAYIO_GET_BUSID;\nunsigned IOCTL_WSDISPLAYIO_GET_EDID = WSDISPLAYIO_GET_EDID;\nunsigned IOCTL_WSDISPLAYIO_SET_POLLING = WSDISPLAYIO_SET_POLLING;\nunsigned IOCTL_WSDISPLAYIO_GET_FBINFO = WSDISPLAYIO_GET_FBINFO;\nunsigned IOCTL_WSDISPLAYIO_DOBLIT = 
WSDISPLAYIO_DOBLIT;\nunsigned IOCTL_WSDISPLAYIO_WAITBLIT = WSDISPLAYIO_WAITBLIT;\nunsigned IOCTL_BIOCLOCATE = BIOCLOCATE;\nunsigned IOCTL_BIOCINQ = BIOCINQ;\nunsigned IOCTL_BIOCDISK_NOVOL = BIOCDISK_NOVOL;\nunsigned IOCTL_BIOCDISK = BIOCDISK;\nunsigned IOCTL_BIOCVOL = BIOCVOL;\nunsigned IOCTL_BIOCALARM = BIOCALARM;\nunsigned IOCTL_BIOCBLINK = BIOCBLINK;\nunsigned IOCTL_BIOCSETSTATE = BIOCSETSTATE;\nunsigned IOCTL_BIOCVOLOPS = BIOCVOLOPS;\nunsigned IOCTL_MD_GETCONF = MD_GETCONF;\nunsigned IOCTL_MD_SETCONF = MD_SETCONF;\nunsigned IOCTL_CCDIOCSET = CCDIOCSET;\nunsigned IOCTL_CCDIOCCLR = CCDIOCCLR;\nunsigned IOCTL_CGDIOCSET = CGDIOCSET;\nunsigned IOCTL_CGDIOCCLR = CGDIOCCLR;\nunsigned IOCTL_CGDIOCGET = CGDIOCGET;\nunsigned IOCTL_FSSIOCSET = FSSIOCSET;\nunsigned IOCTL_FSSIOCGET = FSSIOCGET;\nunsigned IOCTL_FSSIOCCLR = FSSIOCCLR;\nunsigned IOCTL_FSSIOFSET = FSSIOFSET;\nunsigned IOCTL_FSSIOFGET = FSSIOFGET;\nunsigned IOCTL_BTDEV_ATTACH = BTDEV_ATTACH;\nunsigned IOCTL_BTDEV_DETACH = BTDEV_DETACH;\nunsigned IOCTL_BTSCO_GETINFO = BTSCO_GETINFO;\nunsigned IOCTL_KTTCP_IO_SEND = KTTCP_IO_SEND;\nunsigned IOCTL_KTTCP_IO_RECV = KTTCP_IO_RECV;\nunsigned IOCTL_IOC_LOCKSTAT_GVERSION = IOC_LOCKSTAT_GVERSION;\nunsigned IOCTL_IOC_LOCKSTAT_ENABLE = IOC_LOCKSTAT_ENABLE;\nunsigned IOCTL_IOC_LOCKSTAT_DISABLE = IOC_LOCKSTAT_DISABLE;\nunsigned IOCTL_VNDIOCSET = VNDIOCSET;\nunsigned IOCTL_VNDIOCCLR = VNDIOCCLR;\nunsigned IOCTL_VNDIOCGET = VNDIOCGET;\nunsigned IOCTL_SPKRTONE = SPKRTONE;\nunsigned IOCTL_SPKRTUNE = SPKRTUNE;\nunsigned IOCTL_SPKRGETVOL = SPKRGETVOL;\nunsigned IOCTL_SPKRSETVOL = SPKRSETVOL;\n#if defined(__x86_64__)\nunsigned IOCTL_NVMM_IOC_CAPABILITY = NVMM_IOC_CAPABILITY;\nunsigned IOCTL_NVMM_IOC_MACHINE_CREATE = NVMM_IOC_MACHINE_CREATE;\nunsigned IOCTL_NVMM_IOC_MACHINE_DESTROY = NVMM_IOC_MACHINE_DESTROY;\nunsigned IOCTL_NVMM_IOC_MACHINE_CONFIGURE = NVMM_IOC_MACHINE_CONFIGURE;\nunsigned IOCTL_NVMM_IOC_VCPU_CREATE = NVMM_IOC_VCPU_CREATE;\nunsigned IOCTL_NVMM_IOC_VCPU_DESTROY = 
NVMM_IOC_VCPU_DESTROY;\nunsigned IOCTL_NVMM_IOC_VCPU_CONFIGURE = NVMM_IOC_VCPU_CONFIGURE;\nunsigned IOCTL_NVMM_IOC_VCPU_SETSTATE = NVMM_IOC_VCPU_SETSTATE;\nunsigned IOCTL_NVMM_IOC_VCPU_GETSTATE = NVMM_IOC_VCPU_GETSTATE;\nunsigned IOCTL_NVMM_IOC_VCPU_INJECT = NVMM_IOC_VCPU_INJECT;\nunsigned IOCTL_NVMM_IOC_VCPU_RUN = NVMM_IOC_VCPU_RUN;\nunsigned IOCTL_NVMM_IOC_GPA_MAP = NVMM_IOC_GPA_MAP;\nunsigned IOCTL_NVMM_IOC_GPA_UNMAP = NVMM_IOC_GPA_UNMAP;\nunsigned IOCTL_NVMM_IOC_HVA_MAP = NVMM_IOC_HVA_MAP;\nunsigned IOCTL_NVMM_IOC_HVA_UNMAP = NVMM_IOC_HVA_UNMAP;\nunsigned IOCTL_NVMM_IOC_CTL = NVMM_IOC_CTL;\n#endif\nunsigned IOCTL_SPI_IOCTL_CONFIGURE = SPI_IOCTL_CONFIGURE;\nunsigned IOCTL_SPI_IOCTL_TRANSFER = SPI_IOCTL_TRANSFER;\nunsigned IOCTL_AUTOFSREQUEST = AUTOFSREQUEST;\nunsigned IOCTL_AUTOFSDONE = AUTOFSDONE;\nunsigned IOCTL_BIOCGBLEN = BIOCGBLEN;\nunsigned IOCTL_BIOCSBLEN = BIOCSBLEN;\nunsigned IOCTL_BIOCSETF = BIOCSETF;\nunsigned IOCTL_BIOCFLUSH = BIOCFLUSH;\nunsigned IOCTL_BIOCPROMISC = BIOCPROMISC;\nunsigned IOCTL_BIOCGDLT = BIOCGDLT;\nunsigned IOCTL_BIOCGETIF = BIOCGETIF;\nunsigned IOCTL_BIOCSETIF = BIOCSETIF;\nunsigned IOCTL_BIOCGSTATS = BIOCGSTATS;\nunsigned IOCTL_BIOCGSTATSOLD = BIOCGSTATSOLD;\nunsigned IOCTL_BIOCIMMEDIATE = BIOCIMMEDIATE;\nunsigned IOCTL_BIOCVERSION = BIOCVERSION;\nunsigned IOCTL_BIOCSTCPF = BIOCSTCPF;\nunsigned IOCTL_BIOCSUDPF = BIOCSUDPF;\nunsigned IOCTL_BIOCGHDRCMPLT = BIOCGHDRCMPLT;\nunsigned IOCTL_BIOCSHDRCMPLT = BIOCSHDRCMPLT;\nunsigned IOCTL_BIOCSDLT = BIOCSDLT;\nunsigned IOCTL_BIOCGDLTLIST = BIOCGDLTLIST;\nunsigned IOCTL_BIOCGDIRECTION = BIOCGDIRECTION;\nunsigned IOCTL_BIOCSDIRECTION = BIOCSDIRECTION;\nunsigned IOCTL_BIOCSRTIMEOUT = BIOCSRTIMEOUT;\nunsigned IOCTL_BIOCGRTIMEOUT = BIOCGRTIMEOUT;\nunsigned IOCTL_BIOCGFEEDBACK = BIOCGFEEDBACK;\nunsigned IOCTL_BIOCSFEEDBACK = BIOCSFEEDBACK;\nunsigned IOCTL_GRESADDRS = GRESADDRS;\nunsigned IOCTL_GRESADDRD = GRESADDRD;\nunsigned IOCTL_GREGADDRS = GREGADDRS;\nunsigned IOCTL_GREGADDRD = 
GREGADDRD;\nunsigned IOCTL_GRESPROTO = GRESPROTO;\nunsigned IOCTL_GREGPROTO = GREGPROTO;\nunsigned IOCTL_GRESSOCK = GRESSOCK;\nunsigned IOCTL_GREDSOCK = GREDSOCK;\nunsigned IOCTL_PPPIOCGRAWIN = PPPIOCGRAWIN;\nunsigned IOCTL_PPPIOCGFLAGS = PPPIOCGFLAGS;\nunsigned IOCTL_PPPIOCSFLAGS = PPPIOCSFLAGS;\nunsigned IOCTL_PPPIOCGASYNCMAP = PPPIOCGASYNCMAP;\nunsigned IOCTL_PPPIOCSASYNCMAP = PPPIOCSASYNCMAP;\nunsigned IOCTL_PPPIOCGUNIT = PPPIOCGUNIT;\nunsigned IOCTL_PPPIOCGRASYNCMAP = PPPIOCGRASYNCMAP;\nunsigned IOCTL_PPPIOCSRASYNCMAP = PPPIOCSRASYNCMAP;\nunsigned IOCTL_PPPIOCGMRU = PPPIOCGMRU;\nunsigned IOCTL_PPPIOCSMRU = PPPIOCSMRU;\nunsigned IOCTL_PPPIOCSMAXCID = PPPIOCSMAXCID;\nunsigned IOCTL_PPPIOCGXASYNCMAP = PPPIOCGXASYNCMAP;\nunsigned IOCTL_PPPIOCSXASYNCMAP = PPPIOCSXASYNCMAP;\nunsigned IOCTL_PPPIOCXFERUNIT = PPPIOCXFERUNIT;\nunsigned IOCTL_PPPIOCSCOMPRESS = PPPIOCSCOMPRESS;\nunsigned IOCTL_PPPIOCGNPMODE = PPPIOCGNPMODE;\nunsigned IOCTL_PPPIOCSNPMODE = PPPIOCSNPMODE;\nunsigned IOCTL_PPPIOCGIDLE = PPPIOCGIDLE;\nunsigned IOCTL_PPPIOCGMTU = PPPIOCGMTU;\nunsigned IOCTL_PPPIOCSMTU = PPPIOCSMTU;\nunsigned IOCTL_SIOCGPPPSTATS = SIOCGPPPSTATS;\nunsigned IOCTL_SIOCGPPPCSTATS = SIOCGPPPCSTATS;\nunsigned IOCTL_IOC_NPF_VERSION = IOC_NPF_VERSION;\nunsigned IOCTL_IOC_NPF_SWITCH = IOC_NPF_SWITCH;\nunsigned IOCTL_IOC_NPF_LOAD = IOC_NPF_LOAD;\nunsigned IOCTL_IOC_NPF_TABLE = IOC_NPF_TABLE;\nunsigned IOCTL_IOC_NPF_STATS = IOC_NPF_STATS;\nunsigned IOCTL_IOC_NPF_SAVE = IOC_NPF_SAVE;\nunsigned IOCTL_IOC_NPF_RULE = IOC_NPF_RULE;\nunsigned IOCTL_IOC_NPF_CONN_LOOKUP = IOC_NPF_CONN_LOOKUP;\nunsigned IOCTL_IOC_NPF_TABLE_REPLACE = IOC_NPF_TABLE_REPLACE;\nunsigned IOCTL_PPPOESETPARMS = PPPOESETPARMS;\nunsigned IOCTL_PPPOEGETPARMS = PPPOEGETPARMS;\nunsigned IOCTL_PPPOEGETSESSION = PPPOEGETSESSION;\nunsigned IOCTL_SPPPGETAUTHCFG = SPPPGETAUTHCFG;\nunsigned IOCTL_SPPPSETAUTHCFG = SPPPSETAUTHCFG;\nunsigned IOCTL_SPPPGETLCPCFG = SPPPGETLCPCFG;\nunsigned IOCTL_SPPPSETLCPCFG = SPPPSETLCPCFG;\nunsigned 
IOCTL_SPPPGETSTATUS = SPPPGETSTATUS;\nunsigned IOCTL_SPPPGETSTATUSNCP = SPPPGETSTATUSNCP;\nunsigned IOCTL_SPPPGETIDLETO = SPPPGETIDLETO;\nunsigned IOCTL_SPPPSETIDLETO = SPPPSETIDLETO;\nunsigned IOCTL_SPPPGETAUTHFAILURES = SPPPGETAUTHFAILURES;\nunsigned IOCTL_SPPPSETAUTHFAILURE = SPPPSETAUTHFAILURE;\nunsigned IOCTL_SPPPSETDNSOPTS = SPPPSETDNSOPTS;\nunsigned IOCTL_SPPPGETDNSOPTS = SPPPGETDNSOPTS;\nunsigned IOCTL_SPPPGETDNSADDRS = SPPPGETDNSADDRS;\nunsigned IOCTL_SPPPSETKEEPALIVE = SPPPSETKEEPALIVE;\nunsigned IOCTL_SPPPGETKEEPALIVE = SPPPGETKEEPALIVE;\nunsigned IOCTL_SRT_GETNRT = SRT_GETNRT;\nunsigned IOCTL_SRT_GETRT = SRT_GETRT;\nunsigned IOCTL_SRT_SETRT = SRT_SETRT;\nunsigned IOCTL_SRT_DELRT = SRT_DELRT;\nunsigned IOCTL_SRT_SFLAGS = SRT_SFLAGS;\nunsigned IOCTL_SRT_GFLAGS = SRT_GFLAGS;\nunsigned IOCTL_SRT_SGFLAGS = SRT_SGFLAGS;\nunsigned IOCTL_SRT_DEBUG = SRT_DEBUG;\nunsigned IOCTL_TAPGIFNAME = TAPGIFNAME;\nunsigned IOCTL_TUNSDEBUG = TUNSDEBUG;\nunsigned IOCTL_TUNGDEBUG = TUNGDEBUG;\nunsigned IOCTL_TUNSIFMODE = TUNSIFMODE;\nunsigned IOCTL_TUNSLMODE = TUNSLMODE;\nunsigned IOCTL_TUNSIFHEAD = TUNSIFHEAD;\nunsigned IOCTL_TUNGIFHEAD = TUNGIFHEAD;\nunsigned IOCTL_DIOCSTART = DIOCSTART;\nunsigned IOCTL_DIOCSTOP = DIOCSTOP;\nunsigned IOCTL_DIOCADDRULE = DIOCADDRULE;\nunsigned IOCTL_DIOCGETRULES = DIOCGETRULES;\nunsigned IOCTL_DIOCGETRULE = DIOCGETRULE;\nunsigned IOCTL_DIOCSETLCK = DIOCSETLCK;\nunsigned IOCTL_DIOCCLRSTATES = DIOCCLRSTATES;\nunsigned IOCTL_DIOCGETSTATE = DIOCGETSTATE;\nunsigned IOCTL_DIOCSETSTATUSIF = DIOCSETSTATUSIF;\nunsigned IOCTL_DIOCGETSTATUS = DIOCGETSTATUS;\nunsigned IOCTL_DIOCCLRSTATUS = DIOCCLRSTATUS;\nunsigned IOCTL_DIOCNATLOOK = DIOCNATLOOK;\nunsigned IOCTL_DIOCSETDEBUG = DIOCSETDEBUG;\nunsigned IOCTL_DIOCGETSTATES = DIOCGETSTATES;\nunsigned IOCTL_DIOCCHANGERULE = DIOCCHANGERULE;\nunsigned IOCTL_DIOCSETTIMEOUT = DIOCSETTIMEOUT;\nunsigned IOCTL_DIOCGETTIMEOUT = DIOCGETTIMEOUT;\nunsigned IOCTL_DIOCADDSTATE = DIOCADDSTATE;\nunsigned 
IOCTL_DIOCCLRRULECTRS = DIOCCLRRULECTRS;\nunsigned IOCTL_DIOCGETLIMIT = DIOCGETLIMIT;\nunsigned IOCTL_DIOCSETLIMIT = DIOCSETLIMIT;\nunsigned IOCTL_DIOCKILLSTATES = DIOCKILLSTATES;\nunsigned IOCTL_DIOCSTARTALTQ = DIOCSTARTALTQ;\nunsigned IOCTL_DIOCSTOPALTQ = DIOCSTOPALTQ;\nunsigned IOCTL_DIOCADDALTQ = DIOCADDALTQ;\nunsigned IOCTL_DIOCGETALTQS = DIOCGETALTQS;\nunsigned IOCTL_DIOCGETALTQ = DIOCGETALTQ;\nunsigned IOCTL_DIOCCHANGEALTQ = DIOCCHANGEALTQ;\nunsigned IOCTL_DIOCGETQSTATS = DIOCGETQSTATS;\nunsigned IOCTL_DIOCBEGINADDRS = DIOCBEGINADDRS;\nunsigned IOCTL_DIOCADDADDR = DIOCADDADDR;\nunsigned IOCTL_DIOCGETADDRS = DIOCGETADDRS;\nunsigned IOCTL_DIOCGETADDR = DIOCGETADDR;\nunsigned IOCTL_DIOCCHANGEADDR = DIOCCHANGEADDR;\nunsigned IOCTL_DIOCADDSTATES = DIOCADDSTATES;\nunsigned IOCTL_DIOCGETRULESETS = DIOCGETRULESETS;\nunsigned IOCTL_DIOCGETRULESET = DIOCGETRULESET;\nunsigned IOCTL_DIOCRCLRTABLES = DIOCRCLRTABLES;\nunsigned IOCTL_DIOCRADDTABLES = DIOCRADDTABLES;\nunsigned IOCTL_DIOCRDELTABLES = DIOCRDELTABLES;\nunsigned IOCTL_DIOCRGETTABLES = DIOCRGETTABLES;\nunsigned IOCTL_DIOCRGETTSTATS = DIOCRGETTSTATS;\nunsigned IOCTL_DIOCRCLRTSTATS = DIOCRCLRTSTATS;\nunsigned IOCTL_DIOCRCLRADDRS = DIOCRCLRADDRS;\nunsigned IOCTL_DIOCRADDADDRS = DIOCRADDADDRS;\nunsigned IOCTL_DIOCRDELADDRS = DIOCRDELADDRS;\nunsigned IOCTL_DIOCRSETADDRS = DIOCRSETADDRS;\nunsigned IOCTL_DIOCRGETADDRS = DIOCRGETADDRS;\nunsigned IOCTL_DIOCRGETASTATS = DIOCRGETASTATS;\nunsigned IOCTL_DIOCRCLRASTATS = DIOCRCLRASTATS;\nunsigned IOCTL_DIOCRTSTADDRS = DIOCRTSTADDRS;\nunsigned IOCTL_DIOCRSETTFLAGS = DIOCRSETTFLAGS;\nunsigned IOCTL_DIOCRINADEFINE = DIOCRINADEFINE;\nunsigned IOCTL_DIOCOSFPFLUSH = DIOCOSFPFLUSH;\nunsigned IOCTL_DIOCOSFPADD = DIOCOSFPADD;\nunsigned IOCTL_DIOCOSFPGET = DIOCOSFPGET;\nunsigned IOCTL_DIOCXBEGIN = DIOCXBEGIN;\nunsigned IOCTL_DIOCXCOMMIT = DIOCXCOMMIT;\nunsigned IOCTL_DIOCXROLLBACK = DIOCXROLLBACK;\nunsigned IOCTL_DIOCGETSRCNODES = DIOCGETSRCNODES;\nunsigned IOCTL_DIOCCLRSRCNODES = 
DIOCCLRSRCNODES;\nunsigned IOCTL_DIOCSETHOSTID = DIOCSETHOSTID;\nunsigned IOCTL_DIOCIGETIFACES = DIOCIGETIFACES;\nunsigned IOCTL_DIOCSETIFFLAG = DIOCSETIFFLAG;\nunsigned IOCTL_DIOCCLRIFFLAG = DIOCCLRIFFLAG;\nunsigned IOCTL_DIOCKILLSRCNODES = DIOCKILLSRCNODES;\nunsigned IOCTL_SLIOCGUNIT = SLIOCGUNIT;\nunsigned IOCTL_SIOCGBTINFO = SIOCGBTINFO;\nunsigned IOCTL_SIOCGBTINFOA = SIOCGBTINFOA;\nunsigned IOCTL_SIOCNBTINFO = SIOCNBTINFO;\nunsigned IOCTL_SIOCSBTFLAGS = SIOCSBTFLAGS;\nunsigned IOCTL_SIOCSBTPOLICY = SIOCSBTPOLICY;\nunsigned IOCTL_SIOCSBTPTYPE = SIOCSBTPTYPE;\nunsigned IOCTL_SIOCGBTSTATS = SIOCGBTSTATS;\nunsigned IOCTL_SIOCZBTSTATS = SIOCZBTSTATS;\nunsigned IOCTL_SIOCBTDUMP = SIOCBTDUMP;\nunsigned IOCTL_SIOCSBTSCOMTU = SIOCSBTSCOMTU;\nunsigned IOCTL_SIOCGBTFEAT = SIOCGBTFEAT;\nunsigned IOCTL_SIOCADNAT = SIOCADNAT;\nunsigned IOCTL_SIOCRMNAT = SIOCRMNAT;\nunsigned IOCTL_SIOCGNATS = SIOCGNATS;\nunsigned IOCTL_SIOCGNATL = SIOCGNATL;\nunsigned IOCTL_SIOCPURGENAT = SIOCPURGENAT;\nunsigned IOCTL_SIOCCONNECTX = SIOCCONNECTX;\nunsigned IOCTL_SIOCCONNECTXDEL = SIOCCONNECTXDEL;\nunsigned IOCTL_SIOCSIFINFO_FLAGS = SIOCSIFINFO_FLAGS;\nunsigned IOCTL_SIOCAADDRCTL_POLICY = SIOCAADDRCTL_POLICY;\nunsigned IOCTL_SIOCDADDRCTL_POLICY = SIOCDADDRCTL_POLICY;\nunsigned IOCTL_SMBIOC_OPENSESSION = SMBIOC_OPENSESSION;\nunsigned IOCTL_SMBIOC_OPENSHARE = SMBIOC_OPENSHARE;\nunsigned IOCTL_SMBIOC_REQUEST = SMBIOC_REQUEST;\nunsigned IOCTL_SMBIOC_SETFLAGS = SMBIOC_SETFLAGS;\nunsigned IOCTL_SMBIOC_LOOKUP = SMBIOC_LOOKUP;\nunsigned IOCTL_SMBIOC_READ = SMBIOC_READ;\nunsigned IOCTL_SMBIOC_WRITE = SMBIOC_WRITE;\nunsigned IOCTL_AGPIOC_INFO = AGPIOC_INFO;\nunsigned IOCTL_AGPIOC_ACQUIRE = AGPIOC_ACQUIRE;\nunsigned IOCTL_AGPIOC_RELEASE = AGPIOC_RELEASE;\nunsigned IOCTL_AGPIOC_SETUP = AGPIOC_SETUP;\nunsigned IOCTL_AGPIOC_ALLOCATE = AGPIOC_ALLOCATE;\nunsigned IOCTL_AGPIOC_DEALLOCATE = AGPIOC_DEALLOCATE;\nunsigned IOCTL_AGPIOC_BIND = AGPIOC_BIND;\nunsigned IOCTL_AGPIOC_UNBIND = AGPIOC_UNBIND;\nunsigned 
IOCTL_AUDIO_GETINFO = AUDIO_GETINFO;\nunsigned IOCTL_AUDIO_SETINFO = AUDIO_SETINFO;\nunsigned IOCTL_AUDIO_DRAIN = AUDIO_DRAIN;\nunsigned IOCTL_AUDIO_FLUSH = AUDIO_FLUSH;\nunsigned IOCTL_AUDIO_WSEEK = AUDIO_WSEEK;\nunsigned IOCTL_AUDIO_RERROR = AUDIO_RERROR;\nunsigned IOCTL_AUDIO_GETDEV = AUDIO_GETDEV;\nunsigned IOCTL_AUDIO_GETENC = AUDIO_GETENC;\nunsigned IOCTL_AUDIO_GETFD = AUDIO_GETFD;\nunsigned IOCTL_AUDIO_SETFD = AUDIO_SETFD;\nunsigned IOCTL_AUDIO_PERROR = AUDIO_PERROR;\nunsigned IOCTL_AUDIO_GETIOFFS = AUDIO_GETIOFFS;\nunsigned IOCTL_AUDIO_GETOOFFS = AUDIO_GETOOFFS;\nunsigned IOCTL_AUDIO_GETPROPS = AUDIO_GETPROPS;\nunsigned IOCTL_AUDIO_GETBUFINFO = AUDIO_GETBUFINFO;\nunsigned IOCTL_AUDIO_SETCHAN = AUDIO_SETCHAN;\nunsigned IOCTL_AUDIO_GETCHAN = AUDIO_GETCHAN;\nunsigned IOCTL_AUDIO_QUERYFORMAT = AUDIO_QUERYFORMAT;\nunsigned IOCTL_AUDIO_GETFORMAT = AUDIO_GETFORMAT;\nunsigned IOCTL_AUDIO_SETFORMAT = AUDIO_SETFORMAT;\nunsigned IOCTL_AUDIO_MIXER_READ = AUDIO_MIXER_READ;\nunsigned IOCTL_AUDIO_MIXER_WRITE = AUDIO_MIXER_WRITE;\nunsigned IOCTL_AUDIO_MIXER_DEVINFO = AUDIO_MIXER_DEVINFO;\nunsigned IOCTL_ATAIOCCOMMAND = ATAIOCCOMMAND;\nunsigned IOCTL_ATABUSIOSCAN = ATABUSIOSCAN;\nunsigned IOCTL_ATABUSIORESET = ATABUSIORESET;\nunsigned IOCTL_ATABUSIODETACH = ATABUSIODETACH;\nunsigned IOCTL_CDIOCPLAYTRACKS = CDIOCPLAYTRACKS;\nunsigned IOCTL_CDIOCPLAYBLOCKS = CDIOCPLAYBLOCKS;\nunsigned IOCTL_CDIOCREADSUBCHANNEL = CDIOCREADSUBCHANNEL;\nunsigned IOCTL_CDIOREADTOCHEADER = CDIOREADTOCHEADER;\nunsigned IOCTL_CDIOREADTOCENTRIES = CDIOREADTOCENTRIES;\nunsigned IOCTL_CDIOREADMSADDR = CDIOREADMSADDR;\nunsigned IOCTL_CDIOCSETPATCH = CDIOCSETPATCH;\nunsigned IOCTL_CDIOCGETVOL = CDIOCGETVOL;\nunsigned IOCTL_CDIOCSETVOL = CDIOCSETVOL;\nunsigned IOCTL_CDIOCSETMONO = CDIOCSETMONO;\nunsigned IOCTL_CDIOCSETSTEREO = CDIOCSETSTEREO;\nunsigned IOCTL_CDIOCSETMUTE = CDIOCSETMUTE;\nunsigned IOCTL_CDIOCSETLEFT = CDIOCSETLEFT;\nunsigned IOCTL_CDIOCSETRIGHT = CDIOCSETRIGHT;\nunsigned 
IOCTL_CDIOCSETDEBUG = CDIOCSETDEBUG;\nunsigned IOCTL_CDIOCCLRDEBUG = CDIOCCLRDEBUG;\nunsigned IOCTL_CDIOCPAUSE = CDIOCPAUSE;\nunsigned IOCTL_CDIOCRESUME = CDIOCRESUME;\nunsigned IOCTL_CDIOCRESET = CDIOCRESET;\nunsigned IOCTL_CDIOCSTART = CDIOCSTART;\nunsigned IOCTL_CDIOCSTOP = CDIOCSTOP;\nunsigned IOCTL_CDIOCEJECT = CDIOCEJECT;\nunsigned IOCTL_CDIOCALLOW = CDIOCALLOW;\nunsigned IOCTL_CDIOCPREVENT = CDIOCPREVENT;\nunsigned IOCTL_CDIOCCLOSE = CDIOCCLOSE;\nunsigned IOCTL_CDIOCPLAYMSF = CDIOCPLAYMSF;\nunsigned IOCTL_CDIOCLOADUNLOAD = CDIOCLOADUNLOAD;\nunsigned IOCTL_CHIOMOVE = CHIOMOVE;\nunsigned IOCTL_CHIOEXCHANGE = CHIOEXCHANGE;\nunsigned IOCTL_CHIOPOSITION = CHIOPOSITION;\nunsigned IOCTL_CHIOGPICKER = CHIOGPICKER;\nunsigned IOCTL_CHIOSPICKER = CHIOSPICKER;\nunsigned IOCTL_CHIOGPARAMS = CHIOGPARAMS;\nunsigned IOCTL_CHIOIELEM = CHIOIELEM;\nunsigned IOCTL_OCHIOGSTATUS = OCHIOGSTATUS;\nunsigned IOCTL_CHIOGSTATUS = CHIOGSTATUS;\nunsigned IOCTL_CHIOSVOLTAG = CHIOSVOLTAG;\nunsigned IOCTL_CLOCKCTL_SETTIMEOFDAY = CLOCKCTL_SETTIMEOFDAY;\nunsigned IOCTL_CLOCKCTL_ADJTIME = CLOCKCTL_ADJTIME;\nunsigned IOCTL_CLOCKCTL_CLOCK_SETTIME = CLOCKCTL_CLOCK_SETTIME;\nunsigned IOCTL_CLOCKCTL_NTP_ADJTIME = CLOCKCTL_NTP_ADJTIME;\nunsigned IOCTL_IOC_CPU_SETSTATE = IOC_CPU_SETSTATE;\nunsigned IOCTL_IOC_CPU_GETSTATE = IOC_CPU_GETSTATE;\nunsigned IOCTL_IOC_CPU_GETCOUNT = IOC_CPU_GETCOUNT;\nunsigned IOCTL_IOC_CPU_MAPID = IOC_CPU_MAPID;\nunsigned IOCTL_IOC_CPU_UCODE_GET_VERSION = IOC_CPU_UCODE_GET_VERSION;\nunsigned IOCTL_IOC_CPU_UCODE_APPLY = IOC_CPU_UCODE_APPLY;\nunsigned IOCTL_DIOCGDINFO = DIOCGDINFO;\nunsigned IOCTL_DIOCSDINFO = DIOCSDINFO;\nunsigned IOCTL_DIOCWDINFO = DIOCWDINFO;\nunsigned IOCTL_DIOCRFORMAT = DIOCRFORMAT;\nunsigned IOCTL_DIOCWFORMAT = DIOCWFORMAT;\nunsigned IOCTL_DIOCSSTEP = DIOCSSTEP;\nunsigned IOCTL_DIOCSRETRIES = DIOCSRETRIES;\nunsigned IOCTL_DIOCKLABEL = DIOCKLABEL;\nunsigned IOCTL_DIOCWLABEL = DIOCWLABEL;\nunsigned IOCTL_DIOCSBAD = DIOCSBAD;\nunsigned IOCTL_DIOCEJECT = 
DIOCEJECT;\nunsigned IOCTL_ODIOCEJECT = ODIOCEJECT;\nunsigned IOCTL_DIOCLOCK = DIOCLOCK;\nunsigned IOCTL_DIOCGDEFLABEL = DIOCGDEFLABEL;\nunsigned IOCTL_DIOCCLRLABEL = DIOCCLRLABEL;\nunsigned IOCTL_DIOCGCACHE = DIOCGCACHE;\nunsigned IOCTL_DIOCSCACHE = DIOCSCACHE;\nunsigned IOCTL_DIOCCACHESYNC = DIOCCACHESYNC;\nunsigned IOCTL_DIOCBSLIST = DIOCBSLIST;\nunsigned IOCTL_DIOCBSFLUSH = DIOCBSFLUSH;\nunsigned IOCTL_DIOCAWEDGE = DIOCAWEDGE;\nunsigned IOCTL_DIOCGWEDGEINFO = DIOCGWEDGEINFO;\nunsigned IOCTL_DIOCDWEDGE = DIOCDWEDGE;\nunsigned IOCTL_DIOCLWEDGES = DIOCLWEDGES;\nunsigned IOCTL_DIOCGSTRATEGY = DIOCGSTRATEGY;\nunsigned IOCTL_DIOCSSTRATEGY = DIOCSSTRATEGY;\nunsigned IOCTL_DIOCGDISKINFO = DIOCGDISKINFO;\nunsigned IOCTL_DIOCTUR = DIOCTUR;\nunsigned IOCTL_DIOCMWEDGES = DIOCMWEDGES;\nunsigned IOCTL_DIOCGSECTORSIZE = DIOCGSECTORSIZE;\nunsigned IOCTL_DIOCGMEDIASIZE = DIOCGMEDIASIZE;\nunsigned IOCTL_DIOCRMWEDGES = DIOCRMWEDGES;\nunsigned IOCTL_DRVDETACHDEV = DRVDETACHDEV;\nunsigned IOCTL_DRVRESCANBUS = DRVRESCANBUS;\nunsigned IOCTL_DRVCTLCOMMAND = DRVCTLCOMMAND;\nunsigned IOCTL_DRVRESUMEDEV = DRVRESUMEDEV;\nunsigned IOCTL_DRVLISTDEV = DRVLISTDEV;\nunsigned IOCTL_DRVGETEVENT = DRVGETEVENT;\nunsigned IOCTL_DRVSUSPENDDEV = DRVSUSPENDDEV;\nunsigned IOCTL_DVD_READ_STRUCT = DVD_READ_STRUCT;\nunsigned IOCTL_DVD_WRITE_STRUCT = DVD_WRITE_STRUCT;\nunsigned IOCTL_DVD_AUTH = DVD_AUTH;\nunsigned IOCTL_ENVSYS_GETDICTIONARY = ENVSYS_GETDICTIONARY;\nunsigned IOCTL_ENVSYS_SETDICTIONARY = ENVSYS_SETDICTIONARY;\nunsigned IOCTL_ENVSYS_REMOVEPROPS = ENVSYS_REMOVEPROPS;\nunsigned IOCTL_ENVSYS_GTREDATA = ENVSYS_GTREDATA;\nunsigned IOCTL_ENVSYS_GTREINFO = ENVSYS_GTREINFO;\nunsigned IOCTL_KFILTER_BYFILTER = KFILTER_BYFILTER;\nunsigned IOCTL_KFILTER_BYNAME = KFILTER_BYNAME;\nunsigned IOCTL_FDIOCGETOPTS = FDIOCGETOPTS;\nunsigned IOCTL_FDIOCSETOPTS = FDIOCSETOPTS;\nunsigned IOCTL_FDIOCSETFORMAT = FDIOCSETFORMAT;\nunsigned IOCTL_FDIOCGETFORMAT = FDIOCGETFORMAT;\nunsigned IOCTL_FDIOCFORMAT_TRACK = 
FDIOCFORMAT_TRACK;\nunsigned IOCTL_FIOCLEX = FIOCLEX;\nunsigned IOCTL_FIONCLEX = FIONCLEX;\nunsigned IOCTL_FIOSEEKDATA = FIOSEEKDATA;\nunsigned IOCTL_FIOSEEKHOLE = FIOSEEKHOLE;\nunsigned IOCTL_FIONREAD = FIONREAD;\nunsigned IOCTL_FIONBIO = FIONBIO;\nunsigned IOCTL_FIOASYNC = FIOASYNC;\nunsigned IOCTL_FIOSETOWN = FIOSETOWN;\nunsigned IOCTL_FIOGETOWN = FIOGETOWN;\nunsigned IOCTL_OFIOGETBMAP = OFIOGETBMAP;\nunsigned IOCTL_FIOGETBMAP = FIOGETBMAP;\nunsigned IOCTL_FIONWRITE = FIONWRITE;\nunsigned IOCTL_FIONSPACE = FIONSPACE;\nunsigned IOCTL_GPIOINFO = GPIOINFO;\nunsigned IOCTL_GPIOSET = GPIOSET;\nunsigned IOCTL_GPIOUNSET = GPIOUNSET;\nunsigned IOCTL_GPIOREAD = GPIOREAD;\nunsigned IOCTL_GPIOWRITE = GPIOWRITE;\nunsigned IOCTL_GPIOTOGGLE = GPIOTOGGLE;\nunsigned IOCTL_GPIOATTACH = GPIOATTACH;\nunsigned IOCTL_PTIOCNETBSD = PTIOCNETBSD;\nunsigned IOCTL_PTIOCSUNOS = PTIOCSUNOS;\nunsigned IOCTL_PTIOCLINUX = PTIOCLINUX;\nunsigned IOCTL_PTIOCFREEBSD = PTIOCFREEBSD;\nunsigned IOCTL_PTIOCULTRIX = PTIOCULTRIX;\nunsigned IOCTL_TIOCHPCL = TIOCHPCL;\nunsigned IOCTL_TIOCGETP = TIOCGETP;\nunsigned IOCTL_TIOCSETP = TIOCSETP;\nunsigned IOCTL_TIOCSETN = TIOCSETN;\nunsigned IOCTL_TIOCSETC = TIOCSETC;\nunsigned IOCTL_TIOCGETC = TIOCGETC;\nunsigned IOCTL_TIOCLBIS = TIOCLBIS;\nunsigned IOCTL_TIOCLBIC = TIOCLBIC;\nunsigned IOCTL_TIOCLSET = TIOCLSET;\nunsigned IOCTL_TIOCLGET = TIOCLGET;\nunsigned IOCTL_TIOCSLTC = TIOCSLTC;\nunsigned IOCTL_TIOCGLTC = TIOCGLTC;\nunsigned IOCTL_OTIOCCONS = OTIOCCONS;\nunsigned IOCTL_JOY_SETTIMEOUT = JOY_SETTIMEOUT;\nunsigned IOCTL_JOY_GETTIMEOUT = JOY_GETTIMEOUT;\nunsigned IOCTL_JOY_SET_X_OFFSET = JOY_SET_X_OFFSET;\nunsigned IOCTL_JOY_SET_Y_OFFSET = JOY_SET_Y_OFFSET;\nunsigned IOCTL_JOY_GET_X_OFFSET = JOY_GET_X_OFFSET;\nunsigned IOCTL_JOY_GET_Y_OFFSET = JOY_GET_Y_OFFSET;\nunsigned IOCTL_OKIOCGSYMBOL = OKIOCGSYMBOL;\nunsigned IOCTL_OKIOCGVALUE = OKIOCGVALUE;\nunsigned IOCTL_KIOCGSIZE = KIOCGSIZE;\nunsigned IOCTL_KIOCGVALUE = KIOCGVALUE;\nunsigned IOCTL_KIOCGSYMBOL = 
KIOCGSYMBOL;\nunsigned IOCTL_LUAINFO = LUAINFO;\nunsigned IOCTL_LUACREATE = LUACREATE;\nunsigned IOCTL_LUADESTROY = LUADESTROY;\nunsigned IOCTL_LUAREQUIRE = LUAREQUIRE;\nunsigned IOCTL_LUALOAD = LUALOAD;\nunsigned IOCTL_MIDI_PRETIME = MIDI_PRETIME;\nunsigned IOCTL_MIDI_MPUMODE = MIDI_MPUMODE;\nunsigned IOCTL_MIDI_MPUCMD = MIDI_MPUCMD;\nunsigned IOCTL_SEQUENCER_RESET = SEQUENCER_RESET;\nunsigned IOCTL_SEQUENCER_SYNC = SEQUENCER_SYNC;\nunsigned IOCTL_SEQUENCER_INFO = SEQUENCER_INFO;\nunsigned IOCTL_SEQUENCER_CTRLRATE = SEQUENCER_CTRLRATE;\nunsigned IOCTL_SEQUENCER_GETOUTCOUNT = SEQUENCER_GETOUTCOUNT;\nunsigned IOCTL_SEQUENCER_GETINCOUNT = SEQUENCER_GETINCOUNT;\nunsigned IOCTL_SEQUENCER_RESETSAMPLES = SEQUENCER_RESETSAMPLES;\nunsigned IOCTL_SEQUENCER_NRSYNTHS = SEQUENCER_NRSYNTHS;\nunsigned IOCTL_SEQUENCER_NRMIDIS = SEQUENCER_NRMIDIS;\nunsigned IOCTL_SEQUENCER_THRESHOLD = SEQUENCER_THRESHOLD;\nunsigned IOCTL_SEQUENCER_MEMAVL = SEQUENCER_MEMAVL;\nunsigned IOCTL_SEQUENCER_PANIC = SEQUENCER_PANIC;\nunsigned IOCTL_SEQUENCER_OUTOFBAND = SEQUENCER_OUTOFBAND;\nunsigned IOCTL_SEQUENCER_GETTIME = SEQUENCER_GETTIME;\nunsigned IOCTL_SEQUENCER_TMR_TIMEBASE = SEQUENCER_TMR_TIMEBASE;\nunsigned IOCTL_SEQUENCER_TMR_START = SEQUENCER_TMR_START;\nunsigned IOCTL_SEQUENCER_TMR_STOP = SEQUENCER_TMR_STOP;\nunsigned IOCTL_SEQUENCER_TMR_CONTINUE = SEQUENCER_TMR_CONTINUE;\nunsigned IOCTL_SEQUENCER_TMR_TEMPO = SEQUENCER_TMR_TEMPO;\nunsigned IOCTL_SEQUENCER_TMR_SOURCE = SEQUENCER_TMR_SOURCE;\nunsigned IOCTL_SEQUENCER_TMR_METRONOME = SEQUENCER_TMR_METRONOME;\nunsigned IOCTL_SEQUENCER_TMR_SELECT = SEQUENCER_TMR_SELECT;\nunsigned IOCTL_MTIOCTOP = MTIOCTOP;\nunsigned IOCTL_MTIOCGET = MTIOCGET;\nunsigned IOCTL_MTIOCIEOT = MTIOCIEOT;\nunsigned IOCTL_MTIOCEEOT = MTIOCEEOT;\nunsigned IOCTL_MTIOCRDSPOS = MTIOCRDSPOS;\nunsigned IOCTL_MTIOCRDHPOS = MTIOCRDHPOS;\nunsigned IOCTL_MTIOCSLOCATE = MTIOCSLOCATE;\nunsigned IOCTL_MTIOCHLOCATE = MTIOCHLOCATE;\nunsigned IOCTL_POWER_EVENT_RECVDICT = 
POWER_EVENT_RECVDICT;\nunsigned IOCTL_POWER_IOC_GET_TYPE = POWER_IOC_GET_TYPE;\nunsigned IOCTL_RIOCGINFO = RIOCGINFO;\nunsigned IOCTL_RIOCSINFO = RIOCSINFO;\nunsigned IOCTL_RIOCSSRCH = RIOCSSRCH;\nunsigned IOCTL_RNDGETENTCNT = RNDGETENTCNT;\nunsigned IOCTL_RNDGETSRCNUM = RNDGETSRCNUM;\nunsigned IOCTL_RNDGETSRCNAME = RNDGETSRCNAME;\nunsigned IOCTL_RNDCTL = RNDCTL;\nunsigned IOCTL_RNDADDDATA = RNDADDDATA;\nunsigned IOCTL_RNDGETPOOLSTAT = RNDGETPOOLSTAT;\nunsigned IOCTL_RNDGETESTNUM = RNDGETESTNUM;\nunsigned IOCTL_RNDGETESTNAME = RNDGETESTNAME;\nunsigned IOCTL_SCIOCGET = SCIOCGET;\nunsigned IOCTL_SCIOCSET = SCIOCSET;\nunsigned IOCTL_SCIOCRESTART = SCIOCRESTART;\nunsigned IOCTL_SCIOC_USE_ADF = SCIOC_USE_ADF;\nunsigned IOCTL_SCIOCCOMMAND = SCIOCCOMMAND;\nunsigned IOCTL_SCIOCDEBUG = SCIOCDEBUG;\nunsigned IOCTL_SCIOCIDENTIFY = SCIOCIDENTIFY;\nunsigned IOCTL_OSCIOCIDENTIFY = OSCIOCIDENTIFY;\nunsigned IOCTL_SCIOCDECONFIG = SCIOCDECONFIG;\nunsigned IOCTL_SCIOCRECONFIG = SCIOCRECONFIG;\nunsigned IOCTL_SCIOCRESET = SCIOCRESET;\nunsigned IOCTL_SCBUSIOSCAN = SCBUSIOSCAN;\nunsigned IOCTL_SCBUSIORESET = SCBUSIORESET;\nunsigned IOCTL_SCBUSIODETACH = SCBUSIODETACH;\nunsigned IOCTL_SCBUSACCEL = SCBUSACCEL;\nunsigned IOCTL_SCBUSIOLLSCAN = SCBUSIOLLSCAN;\nunsigned IOCTL_SIOCSHIWAT = SIOCSHIWAT;\nunsigned IOCTL_SIOCGHIWAT = SIOCGHIWAT;\nunsigned IOCTL_SIOCSLOWAT = SIOCSLOWAT;\nunsigned IOCTL_SIOCGLOWAT = SIOCGLOWAT;\nunsigned IOCTL_SIOCATMARK = SIOCATMARK;\nunsigned IOCTL_SIOCSPGRP = SIOCSPGRP;\nunsigned IOCTL_SIOCGPGRP = SIOCGPGRP;\nunsigned IOCTL_SIOCPEELOFF = SIOCPEELOFF;\nunsigned IOCTL_SIOCADDRT = SIOCADDRT;\nunsigned IOCTL_SIOCDELRT = SIOCDELRT;\nunsigned IOCTL_SIOCSIFADDR = SIOCSIFADDR;\nunsigned IOCTL_SIOCGIFADDR = SIOCGIFADDR;\nunsigned IOCTL_SIOCSIFDSTADDR = SIOCSIFDSTADDR;\nunsigned IOCTL_SIOCGIFDSTADDR = SIOCGIFDSTADDR;\nunsigned IOCTL_SIOCSIFFLAGS = SIOCSIFFLAGS;\nunsigned IOCTL_SIOCGIFFLAGS = SIOCGIFFLAGS;\nunsigned IOCTL_SIOCGIFBRDADDR = SIOCGIFBRDADDR;\nunsigned 
IOCTL_SIOCSIFBRDADDR = SIOCSIFBRDADDR;\nunsigned IOCTL_SIOCGIFCONF = SIOCGIFCONF;\nunsigned IOCTL_SIOCGIFNETMASK = SIOCGIFNETMASK;\nunsigned IOCTL_SIOCSIFNETMASK = SIOCSIFNETMASK;\nunsigned IOCTL_SIOCGIFMETRIC = SIOCGIFMETRIC;\nunsigned IOCTL_SIOCSIFMETRIC = SIOCSIFMETRIC;\nunsigned IOCTL_SIOCDIFADDR = SIOCDIFADDR;\nunsigned IOCTL_SIOCAIFADDR = SIOCAIFADDR;\nunsigned IOCTL_SIOCGIFALIAS = SIOCGIFALIAS;\nunsigned IOCTL_SIOCGIFAFLAG_IN = SIOCGIFAFLAG_IN;\nunsigned IOCTL_SIOCALIFADDR = SIOCALIFADDR;\nunsigned IOCTL_SIOCGLIFADDR = SIOCGLIFADDR;\nunsigned IOCTL_SIOCDLIFADDR = SIOCDLIFADDR;\nunsigned IOCTL_SIOCSIFADDRPREF = SIOCSIFADDRPREF;\nunsigned IOCTL_SIOCGIFADDRPREF = SIOCGIFADDRPREF;\nunsigned IOCTL_SIOCADDMULTI = SIOCADDMULTI;\nunsigned IOCTL_SIOCDELMULTI = SIOCDELMULTI;\nunsigned IOCTL_SIOCGETVIFCNT = SIOCGETVIFCNT;\nunsigned IOCTL_SIOCGETSGCNT = SIOCGETSGCNT;\nunsigned IOCTL_SIOCSIFMEDIA = SIOCSIFMEDIA;\nunsigned IOCTL_SIOCGIFMEDIA = SIOCGIFMEDIA;\nunsigned IOCTL_SIOCSIFGENERIC = SIOCSIFGENERIC;\nunsigned IOCTL_SIOCGIFGENERIC = SIOCGIFGENERIC;\nunsigned IOCTL_SIOCSIFPHYADDR = SIOCSIFPHYADDR;\nunsigned IOCTL_SIOCGIFPSRCADDR = SIOCGIFPSRCADDR;\nunsigned IOCTL_SIOCGIFPDSTADDR = SIOCGIFPDSTADDR;\nunsigned IOCTL_SIOCDIFPHYADDR = SIOCDIFPHYADDR;\nunsigned IOCTL_SIOCSLIFPHYADDR = SIOCSLIFPHYADDR;\nunsigned IOCTL_SIOCGLIFPHYADDR = SIOCGLIFPHYADDR;\nunsigned IOCTL_SIOCSIFMTU = SIOCSIFMTU;\nunsigned IOCTL_SIOCGIFMTU = SIOCGIFMTU;\nunsigned IOCTL_SIOCSDRVSPEC = SIOCSDRVSPEC;\nunsigned IOCTL_SIOCGDRVSPEC = SIOCGDRVSPEC;\nunsigned IOCTL_SIOCIFCREATE = SIOCIFCREATE;\nunsigned IOCTL_SIOCIFDESTROY = SIOCIFDESTROY;\nunsigned IOCTL_SIOCIFGCLONERS = SIOCIFGCLONERS;\nunsigned IOCTL_SIOCGIFDLT = SIOCGIFDLT;\nunsigned IOCTL_SIOCGIFCAP = SIOCGIFCAP;\nunsigned IOCTL_SIOCSIFCAP = SIOCSIFCAP;\nunsigned IOCTL_SIOCSVH = SIOCSVH;\nunsigned IOCTL_SIOCGVH = SIOCGVH;\nunsigned IOCTL_SIOCINITIFADDR = SIOCINITIFADDR;\nunsigned IOCTL_SIOCGIFDATA = SIOCGIFDATA;\nunsigned IOCTL_SIOCZIFDATA = 
SIOCZIFDATA;\nunsigned IOCTL_SIOCGLINKSTR = SIOCGLINKSTR;\nunsigned IOCTL_SIOCSLINKSTR = SIOCSLINKSTR;\nunsigned IOCTL_SIOCGETHERCAP = SIOCGETHERCAP;\nunsigned IOCTL_SIOCGIFINDEX = SIOCGIFINDEX;\nunsigned IOCTL_SIOCSETHERCAP = SIOCSETHERCAP;\nunsigned IOCTL_SIOCSIFDESCR = SIOCSIFDESCR;\nunsigned IOCTL_SIOCGIFDESCR = SIOCGIFDESCR;\nunsigned IOCTL_SIOCGUMBINFO = SIOCGUMBINFO;\nunsigned IOCTL_SIOCSUMBPARAM = SIOCSUMBPARAM;\nunsigned IOCTL_SIOCGUMBPARAM = SIOCGUMBPARAM;\nunsigned IOCTL_SIOCSETPFSYNC = SIOCSETPFSYNC;\nunsigned IOCTL_SIOCGETPFSYNC = SIOCGETPFSYNC;\nunsigned IOCTL_PPS_IOC_CREATE = PPS_IOC_CREATE;\nunsigned IOCTL_PPS_IOC_DESTROY = PPS_IOC_DESTROY;\nunsigned IOCTL_PPS_IOC_SETPARAMS = PPS_IOC_SETPARAMS;\nunsigned IOCTL_PPS_IOC_GETPARAMS = PPS_IOC_GETPARAMS;\nunsigned IOCTL_PPS_IOC_GETCAP = PPS_IOC_GETCAP;\nunsigned IOCTL_PPS_IOC_FETCH = PPS_IOC_FETCH;\nunsigned IOCTL_PPS_IOC_KCBIND = PPS_IOC_KCBIND;\nunsigned IOCTL_TIOCEXCL = TIOCEXCL;\nunsigned IOCTL_TIOCNXCL = TIOCNXCL;\nunsigned IOCTL_TIOCFLUSH = TIOCFLUSH;\nunsigned IOCTL_TIOCGETA = TIOCGETA;\nunsigned IOCTL_TIOCSETA = TIOCSETA;\nunsigned IOCTL_TIOCSETAW = TIOCSETAW;\nunsigned IOCTL_TIOCSETAF = TIOCSETAF;\nunsigned IOCTL_TIOCGETD = TIOCGETD;\nunsigned IOCTL_TIOCSETD = TIOCSETD;\nunsigned IOCTL_TIOCGLINED = TIOCGLINED;\nunsigned IOCTL_TIOCSLINED = TIOCSLINED;\nunsigned IOCTL_TIOCSBRK = TIOCSBRK;\nunsigned IOCTL_TIOCCBRK = TIOCCBRK;\nunsigned IOCTL_TIOCSDTR = TIOCSDTR;\nunsigned IOCTL_TIOCCDTR = TIOCCDTR;\nunsigned IOCTL_TIOCGPGRP = TIOCGPGRP;\nunsigned IOCTL_TIOCSPGRP = TIOCSPGRP;\nunsigned IOCTL_TIOCOUTQ = TIOCOUTQ;\nunsigned IOCTL_TIOCSTI = TIOCSTI;\nunsigned IOCTL_TIOCNOTTY = TIOCNOTTY;\nunsigned IOCTL_TIOCPKT = TIOCPKT;\nunsigned IOCTL_TIOCSTOP = TIOCSTOP;\nunsigned IOCTL_TIOCSTART = TIOCSTART;\nunsigned IOCTL_TIOCMSET = TIOCMSET;\nunsigned IOCTL_TIOCMBIS = TIOCMBIS;\nunsigned IOCTL_TIOCMBIC = TIOCMBIC;\nunsigned IOCTL_TIOCMGET = TIOCMGET;\nunsigned IOCTL_TIOCREMOTE = TIOCREMOTE;\nunsigned 
IOCTL_TIOCGWINSZ = TIOCGWINSZ;\nunsigned IOCTL_TIOCSWINSZ = TIOCSWINSZ;\nunsigned IOCTL_TIOCUCNTL = TIOCUCNTL;\nunsigned IOCTL_TIOCSTAT = TIOCSTAT;\nunsigned IOCTL_TIOCGSID = TIOCGSID;\nunsigned IOCTL_TIOCCONS = TIOCCONS;\nunsigned IOCTL_TIOCSCTTY = TIOCSCTTY;\nunsigned IOCTL_TIOCEXT = TIOCEXT;\nunsigned IOCTL_TIOCSIG = TIOCSIG;\nunsigned IOCTL_TIOCDRAIN = TIOCDRAIN;\nunsigned IOCTL_TIOCGFLAGS = TIOCGFLAGS;\nunsigned IOCTL_TIOCSFLAGS = TIOCSFLAGS;\nunsigned IOCTL_TIOCDCDTIMESTAMP = TIOCDCDTIMESTAMP;\nunsigned IOCTL_TIOCRCVFRAME = TIOCRCVFRAME;\nunsigned IOCTL_TIOCXMTFRAME = TIOCXMTFRAME;\nunsigned IOCTL_TIOCPTMGET = TIOCPTMGET;\nunsigned IOCTL_TIOCGRANTPT = TIOCGRANTPT;\nunsigned IOCTL_TIOCPTSNAME = TIOCPTSNAME;\nunsigned IOCTL_TIOCSQSIZE = TIOCSQSIZE;\nunsigned IOCTL_TIOCGQSIZE = TIOCGQSIZE;\nunsigned IOCTL_VERIEXEC_LOAD = VERIEXEC_LOAD;\nunsigned IOCTL_VERIEXEC_TABLESIZE = VERIEXEC_TABLESIZE;\nunsigned IOCTL_VERIEXEC_DELETE = VERIEXEC_DELETE;\nunsigned IOCTL_VERIEXEC_QUERY = VERIEXEC_QUERY;\nunsigned IOCTL_VERIEXEC_DUMP = VERIEXEC_DUMP;\nunsigned IOCTL_VERIEXEC_FLUSH = VERIEXEC_FLUSH;\nunsigned IOCTL_VIDIOC_QUERYCAP = VIDIOC_QUERYCAP;\nunsigned IOCTL_VIDIOC_RESERVED = VIDIOC_RESERVED;\nunsigned IOCTL_VIDIOC_ENUM_FMT = VIDIOC_ENUM_FMT;\nunsigned IOCTL_VIDIOC_G_FMT = VIDIOC_G_FMT;\nunsigned IOCTL_VIDIOC_S_FMT = VIDIOC_S_FMT;\nunsigned IOCTL_VIDIOC_REQBUFS = VIDIOC_REQBUFS;\nunsigned IOCTL_VIDIOC_QUERYBUF = VIDIOC_QUERYBUF;\nunsigned IOCTL_VIDIOC_G_FBUF = VIDIOC_G_FBUF;\nunsigned IOCTL_VIDIOC_S_FBUF = VIDIOC_S_FBUF;\nunsigned IOCTL_VIDIOC_OVERLAY = VIDIOC_OVERLAY;\nunsigned IOCTL_VIDIOC_QBUF = VIDIOC_QBUF;\nunsigned IOCTL_VIDIOC_DQBUF = VIDIOC_DQBUF;\nunsigned IOCTL_VIDIOC_STREAMON = VIDIOC_STREAMON;\nunsigned IOCTL_VIDIOC_STREAMOFF = VIDIOC_STREAMOFF;\nunsigned IOCTL_VIDIOC_G_PARM = VIDIOC_G_PARM;\nunsigned IOCTL_VIDIOC_S_PARM = VIDIOC_S_PARM;\nunsigned IOCTL_VIDIOC_G_STD = VIDIOC_G_STD;\nunsigned IOCTL_VIDIOC_S_STD = VIDIOC_S_STD;\nunsigned IOCTL_VIDIOC_ENUMSTD = 
VIDIOC_ENUMSTD;\nunsigned IOCTL_VIDIOC_ENUMINPUT = VIDIOC_ENUMINPUT;\nunsigned IOCTL_VIDIOC_G_CTRL = VIDIOC_G_CTRL;\nunsigned IOCTL_VIDIOC_S_CTRL = VIDIOC_S_CTRL;\nunsigned IOCTL_VIDIOC_G_TUNER = VIDIOC_G_TUNER;\nunsigned IOCTL_VIDIOC_S_TUNER = VIDIOC_S_TUNER;\nunsigned IOCTL_VIDIOC_G_AUDIO = VIDIOC_G_AUDIO;\nunsigned IOCTL_VIDIOC_S_AUDIO = VIDIOC_S_AUDIO;\nunsigned IOCTL_VIDIOC_QUERYCTRL = VIDIOC_QUERYCTRL;\nunsigned IOCTL_VIDIOC_QUERYMENU = VIDIOC_QUERYMENU;\nunsigned IOCTL_VIDIOC_G_INPUT = VIDIOC_G_INPUT;\nunsigned IOCTL_VIDIOC_S_INPUT = VIDIOC_S_INPUT;\nunsigned IOCTL_VIDIOC_G_OUTPUT = VIDIOC_G_OUTPUT;\nunsigned IOCTL_VIDIOC_S_OUTPUT = VIDIOC_S_OUTPUT;\nunsigned IOCTL_VIDIOC_ENUMOUTPUT = VIDIOC_ENUMOUTPUT;\nunsigned IOCTL_VIDIOC_G_AUDOUT = VIDIOC_G_AUDOUT;\nunsigned IOCTL_VIDIOC_S_AUDOUT = VIDIOC_S_AUDOUT;\nunsigned IOCTL_VIDIOC_G_MODULATOR = VIDIOC_G_MODULATOR;\nunsigned IOCTL_VIDIOC_S_MODULATOR = VIDIOC_S_MODULATOR;\nunsigned IOCTL_VIDIOC_G_FREQUENCY = VIDIOC_G_FREQUENCY;\nunsigned IOCTL_VIDIOC_S_FREQUENCY = VIDIOC_S_FREQUENCY;\nunsigned IOCTL_VIDIOC_CROPCAP = VIDIOC_CROPCAP;\nunsigned IOCTL_VIDIOC_G_CROP = VIDIOC_G_CROP;\nunsigned IOCTL_VIDIOC_S_CROP = VIDIOC_S_CROP;\nunsigned IOCTL_VIDIOC_G_JPEGCOMP = VIDIOC_G_JPEGCOMP;\nunsigned IOCTL_VIDIOC_S_JPEGCOMP = VIDIOC_S_JPEGCOMP;\nunsigned IOCTL_VIDIOC_QUERYSTD = VIDIOC_QUERYSTD;\nunsigned IOCTL_VIDIOC_TRY_FMT = VIDIOC_TRY_FMT;\nunsigned IOCTL_VIDIOC_ENUMAUDIO = VIDIOC_ENUMAUDIO;\nunsigned IOCTL_VIDIOC_ENUMAUDOUT = VIDIOC_ENUMAUDOUT;\nunsigned IOCTL_VIDIOC_G_PRIORITY = VIDIOC_G_PRIORITY;\nunsigned IOCTL_VIDIOC_S_PRIORITY = VIDIOC_S_PRIORITY;\nunsigned IOCTL_VIDIOC_ENUM_FRAMESIZES = VIDIOC_ENUM_FRAMESIZES;\nunsigned IOCTL_VIDIOC_ENUM_FRAMEINTERVALS = VIDIOC_ENUM_FRAMEINTERVALS;\nunsigned IOCTL_WDOGIOC_GMODE = WDOGIOC_GMODE;\nunsigned IOCTL_WDOGIOC_SMODE = WDOGIOC_SMODE;\nunsigned IOCTL_WDOGIOC_WHICH = WDOGIOC_WHICH;\nunsigned IOCTL_WDOGIOC_TICKLE = WDOGIOC_TICKLE;\nunsigned IOCTL_WDOGIOC_GTICKLER = 
WDOGIOC_GTICKLER;\nunsigned IOCTL_WDOGIOC_GWDOGS = WDOGIOC_GWDOGS;\nunsigned IOCTL_KCOV_IOC_SETBUFSIZE = KCOV_IOC_SETBUFSIZE;\nunsigned IOCTL_KCOV_IOC_ENABLE = KCOV_IOC_ENABLE;\nunsigned IOCTL_KCOV_IOC_DISABLE = KCOV_IOC_DISABLE;\nunsigned IOCTL_IPMICTL_RECEIVE_MSG_TRUNC = IPMICTL_RECEIVE_MSG_TRUNC;\nunsigned IOCTL_IPMICTL_RECEIVE_MSG = IPMICTL_RECEIVE_MSG;\nunsigned IOCTL_IPMICTL_SEND_COMMAND = IPMICTL_SEND_COMMAND;\nunsigned IOCTL_IPMICTL_REGISTER_FOR_CMD = IPMICTL_REGISTER_FOR_CMD;\nunsigned IOCTL_IPMICTL_UNREGISTER_FOR_CMD = IPMICTL_UNREGISTER_FOR_CMD;\nunsigned IOCTL_IPMICTL_SET_GETS_EVENTS_CMD = IPMICTL_SET_GETS_EVENTS_CMD;\nunsigned IOCTL_IPMICTL_SET_MY_ADDRESS_CMD = IPMICTL_SET_MY_ADDRESS_CMD;\nunsigned IOCTL_IPMICTL_GET_MY_ADDRESS_CMD = IPMICTL_GET_MY_ADDRESS_CMD;\nunsigned IOCTL_IPMICTL_SET_MY_LUN_CMD = IPMICTL_SET_MY_LUN_CMD;\nunsigned IOCTL_IPMICTL_GET_MY_LUN_CMD = IPMICTL_GET_MY_LUN_CMD;\nunsigned IOCTL_SNDCTL_DSP_RESET = SNDCTL_DSP_RESET;\nunsigned IOCTL_SNDCTL_DSP_SYNC = SNDCTL_DSP_SYNC;\nunsigned IOCTL_SNDCTL_DSP_SPEED = SNDCTL_DSP_SPEED;\nunsigned IOCTL_SOUND_PCM_READ_RATE = SOUND_PCM_READ_RATE;\nunsigned IOCTL_SNDCTL_DSP_STEREO = SNDCTL_DSP_STEREO;\nunsigned IOCTL_SNDCTL_DSP_GETBLKSIZE = SNDCTL_DSP_GETBLKSIZE;\nunsigned IOCTL_SNDCTL_DSP_SETFMT = SNDCTL_DSP_SETFMT;\nunsigned IOCTL_SOUND_PCM_READ_BITS = SOUND_PCM_READ_BITS;\nunsigned IOCTL_SNDCTL_DSP_CHANNELS = SNDCTL_DSP_CHANNELS;\nunsigned IOCTL_SOUND_PCM_READ_CHANNELS = SOUND_PCM_READ_CHANNELS;\nunsigned IOCTL_SOUND_PCM_WRITE_FILTER = SOUND_PCM_WRITE_FILTER;\nunsigned IOCTL_SOUND_PCM_READ_FILTER = SOUND_PCM_READ_FILTER;\nunsigned IOCTL_SNDCTL_DSP_POST = SNDCTL_DSP_POST;\nunsigned IOCTL_SNDCTL_DSP_SUBDIVIDE = SNDCTL_DSP_SUBDIVIDE;\nunsigned IOCTL_SNDCTL_DSP_SETFRAGMENT = SNDCTL_DSP_SETFRAGMENT;\nunsigned IOCTL_SNDCTL_DSP_GETFMTS = SNDCTL_DSP_GETFMTS;\nunsigned IOCTL_SNDCTL_DSP_GETOSPACE = SNDCTL_DSP_GETOSPACE;\nunsigned IOCTL_SNDCTL_DSP_GETISPACE = SNDCTL_DSP_GETISPACE;\nunsigned 
IOCTL_SNDCTL_DSP_NONBLOCK = SNDCTL_DSP_NONBLOCK;\nunsigned IOCTL_SNDCTL_DSP_GETCAPS = SNDCTL_DSP_GETCAPS;\nunsigned IOCTL_SNDCTL_DSP_GETTRIGGER = SNDCTL_DSP_GETTRIGGER;\nunsigned IOCTL_SNDCTL_DSP_SETTRIGGER = SNDCTL_DSP_SETTRIGGER;\nunsigned IOCTL_SNDCTL_DSP_GETIPTR = SNDCTL_DSP_GETIPTR;\nunsigned IOCTL_SNDCTL_DSP_GETOPTR = SNDCTL_DSP_GETOPTR;\nunsigned IOCTL_SNDCTL_DSP_MAPINBUF = SNDCTL_DSP_MAPINBUF;\nunsigned IOCTL_SNDCTL_DSP_MAPOUTBUF = SNDCTL_DSP_MAPOUTBUF;\nunsigned IOCTL_SNDCTL_DSP_SETSYNCRO = SNDCTL_DSP_SETSYNCRO;\nunsigned IOCTL_SNDCTL_DSP_SETDUPLEX = SNDCTL_DSP_SETDUPLEX;\nunsigned IOCTL_SNDCTL_DSP_PROFILE = SNDCTL_DSP_PROFILE;\nunsigned IOCTL_SNDCTL_DSP_GETODELAY = SNDCTL_DSP_GETODELAY;\nunsigned IOCTL_SOUND_MIXER_INFO = SOUND_MIXER_INFO;\nunsigned IOCTL_SOUND_OLD_MIXER_INFO = SOUND_OLD_MIXER_INFO;\nunsigned IOCTL_OSS_GETVERSION = OSS_GETVERSION;\nunsigned IOCTL_SNDCTL_SYSINFO = SNDCTL_SYSINFO;\nunsigned IOCTL_SNDCTL_AUDIOINFO = SNDCTL_AUDIOINFO;\nunsigned IOCTL_SNDCTL_ENGINEINFO = SNDCTL_ENGINEINFO;\nunsigned IOCTL_SNDCTL_DSP_GETPLAYVOL = SNDCTL_DSP_GETPLAYVOL;\nunsigned IOCTL_SNDCTL_DSP_SETPLAYVOL = SNDCTL_DSP_SETPLAYVOL;\nunsigned IOCTL_SNDCTL_DSP_GETRECVOL = SNDCTL_DSP_GETRECVOL;\nunsigned IOCTL_SNDCTL_DSP_SETRECVOL = SNDCTL_DSP_SETRECVOL;\nunsigned IOCTL_SNDCTL_DSP_SKIP = SNDCTL_DSP_SKIP;\nunsigned IOCTL_SNDCTL_DSP_SILENCE = SNDCTL_DSP_SILENCE;\n\nconst int si_SEGV_MAPERR = SEGV_MAPERR;\nconst int si_SEGV_ACCERR = SEGV_ACCERR;\n\nconst int modctl_load = MODCTL_LOAD;\nconst int modctl_unload = MODCTL_UNLOAD;\nconst int modctl_stat = MODCTL_STAT;\nconst int modctl_exists = MODCTL_EXISTS;\n\nconst unsigned SHA1_CTX_sz = sizeof(SHA1_CTX);\nconst unsigned SHA1_return_length = SHA1_DIGEST_STRING_LENGTH;\n\nconst unsigned MD4_CTX_sz = sizeof(MD4_CTX);\nconst unsigned MD4_return_length = MD4_DIGEST_STRING_LENGTH;\n\nconst unsigned RMD160_CTX_sz = sizeof(RMD160_CTX);\nconst unsigned RMD160_return_length = RMD160_DIGEST_STRING_LENGTH;\n\nconst unsigned 
MD5_CTX_sz = sizeof(MD5_CTX);\nconst unsigned MD5_return_length = MD5_DIGEST_STRING_LENGTH;\n\nconst unsigned fpos_t_sz = sizeof(fpos_t);\n\nconst unsigned MD2_CTX_sz = sizeof(MD2_CTX);\nconst unsigned MD2_return_length = MD2_DIGEST_STRING_LENGTH;\n\n#define SHA2_CONST(LEN)                                                      \\\n  const unsigned SHA##LEN##_CTX_sz = sizeof(SHA##LEN##_CTX);                 \\\n  const unsigned SHA##LEN##_return_length = SHA##LEN##_DIGEST_STRING_LENGTH; \\\n  const unsigned SHA##LEN##_block_length = SHA##LEN##_BLOCK_LENGTH;          \\\n  const unsigned SHA##LEN##_digest_length = SHA##LEN##_DIGEST_LENGTH\n\nSHA2_CONST(224);\nSHA2_CONST(256);\nSHA2_CONST(384);\nSHA2_CONST(512);\n\n#undef SHA2_CONST\n\nconst int unvis_valid = UNVIS_VALID;\nconst int unvis_validpush = UNVIS_VALIDPUSH;\n}  // namespace __sanitizer\n\nusing namespace __sanitizer;\n\nCOMPILER_CHECK(sizeof(__sanitizer_pthread_attr_t) >= sizeof(pthread_attr_t));\n\nCOMPILER_CHECK(sizeof(socklen_t) == sizeof(unsigned));\nCHECK_TYPE_SIZE(pthread_key_t);\n\n// There are more undocumented fields in dl_phdr_info that we are not interested\n// in.\nCOMPILER_CHECK(sizeof(__sanitizer_dl_phdr_info) <= sizeof(dl_phdr_info));\nCHECK_SIZE_AND_OFFSET(dl_phdr_info, dlpi_addr);\nCHECK_SIZE_AND_OFFSET(dl_phdr_info, dlpi_name);\nCHECK_SIZE_AND_OFFSET(dl_phdr_info, dlpi_phdr);\nCHECK_SIZE_AND_OFFSET(dl_phdr_info, dlpi_phnum);\n\nCHECK_TYPE_SIZE(glob_t);\nCHECK_SIZE_AND_OFFSET(glob_t, gl_pathc);\nCHECK_SIZE_AND_OFFSET(glob_t, gl_pathv);\nCHECK_SIZE_AND_OFFSET(glob_t, gl_offs);\nCHECK_SIZE_AND_OFFSET(glob_t, gl_flags);\nCHECK_SIZE_AND_OFFSET(glob_t, gl_closedir);\nCHECK_SIZE_AND_OFFSET(glob_t, gl_readdir);\nCHECK_SIZE_AND_OFFSET(glob_t, gl_opendir);\nCHECK_SIZE_AND_OFFSET(glob_t, gl_lstat);\nCHECK_SIZE_AND_OFFSET(glob_t, gl_stat);\n\nCHECK_TYPE_SIZE(addrinfo);\nCHECK_SIZE_AND_OFFSET(addrinfo, ai_flags);\nCHECK_SIZE_AND_OFFSET(addrinfo, ai_family);\nCHECK_SIZE_AND_OFFSET(addrinfo, 
ai_socktype);\nCHECK_SIZE_AND_OFFSET(addrinfo, ai_protocol);\nCHECK_SIZE_AND_OFFSET(addrinfo, ai_protocol);\nCHECK_SIZE_AND_OFFSET(addrinfo, ai_addrlen);\nCHECK_SIZE_AND_OFFSET(addrinfo, ai_canonname);\nCHECK_SIZE_AND_OFFSET(addrinfo, ai_addr);\n\nCHECK_TYPE_SIZE(hostent);\nCHECK_SIZE_AND_OFFSET(hostent, h_name);\nCHECK_SIZE_AND_OFFSET(hostent, h_aliases);\nCHECK_SIZE_AND_OFFSET(hostent, h_addrtype);\nCHECK_SIZE_AND_OFFSET(hostent, h_length);\nCHECK_SIZE_AND_OFFSET(hostent, h_addr_list);\n\nCHECK_TYPE_SIZE(iovec);\nCHECK_SIZE_AND_OFFSET(iovec, iov_base);\nCHECK_SIZE_AND_OFFSET(iovec, iov_len);\n\nCHECK_TYPE_SIZE(msghdr);\nCHECK_SIZE_AND_OFFSET(msghdr, msg_name);\nCHECK_SIZE_AND_OFFSET(msghdr, msg_namelen);\nCHECK_SIZE_AND_OFFSET(msghdr, msg_iov);\nCHECK_SIZE_AND_OFFSET(msghdr, msg_iovlen);\nCHECK_SIZE_AND_OFFSET(msghdr, msg_control);\nCHECK_SIZE_AND_OFFSET(msghdr, msg_controllen);\nCHECK_SIZE_AND_OFFSET(msghdr, msg_flags);\n\nCHECK_TYPE_SIZE(cmsghdr);\nCHECK_SIZE_AND_OFFSET(cmsghdr, cmsg_len);\nCHECK_SIZE_AND_OFFSET(cmsghdr, cmsg_level);\nCHECK_SIZE_AND_OFFSET(cmsghdr, cmsg_type);\n\nCOMPILER_CHECK(sizeof(__sanitizer_dirent) <= sizeof(dirent));\nCHECK_SIZE_AND_OFFSET(dirent, d_fileno);\nCHECK_SIZE_AND_OFFSET(dirent, d_reclen);\n\nCHECK_TYPE_SIZE(ifconf);\nCHECK_SIZE_AND_OFFSET(ifconf, ifc_len);\nCHECK_SIZE_AND_OFFSET(ifconf, ifc_ifcu);\n\nCHECK_TYPE_SIZE(pollfd);\nCHECK_SIZE_AND_OFFSET(pollfd, fd);\nCHECK_SIZE_AND_OFFSET(pollfd, events);\nCHECK_SIZE_AND_OFFSET(pollfd, revents);\n\nCHECK_TYPE_SIZE(nfds_t);\n\nCHECK_TYPE_SIZE(sigset_t);\n\nCOMPILER_CHECK(sizeof(__sanitizer_sigaction) == sizeof(struct sigaction));\n// Can't write checks for sa_handler and sa_sigaction due to them being\n// preprocessor macros.\nCHECK_STRUCT_SIZE_AND_OFFSET(sigaction, sa_mask);\n\nCHECK_TYPE_SIZE(wordexp_t);\nCHECK_SIZE_AND_OFFSET(wordexp_t, we_wordc);\nCHECK_SIZE_AND_OFFSET(wordexp_t, we_wordv);\nCHECK_SIZE_AND_OFFSET(wordexp_t, we_offs);\n\nCOMPILER_CHECK(sizeof(__sanitizer_FILE) <= 
sizeof(FILE));\nCHECK_SIZE_AND_OFFSET(FILE, _p);\nCHECK_SIZE_AND_OFFSET(FILE, _r);\nCHECK_SIZE_AND_OFFSET(FILE, _w);\nCHECK_SIZE_AND_OFFSET(FILE, _flags);\nCHECK_SIZE_AND_OFFSET(FILE, _file);\nCHECK_SIZE_AND_OFFSET(FILE, _bf);\nCHECK_SIZE_AND_OFFSET(FILE, _lbfsize);\nCHECK_SIZE_AND_OFFSET(FILE, _cookie);\nCHECK_SIZE_AND_OFFSET(FILE, _close);\nCHECK_SIZE_AND_OFFSET(FILE, _read);\nCHECK_SIZE_AND_OFFSET(FILE, _seek);\nCHECK_SIZE_AND_OFFSET(FILE, _write);\nCHECK_SIZE_AND_OFFSET(FILE, _ext);\nCHECK_SIZE_AND_OFFSET(FILE, _up);\nCHECK_SIZE_AND_OFFSET(FILE, _ur);\nCHECK_SIZE_AND_OFFSET(FILE, _ubuf);\nCHECK_SIZE_AND_OFFSET(FILE, _nbuf);\nCHECK_SIZE_AND_OFFSET(FILE, _flush);\nCHECK_SIZE_AND_OFFSET(FILE, _lb_unused);\nCHECK_SIZE_AND_OFFSET(FILE, _blksize);\nCHECK_SIZE_AND_OFFSET(FILE, _offset);\n\nCHECK_TYPE_SIZE(tm);\nCHECK_SIZE_AND_OFFSET(tm, tm_sec);\nCHECK_SIZE_AND_OFFSET(tm, tm_min);\nCHECK_SIZE_AND_OFFSET(tm, tm_hour);\nCHECK_SIZE_AND_OFFSET(tm, tm_mday);\nCHECK_SIZE_AND_OFFSET(tm, tm_mon);\nCHECK_SIZE_AND_OFFSET(tm, tm_year);\nCHECK_SIZE_AND_OFFSET(tm, tm_wday);\nCHECK_SIZE_AND_OFFSET(tm, tm_yday);\nCHECK_SIZE_AND_OFFSET(tm, tm_isdst);\nCHECK_SIZE_AND_OFFSET(tm, tm_gmtoff);\nCHECK_SIZE_AND_OFFSET(tm, tm_zone);\n\nCHECK_TYPE_SIZE(ether_addr);\n\nCHECK_TYPE_SIZE(ipc_perm);\nCHECK_SIZE_AND_OFFSET(ipc_perm, _key);\nCHECK_SIZE_AND_OFFSET(ipc_perm, _seq);\nCHECK_SIZE_AND_OFFSET(ipc_perm, uid);\nCHECK_SIZE_AND_OFFSET(ipc_perm, gid);\nCHECK_SIZE_AND_OFFSET(ipc_perm, cuid);\nCHECK_SIZE_AND_OFFSET(ipc_perm, cgid);\nCHECK_SIZE_AND_OFFSET(ipc_perm, mode);\n\nCHECK_TYPE_SIZE(shmid_ds);\nCHECK_SIZE_AND_OFFSET(shmid_ds, shm_perm);\nCHECK_SIZE_AND_OFFSET(shmid_ds, shm_segsz);\nCHECK_SIZE_AND_OFFSET(shmid_ds, shm_atime);\nCHECK_SIZE_AND_OFFSET(shmid_ds, shm_dtime);\nCHECK_SIZE_AND_OFFSET(shmid_ds, shm_ctime);\nCHECK_SIZE_AND_OFFSET(shmid_ds, shm_cpid);\nCHECK_SIZE_AND_OFFSET(shmid_ds, shm_lpid);\nCHECK_SIZE_AND_OFFSET(shmid_ds, 
shm_nattch);\n\nCHECK_TYPE_SIZE(clock_t);\n\nCHECK_TYPE_SIZE(ifaddrs);\nCHECK_SIZE_AND_OFFSET(ifaddrs, ifa_next);\nCHECK_SIZE_AND_OFFSET(ifaddrs, ifa_name);\nCHECK_SIZE_AND_OFFSET(ifaddrs, ifa_addr);\nCHECK_SIZE_AND_OFFSET(ifaddrs, ifa_netmask);\n// Compare against the union, because we can't reach into the union in a\n// compliant way.\n#ifdef ifa_dstaddr\n#undef ifa_dstaddr\n#endif\nCHECK_SIZE_AND_OFFSET(ifaddrs, ifa_dstaddr);\nCHECK_SIZE_AND_OFFSET(ifaddrs, ifa_data);\n\nCHECK_TYPE_SIZE(timeb);\nCHECK_SIZE_AND_OFFSET(timeb, time);\nCHECK_SIZE_AND_OFFSET(timeb, millitm);\nCHECK_SIZE_AND_OFFSET(timeb, timezone);\nCHECK_SIZE_AND_OFFSET(timeb, dstflag);\n\nCHECK_TYPE_SIZE(passwd);\nCHECK_SIZE_AND_OFFSET(passwd, pw_name);\nCHECK_SIZE_AND_OFFSET(passwd, pw_passwd);\nCHECK_SIZE_AND_OFFSET(passwd, pw_uid);\nCHECK_SIZE_AND_OFFSET(passwd, pw_gid);\nCHECK_SIZE_AND_OFFSET(passwd, pw_dir);\nCHECK_SIZE_AND_OFFSET(passwd, pw_shell);\n\nCHECK_SIZE_AND_OFFSET(passwd, pw_gecos);\n\nCHECK_TYPE_SIZE(group);\nCHECK_SIZE_AND_OFFSET(group, gr_name);\nCHECK_SIZE_AND_OFFSET(group, gr_passwd);\nCHECK_SIZE_AND_OFFSET(group, gr_gid);\nCHECK_SIZE_AND_OFFSET(group, gr_mem);\n\nCHECK_TYPE_SIZE(modctl_load_t);\nCHECK_SIZE_AND_OFFSET(modctl_load_t, ml_filename);\nCHECK_SIZE_AND_OFFSET(modctl_load_t, ml_flags);\nCHECK_SIZE_AND_OFFSET(modctl_load_t, ml_props);\nCHECK_SIZE_AND_OFFSET(modctl_load_t, ml_propslen);\n\n// Compat with 9.0\nstruct statvfs90 {\n  unsigned long f_flag;\n  unsigned long f_bsize;\n  unsigned long f_frsize;\n  unsigned long f_iosize;\n\n  u64 f_blocks;\n  u64 f_bfree;\n  u64 f_bavail;\n  u64 f_bresvd;\n\n  u64 f_files;\n  u64 f_ffree;\n  u64 f_favail;\n  u64 f_fresvd;\n\n  u64 f_syncreads;\n  u64 f_syncwrites;\n\n  u64 f_asyncreads;\n  u64 f_asyncwrites;\n\n  struct {\n    s32 __fsid_val[2];\n  } f_fsidx;\n  unsigned long f_fsid;\n  unsigned long f_namemax;\n  u32 f_owner;\n\n  u32 f_spare[4];\n\n  char f_fstypename[32];\n  char f_mntonname[32];\n  char 
f_mntfromname[32];\n};\nunsigned struct_statvfs90_sz = sizeof(struct statvfs90);\n\n#endif  // SANITIZER_NETBSD\n"
  },
  {
    "path": "runtime/sanitizer_common/sanitizer_platform_limits_netbsd.h",
    "content": "//===-- sanitizer_platform_limits_netbsd.h --------------------------------===//\n//\n// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n// See https://llvm.org/LICENSE.txt for license information.\n// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n//\n//===----------------------------------------------------------------------===//\n//\n// This file is a part of Sanitizer common code.\n//\n// Sizes and layouts of platform-specific NetBSD data structures.\n//===----------------------------------------------------------------------===//\n\n#ifndef SANITIZER_PLATFORM_LIMITS_NETBSD_H\n#define SANITIZER_PLATFORM_LIMITS_NETBSD_H\n\n#if SANITIZER_NETBSD\n\n#include \"sanitizer_internal_defs.h\"\n#include \"sanitizer_platform.h\"\n\nnamespace __sanitizer {\nvoid *__sanitizer_get_link_map_by_dlopen_handle(void *handle);\n#define GET_LINK_MAP_BY_DLOPEN_HANDLE(handle) \\\n  (link_map *)__sanitizer_get_link_map_by_dlopen_handle(handle)\n\nextern unsigned struct_utsname_sz;\nextern unsigned struct_stat_sz;\nextern unsigned struct_rusage_sz;\nextern unsigned siginfo_t_sz;\nextern unsigned struct_itimerval_sz;\nextern unsigned pthread_t_sz;\nextern unsigned pthread_mutex_t_sz;\nextern unsigned pthread_cond_t_sz;\nextern unsigned pid_t_sz;\nextern unsigned timeval_sz;\nextern unsigned uid_t_sz;\nextern unsigned gid_t_sz;\nextern unsigned mbstate_t_sz;\nextern unsigned struct_timezone_sz;\nextern unsigned struct_tms_sz;\nextern unsigned struct_itimerspec_sz;\nextern unsigned struct_sigevent_sz;\nextern unsigned struct_stack_t_sz;\nextern unsigned struct_sched_param_sz;\nextern unsigned struct_statfs_sz;\nextern unsigned struct_sockaddr_sz;\nunsigned ucontext_t_sz(void *ctx);\n\nextern unsigned struct_rlimit_sz;\nextern unsigned struct_utimbuf_sz;\nextern unsigned struct_timespec_sz;\nextern unsigned struct_sembuf_sz;\n\nextern unsigned struct_kevent_sz;\nextern unsigned struct_FTS_sz;\nextern unsigned 
struct_FTSENT_sz;\n\nextern unsigned struct_regex_sz;\nextern unsigned struct_regmatch_sz;\n\nextern unsigned struct_fstab_sz;\n\nstruct __sanitizer_regmatch {\n  OFF_T rm_so;\n  OFF_T rm_eo;\n};\n\ntypedef struct __sanitizer_modctl_load {\n  const char *ml_filename;\n  int ml_flags;\n  const char *ml_props;\n  uptr ml_propslen;\n} __sanitizer_modctl_load_t;\nextern const int modctl_load;\nextern const int modctl_unload;\nextern const int modctl_stat;\nextern const int modctl_exists;\n\nunion __sanitizer_sigval {\n  int sival_int;\n  uptr sival_ptr;\n};\n\nstruct __sanitizer_sigevent {\n  int sigev_notify;\n  int sigev_signo;\n  union __sanitizer_sigval sigev_value;\n  uptr sigev_notify_function;\n  uptr sigev_notify_attributes;\n};\n\nstruct __sanitizer_aiocb {\n  u64 aio_offset;\n  uptr aio_buf;\n  uptr aio_nbytes;\n  int aio_fildes;\n  int aio_lio_opcode;\n  int aio_reqprio;\n  struct __sanitizer_sigevent aio_sigevent;\n  int _state;\n  int _errno;\n  long _retval;\n};\n\nstruct __sanitizer_sem_t {\n  uptr data[5];\n};\n\nstruct __sanitizer_ipc_perm {\n  u32 uid;\n  u32 gid;\n  u32 cuid;\n  u32 cgid;\n  u32 mode;\n  unsigned short _seq;\n  long _key;\n};\n\nstruct __sanitizer_shmid_ds {\n  __sanitizer_ipc_perm shm_perm;\n  unsigned long shm_segsz;\n  u32 shm_lpid;\n  u32 shm_cpid;\n  unsigned int shm_nattch;\n  u64 shm_atime;\n  u64 shm_dtime;\n  u64 shm_ctime;\n  void *_shm_internal;\n};\n\nstruct __sanitizer_protoent {\n  char *p_name;\n  char **p_aliases;\n  int p_proto;\n};\n\nstruct __sanitizer_netent {\n  char *n_name;\n  char **n_aliases;\n  int n_addrtype;\n  u32 n_net;\n};\n\nextern unsigned struct_msqid_ds_sz;\nextern unsigned struct_mq_attr_sz;\nextern unsigned struct_timex_sz;\nextern unsigned struct_statvfs_sz;\n\nstruct __sanitizer_iovec {\n  void *iov_base;\n  uptr iov_len;\n};\n\nstruct __sanitizer_ifaddrs {\n  struct __sanitizer_ifaddrs *ifa_next;\n  char *ifa_name;\n  unsigned int ifa_flags;\n  void *ifa_addr;     // (struct sockaddr *)\n  void 
*ifa_netmask;  // (struct sockaddr *)\n  void *ifa_dstaddr;  // (struct sockaddr *)\n  void *ifa_data;\n  unsigned int ifa_addrflags;\n};\n\ntypedef unsigned int __sanitizer_socklen_t;\n\ntypedef unsigned __sanitizer_pthread_key_t;\n\ntypedef long long __sanitizer_time_t;\ntypedef int __sanitizer_suseconds_t;\n\nstruct __sanitizer_timeval {\n  __sanitizer_time_t tv_sec;\n  __sanitizer_suseconds_t tv_usec;\n};\n\nstruct __sanitizer_itimerval {\n  struct __sanitizer_timeval it_interval;\n  struct __sanitizer_timeval it_value;\n};\n\nstruct __sanitizer_timespec {\n  __sanitizer_time_t tv_sec;\n  long tv_nsec;\n};\n\nstruct __sanitizer_passwd {\n  char *pw_name;\n  char *pw_passwd;\n  int pw_uid;\n  int pw_gid;\n  __sanitizer_time_t pw_change;\n  char *pw_class;\n  char *pw_gecos;\n  char *pw_dir;\n  char *pw_shell;\n  __sanitizer_time_t pw_expire;\n};\n\nstruct __sanitizer_group {\n  char *gr_name;\n  char *gr_passwd;\n  int gr_gid;\n  char **gr_mem;\n};\n\nstruct __sanitizer_timeb {\n  __sanitizer_time_t time;\n  unsigned short millitm;\n  short timezone;\n  short dstflag;\n};\n\nstruct __sanitizer_ether_addr {\n  u8 octet[6];\n};\n\nstruct __sanitizer_tm {\n  int tm_sec;\n  int tm_min;\n  int tm_hour;\n  int tm_mday;\n  int tm_mon;\n  int tm_year;\n  int tm_wday;\n  int tm_yday;\n  int tm_isdst;\n  long int tm_gmtoff;\n  const char *tm_zone;\n};\n\nstruct __sanitizer_msghdr {\n  void *msg_name;\n  unsigned msg_namelen;\n  struct __sanitizer_iovec *msg_iov;\n  unsigned msg_iovlen;\n  void *msg_control;\n  unsigned msg_controllen;\n  int msg_flags;\n};\n\nstruct __sanitizer_mmsghdr {\n  struct __sanitizer_msghdr msg_hdr;\n  unsigned int msg_len;\n};\n\nstruct __sanitizer_cmsghdr {\n  unsigned cmsg_len;\n  int cmsg_level;\n  int cmsg_type;\n};\n\nstruct __sanitizer_dirent {\n  u64 d_fileno;\n  u16 d_reclen;\n  // more fields that we don't care about\n};\n\ntypedef int __sanitizer_clock_t;\ntypedef int __sanitizer_clockid_t;\n\ntypedef u32 
__sanitizer___kernel_uid_t;\ntypedef u32 __sanitizer___kernel_gid_t;\ntypedef u64 __sanitizer___kernel_off_t;\ntypedef struct {\n  u32 fds_bits[8];\n} __sanitizer___kernel_fd_set;\n\ntypedef struct {\n  unsigned int pta_magic;\n  int pta_flags;\n  void *pta_private;\n} __sanitizer_pthread_attr_t;\n\nstruct __sanitizer_sigset_t {\n  // uint32_t * 4\n  unsigned int __bits[4];\n};\n\nstruct __sanitizer_siginfo {\n  // The size is determined by looking at sizeof of real siginfo_t on linux.\n  u64 opaque[128 / sizeof(u64)];\n};\n\nusing __sanitizer_sighandler_ptr = void (*)(int sig);\nusing __sanitizer_sigactionhandler_ptr = void (*)(int sig,\n                                                  __sanitizer_siginfo *siginfo,\n                                                  void *uctx);\n\nstruct __sanitizer_sigaction {\n  union {\n    __sanitizer_sighandler_ptr handler;\n    __sanitizer_sigactionhandler_ptr sigaction;\n  };\n  __sanitizer_sigset_t sa_mask;\n  int sa_flags;\n};\n\nextern unsigned struct_sigaltstack_sz;\n\ntypedef unsigned int __sanitizer_sigset13_t;\n\nstruct __sanitizer_sigaction13 {\n  __sanitizer_sighandler_ptr osa_handler;\n  __sanitizer_sigset13_t osa_mask;\n  int osa_flags;\n};\n\nstruct __sanitizer_sigaltstack {\n  void *ss_sp;\n  uptr ss_size;\n  int ss_flags;\n};\n\ntypedef __sanitizer_sigset_t __sanitizer_kernel_sigset_t;\n\nstruct __sanitizer_kernel_sigaction_t {\n  union {\n    void (*handler)(int signo);\n    void (*sigaction)(int signo, void *info, void *ctx);\n  };\n  unsigned long sa_flags;\n  void (*sa_restorer)(void);\n  __sanitizer_kernel_sigset_t sa_mask;\n};\n\nextern const uptr sig_ign;\nextern const uptr sig_dfl;\nextern const uptr sig_err;\nextern const uptr sa_siginfo;\n\nextern int af_inet;\nextern int af_inet6;\nuptr __sanitizer_in_addr_sz(int af);\n\nstruct __sanitizer_dl_phdr_info {\n  uptr dlpi_addr;\n  const char *dlpi_name;\n  const void *dlpi_phdr;\n  short dlpi_phnum;\n};\n\nextern unsigned struct_ElfW_Phdr_sz;\n\nstruct 
__sanitizer_addrinfo {\n  int ai_flags;\n  int ai_family;\n  int ai_socktype;\n  int ai_protocol;\n#if defined(__sparc__) && defined(_LP64)\n  int __ai_pad0;\n#endif\n  unsigned ai_addrlen;\n#if defined(__alpha__) || (defined(__i386__) && defined(_LP64))\n  int __ai_pad0;\n#endif\n  char *ai_canonname;\n  void *ai_addr;\n  struct __sanitizer_addrinfo *ai_next;\n};\n\nstruct __sanitizer_hostent {\n  char *h_name;\n  char **h_aliases;\n  int h_addrtype;\n  int h_length;\n  char **h_addr_list;\n};\n\nstruct __sanitizer_pollfd {\n  int fd;\n  short events;\n  short revents;\n};\n\ntypedef unsigned __sanitizer_nfds_t;\n\ntypedef int __sanitizer_lwpid_t;\n\nstruct __sanitizer_glob_t {\n  uptr gl_pathc;\n  uptr gl_matchc;\n  uptr gl_offs;\n  int gl_flags;\n  char **gl_pathv;\n  int (*gl_errfunc)(const char *, int);\n  void (*gl_closedir)(void *dirp);\n  struct dirent *(*gl_readdir)(void *dirp);\n  void *(*gl_opendir)(const char *);\n  int (*gl_lstat)(const char *, void * /* struct stat* */);\n  int (*gl_stat)(const char *, void * /* struct stat* */);\n};\n\nextern int glob_nomatch;\nextern int glob_altdirfunc;\nextern const int wordexp_wrde_dooffs;\n\nextern unsigned path_max;\n\nextern int struct_ttyent_sz;\n\nextern int ptrace_pt_io;\nextern int ptrace_pt_lwpinfo;\nextern int ptrace_pt_set_event_mask;\nextern int ptrace_pt_get_event_mask;\nextern int ptrace_pt_get_process_state;\nextern int ptrace_pt_set_siginfo;\nextern int ptrace_pt_get_siginfo;\nextern int ptrace_pt_lwpstatus;\nextern int ptrace_pt_lwpnext;\nextern int ptrace_piod_read_d;\nextern int ptrace_piod_write_d;\nextern int ptrace_piod_read_i;\nextern int ptrace_piod_write_i;\nextern int ptrace_piod_read_auxv;\nextern int ptrace_pt_setregs;\nextern int ptrace_pt_getregs;\nextern int ptrace_pt_setfpregs;\nextern int ptrace_pt_getfpregs;\nextern int ptrace_pt_setdbregs;\nextern int ptrace_pt_getdbregs;\n\nstruct __sanitizer_ptrace_io_desc {\n  int piod_op;\n  void *piod_offs;\n  void *piod_addr;\n  uptr 
piod_len;\n};\n\nstruct __sanitizer_ptrace_lwpinfo {\n  __sanitizer_lwpid_t pl_lwpid;\n  int pl_event;\n};\n\nstruct __sanitizer_ptrace_lwpstatus {\n  __sanitizer_lwpid_t pl_lwpid;\n  __sanitizer_sigset_t pl_sigpend;\n  __sanitizer_sigset_t pl_sigmask;\n  char pl_name[20];\n  void *pl_private;\n};\n\nextern unsigned struct_ptrace_ptrace_io_desc_struct_sz;\nextern unsigned struct_ptrace_ptrace_lwpinfo_struct_sz;\nextern unsigned struct_ptrace_ptrace_lwpstatus_struct_sz;\nextern unsigned struct_ptrace_ptrace_event_struct_sz;\nextern unsigned struct_ptrace_ptrace_siginfo_struct_sz;\n\nextern unsigned struct_ptrace_reg_struct_sz;\nextern unsigned struct_ptrace_fpreg_struct_sz;\nextern unsigned struct_ptrace_dbreg_struct_sz;\n\nstruct __sanitizer_wordexp_t {\n  uptr we_wordc;\n  char **we_wordv;\n  uptr we_offs;\n  char *we_strings;\n  uptr we_nbytes;\n};\n\nstruct __sanitizer_FILE {\n  unsigned char *_p;\n  int _r;\n  int _w;\n  unsigned short _flags;\n  short _file;\n  struct {\n    unsigned char *_base;\n    int _size;\n  } _bf;\n  int _lbfsize;\n  void *_cookie;\n  int (*_close)(void *ptr);\n  u64 (*_read)(void *, void *, uptr);\n  u64 (*_seek)(void *, u64, int);\n  uptr (*_write)(void *, const void *, uptr);\n  struct {\n    unsigned char *_base;\n    int _size;\n  } _ext;\n  unsigned char *_up;\n  int _ur;\n  unsigned char _ubuf[3];\n  unsigned char _nbuf[1];\n  int (*_flush)(void *ptr);\n  char _lb_unused[sizeof(uptr)];\n  int _blksize;\n  u64 _offset;\n};\n#define SANITIZER_HAS_STRUCT_FILE 1\n\nextern int shmctl_ipc_stat;\n\n// This simplifies generic code\n#define struct_shminfo_sz -1\n#define struct_shm_info_sz -1\n#define shmctl_shm_stat -1\n#define shmctl_ipc_info -1\n#define shmctl_shm_info -1\n\nextern unsigned struct_utmp_sz;\nextern unsigned struct_utmpx_sz;\n\nextern int map_fixed;\n\n// ioctl arguments\nstruct __sanitizer_ifconf {\n  int ifc_len;\n  union {\n    void *ifcu_req;\n  } ifc_ifcu;\n};\n\nstruct __sanitizer_ttyent {\n  char *ty_name;\n  char 
*ty_getty;\n  char *ty_type;\n  int ty_status;\n  char *ty_window;\n  char *ty_comment;\n  char *ty_class;\n};\n\nextern const unsigned long __sanitizer_bufsiz;\n\n#define IOC_NRBITS 8\n#define IOC_TYPEBITS 8\n#define IOC_SIZEBITS 14\n#define IOC_DIRBITS 2\n#define IOC_NONE 0U\n#define IOC_WRITE 1U\n#define IOC_READ 2U\n#define IOC_NRMASK ((1 << IOC_NRBITS) - 1)\n#define IOC_TYPEMASK ((1 << IOC_TYPEBITS) - 1)\n#define IOC_SIZEMASK ((1 << IOC_SIZEBITS) - 1)\n#undef IOC_DIRMASK\n#define IOC_DIRMASK ((1 << IOC_DIRBITS) - 1)\n#define IOC_NRSHIFT 0\n#define IOC_TYPESHIFT (IOC_NRSHIFT + IOC_NRBITS)\n#define IOC_SIZESHIFT (IOC_TYPESHIFT + IOC_TYPEBITS)\n#define IOC_DIRSHIFT (IOC_SIZESHIFT + IOC_SIZEBITS)\n#define EVIOC_EV_MAX 0x1f\n#define EVIOC_ABS_MAX 0x3f\n\n#define IOC_DIR(nr) (((nr) >> IOC_DIRSHIFT) & IOC_DIRMASK)\n#define IOC_TYPE(nr) (((nr) >> IOC_TYPESHIFT) & IOC_TYPEMASK)\n#define IOC_NR(nr) (((nr) >> IOC_NRSHIFT) & IOC_NRMASK)\n#define IOC_SIZE(nr) (((nr) >> IOC_SIZESHIFT) & IOC_SIZEMASK)\n\n// ioctl request identifiers\n\nextern unsigned struct_altqreq_sz;\nextern unsigned struct_amr_user_ioctl_sz;\nextern unsigned struct_ap_control_sz;\nextern unsigned struct_apm_ctl_sz;\nextern unsigned struct_apm_event_info_sz;\nextern unsigned struct_apm_power_info_sz;\nextern unsigned struct_atabusiodetach_args_sz;\nextern unsigned struct_atabusioscan_args_sz;\nextern unsigned struct_ath_diag_sz;\nextern unsigned struct_atm_flowmap_sz;\nextern unsigned struct_audio_buf_info_sz;\nextern unsigned struct_audio_device_sz;\nextern unsigned struct_audio_encoding_sz;\nextern unsigned struct_audio_info_sz;\nextern unsigned struct_audio_offset_sz;\nextern unsigned struct_bio_locate_sz;\nextern unsigned struct_bioc_alarm_sz;\nextern unsigned struct_bioc_blink_sz;\nextern unsigned struct_bioc_disk_sz;\nextern unsigned struct_bioc_inq_sz;\nextern unsigned struct_bioc_setstate_sz;\nextern unsigned struct_bioc_vol_sz;\nextern unsigned struct_bioc_volops_sz;\nextern unsigned 
struct_bktr_chnlset_sz;\nextern unsigned struct_bktr_remote_sz;\nextern unsigned struct_blue_conf_sz;\nextern unsigned struct_blue_interface_sz;\nextern unsigned struct_blue_stats_sz;\nextern unsigned struct_bpf_dltlist_sz;\nextern unsigned struct_bpf_program_sz;\nextern unsigned struct_bpf_stat_old_sz;\nextern unsigned struct_bpf_stat_sz;\nextern unsigned struct_bpf_version_sz;\nextern unsigned struct_btreq_sz;\nextern unsigned struct_btsco_info_sz;\nextern unsigned struct_buffmem_desc_sz;\nextern unsigned struct_cbq_add_class_sz;\nextern unsigned struct_cbq_add_filter_sz;\nextern unsigned struct_cbq_delete_class_sz;\nextern unsigned struct_cbq_delete_filter_sz;\nextern unsigned struct_cbq_getstats_sz;\nextern unsigned struct_cbq_interface_sz;\nextern unsigned struct_cbq_modify_class_sz;\nextern unsigned struct_ccd_ioctl_sz;\nextern unsigned struct_cdnr_add_element_sz;\nextern unsigned struct_cdnr_add_filter_sz;\nextern unsigned struct_cdnr_add_tbmeter_sz;\nextern unsigned struct_cdnr_add_trtcm_sz;\nextern unsigned struct_cdnr_add_tswtcm_sz;\nextern unsigned struct_cdnr_delete_element_sz;\nextern unsigned struct_cdnr_delete_filter_sz;\nextern unsigned struct_cdnr_get_stats_sz;\nextern unsigned struct_cdnr_interface_sz;\nextern unsigned struct_cdnr_modify_tbmeter_sz;\nextern unsigned struct_cdnr_modify_trtcm_sz;\nextern unsigned struct_cdnr_modify_tswtcm_sz;\nextern unsigned struct_cdnr_tbmeter_stats_sz;\nextern unsigned struct_cdnr_tcm_stats_sz;\nextern unsigned struct_cgd_ioctl_sz;\nextern unsigned struct_cgd_user_sz;\nextern unsigned struct_changer_element_status_request_sz;\nextern unsigned struct_changer_exchange_request_sz;\nextern unsigned struct_changer_move_request_sz;\nextern unsigned struct_changer_params_sz;\nextern unsigned struct_changer_position_request_sz;\nextern unsigned struct_changer_set_voltag_request_sz;\nextern unsigned struct_clockctl_adjtime_sz;\nextern unsigned struct_clockctl_clock_settime_sz;\nextern unsigned 
struct_clockctl_ntp_adjtime_sz;\nextern unsigned struct_clockctl_settimeofday_sz;\nextern unsigned struct_cnwistats_sz;\nextern unsigned struct_cnwitrail_sz;\nextern unsigned struct_cnwstatus_sz;\nextern unsigned struct_count_info_sz;\nextern unsigned struct_cpu_ucode_sz;\nextern unsigned struct_cpu_ucode_version_sz;\nextern unsigned struct_crypt_kop_sz;\nextern unsigned struct_crypt_mkop_sz;\nextern unsigned struct_crypt_mop_sz;\nextern unsigned struct_crypt_op_sz;\nextern unsigned struct_crypt_result_sz;\nextern unsigned struct_crypt_sfop_sz;\nextern unsigned struct_crypt_sgop_sz;\nextern unsigned struct_cryptret_sz;\nextern unsigned struct_devdetachargs_sz;\nextern unsigned struct_devlistargs_sz;\nextern unsigned struct_devpmargs_sz;\nextern unsigned struct_devrescanargs_sz;\nextern unsigned struct_disk_badsecinfo_sz;\nextern unsigned struct_disk_strategy_sz;\nextern unsigned struct_disklabel_sz;\nextern unsigned struct_dkbad_sz;\nextern unsigned struct_dkwedge_info_sz;\nextern unsigned struct_dkwedge_list_sz;\nextern unsigned struct_dmio_setfunc_sz;\nextern unsigned struct_dmx_pes_filter_params_sz;\nextern unsigned struct_dmx_sct_filter_params_sz;\nextern unsigned struct_dmx_stc_sz;\nextern unsigned struct_dvb_diseqc_master_cmd_sz;\nextern unsigned struct_dvb_diseqc_slave_reply_sz;\nextern unsigned struct_dvb_frontend_event_sz;\nextern unsigned struct_dvb_frontend_info_sz;\nextern unsigned struct_dvb_frontend_parameters_sz;\nextern unsigned struct_eccapreq_sz;\nextern unsigned struct_fbcmap_sz;\nextern unsigned struct_fbcurpos_sz;\nextern unsigned struct_fbcursor_sz;\nextern unsigned struct_fbgattr_sz;\nextern unsigned struct_fbsattr_sz;\nextern unsigned struct_fbtype_sz;\nextern unsigned struct_fdformat_cmd_sz;\nextern unsigned struct_fdformat_parms_sz;\nextern unsigned struct_fifoq_conf_sz;\nextern unsigned struct_fifoq_getstats_sz;\nextern unsigned struct_fifoq_interface_sz;\nextern unsigned struct_format_op_sz;\nextern unsigned struct_fss_get_sz;\nextern 
unsigned struct_fss_set_sz;\nextern unsigned struct_gpio_attach_sz;\nextern unsigned struct_gpio_info_sz;\nextern unsigned struct_gpio_req_sz;\nextern unsigned struct_gpio_set_sz;\nextern unsigned struct_hfsc_add_class_sz;\nextern unsigned struct_hfsc_add_filter_sz;\nextern unsigned struct_hfsc_attach_sz;\nextern unsigned struct_hfsc_class_stats_sz;\nextern unsigned struct_hfsc_delete_class_sz;\nextern unsigned struct_hfsc_delete_filter_sz;\nextern unsigned struct_hfsc_interface_sz;\nextern unsigned struct_hfsc_modify_class_sz;\nextern unsigned struct_hpcfb_dsp_op_sz;\nextern unsigned struct_hpcfb_dspconf_sz;\nextern unsigned struct_hpcfb_fbconf_sz;\nextern unsigned struct_if_addrprefreq_sz;\nextern unsigned struct_if_clonereq_sz;\nextern unsigned struct_if_laddrreq_sz;\nextern unsigned struct_ifaddr_sz;\nextern unsigned struct_ifaliasreq_sz;\nextern unsigned struct_ifcapreq_sz;\nextern unsigned struct_ifconf_sz;\nextern unsigned struct_ifdatareq_sz;\nextern unsigned struct_ifdrv_sz;\nextern unsigned struct_ifmediareq_sz;\nextern unsigned struct_ifpppcstatsreq_sz;\nextern unsigned struct_ifpppstatsreq_sz;\nextern unsigned struct_ifreq_sz;\nextern unsigned struct_in6_addrpolicy_sz;\nextern unsigned struct_in6_ndireq_sz;\nextern unsigned struct_ioc_load_unload_sz;\nextern unsigned struct_ioc_patch_sz;\nextern unsigned struct_ioc_play_blocks_sz;\nextern unsigned struct_ioc_play_msf_sz;\nextern unsigned struct_ioc_play_track_sz;\nextern unsigned struct_ioc_read_subchannel_sz;\nextern unsigned struct_ioc_read_toc_entry_sz;\nextern unsigned struct_ioc_toc_header_sz;\nextern unsigned struct_ioc_vol_sz;\nextern unsigned struct_ioctl_pt_sz;\nextern unsigned struct_ioppt_sz;\nextern unsigned struct_iovec_sz;\nextern unsigned struct_ipfobj_sz;\nextern unsigned struct_irda_params_sz;\nextern unsigned struct_isp_fc_device_sz;\nextern unsigned struct_isp_fc_tsk_mgmt_sz;\nextern unsigned struct_isp_hba_device_sz;\nextern unsigned struct_isv_cmd_sz;\nextern unsigned 
struct_jobs_add_class_sz;\nextern unsigned struct_jobs_add_filter_sz;\nextern unsigned struct_jobs_attach_sz;\nextern unsigned struct_jobs_class_stats_sz;\nextern unsigned struct_jobs_delete_class_sz;\nextern unsigned struct_jobs_delete_filter_sz;\nextern unsigned struct_jobs_interface_sz;\nextern unsigned struct_jobs_modify_class_sz;\nextern unsigned struct_kbentry_sz;\nextern unsigned struct_kfilter_mapping_sz;\nextern unsigned struct_kiockeymap_sz;\nextern unsigned struct_ksyms_gsymbol_sz;\nextern unsigned struct_ksyms_gvalue_sz;\nextern unsigned struct_ksyms_ogsymbol_sz;\nextern unsigned struct_kttcp_io_args_sz;\nextern unsigned struct_ltchars_sz;\nextern unsigned struct_lua_create_sz;\nextern unsigned struct_lua_info_sz;\nextern unsigned struct_lua_load_sz;\nextern unsigned struct_lua_require_sz;\nextern unsigned struct_mbpp_param_sz;\nextern unsigned struct_md_conf_sz;\nextern unsigned struct_meteor_capframe_sz;\nextern unsigned struct_meteor_counts_sz;\nextern unsigned struct_meteor_geomet_sz;\nextern unsigned struct_meteor_pixfmt_sz;\nextern unsigned struct_meteor_video_sz;\nextern unsigned struct_mlx_cinfo_sz;\nextern unsigned struct_mlx_pause_sz;\nextern unsigned struct_mlx_rebuild_request_sz;\nextern unsigned struct_mlx_rebuild_status_sz;\nextern unsigned struct_mlx_usercommand_sz;\nextern unsigned struct_mly_user_command_sz;\nextern unsigned struct_mly_user_health_sz;\nextern unsigned struct_mtget_sz;\nextern unsigned struct_mtop_sz;\nextern unsigned struct_npf_ioctl_table_sz;\nextern unsigned struct_npioctl_sz;\nextern unsigned struct_nvme_pt_command_sz;\nextern unsigned struct_ochanger_element_status_request_sz;\nextern unsigned struct_ofiocdesc_sz;\nextern unsigned struct_okiockey_sz;\nextern unsigned struct_ortentry_sz;\nextern unsigned struct_oscsi_addr_sz;\nextern unsigned struct_oss_audioinfo_sz;\nextern unsigned struct_oss_sysinfo_sz;\nextern unsigned struct_pciio_bdf_cfgreg_sz;\nextern unsigned struct_pciio_businfo_sz;\nextern unsigned 
struct_pciio_cfgreg_sz;\nextern unsigned struct_pciio_drvname_sz;\nextern unsigned struct_pciio_drvnameonbus_sz;\nextern unsigned struct_pcvtid_sz;\nextern unsigned struct_pf_osfp_ioctl_sz;\nextern unsigned struct_pf_status_sz;\nextern unsigned struct_pfioc_altq_sz;\nextern unsigned struct_pfioc_if_sz;\nextern unsigned struct_pfioc_iface_sz;\nextern unsigned struct_pfioc_limit_sz;\nextern unsigned struct_pfioc_natlook_sz;\nextern unsigned struct_pfioc_pooladdr_sz;\nextern unsigned struct_pfioc_qstats_sz;\nextern unsigned struct_pfioc_rule_sz;\nextern unsigned struct_pfioc_ruleset_sz;\nextern unsigned struct_pfioc_src_node_kill_sz;\nextern unsigned struct_pfioc_src_nodes_sz;\nextern unsigned struct_pfioc_state_kill_sz;\nextern unsigned struct_pfioc_state_sz;\nextern unsigned struct_pfioc_states_sz;\nextern unsigned struct_pfioc_table_sz;\nextern unsigned struct_pfioc_tm_sz;\nextern unsigned struct_pfioc_trans_sz;\nextern unsigned struct_plistref_sz;\nextern unsigned struct_power_type_sz;\nextern unsigned struct_ppp_idle_sz;\nextern unsigned struct_ppp_option_data_sz;\nextern unsigned struct_ppp_rawin_sz;\nextern unsigned struct_pppoeconnectionstate_sz;\nextern unsigned struct_pppoediscparms_sz;\nextern unsigned struct_priq_add_class_sz;\nextern unsigned struct_priq_add_filter_sz;\nextern unsigned struct_priq_class_stats_sz;\nextern unsigned struct_priq_delete_class_sz;\nextern unsigned struct_priq_delete_filter_sz;\nextern unsigned struct_priq_interface_sz;\nextern unsigned struct_priq_modify_class_sz;\nextern unsigned struct_ptmget_sz;\nextern unsigned struct_pvctxreq_sz;\nextern unsigned struct_radio_info_sz;\nextern unsigned struct_red_conf_sz;\nextern unsigned struct_red_interface_sz;\nextern unsigned struct_red_stats_sz;\nextern unsigned struct_redparams_sz;\nextern unsigned struct_rf_pmparams_sz;\nextern unsigned struct_rf_pmstat_sz;\nextern unsigned struct_rf_recon_req_sz;\nextern unsigned struct_rio_conf_sz;\nextern unsigned struct_rio_interface_sz;\nextern 
unsigned struct_rio_stats_sz;\nextern unsigned struct_scan_io_sz;\nextern unsigned struct_scbusaccel_args_sz;\nextern unsigned struct_scbusiodetach_args_sz;\nextern unsigned struct_scbusioscan_args_sz;\nextern unsigned struct_scsi_addr_sz;\nextern unsigned struct_seq_event_rec_sz;\nextern unsigned struct_session_op_sz;\nextern unsigned struct_sgttyb_sz;\nextern unsigned struct_sioc_sg_req_sz;\nextern unsigned struct_sioc_vif_req_sz;\nextern unsigned struct_smbioc_flags_sz;\nextern unsigned struct_smbioc_lookup_sz;\nextern unsigned struct_smbioc_oshare_sz;\nextern unsigned struct_smbioc_ossn_sz;\nextern unsigned struct_smbioc_rq_sz;\nextern unsigned struct_smbioc_rw_sz;\nextern unsigned struct_spppauthcfg_sz;\nextern unsigned struct_spppauthfailuresettings_sz;\nextern unsigned struct_spppauthfailurestats_sz;\nextern unsigned struct_spppdnsaddrs_sz;\nextern unsigned struct_spppdnssettings_sz;\nextern unsigned struct_spppidletimeout_sz;\nextern unsigned struct_spppkeepalivesettings_sz;\nextern unsigned struct_sppplcpcfg_sz;\nextern unsigned struct_spppstatus_sz;\nextern unsigned struct_spppstatusncp_sz;\nextern unsigned struct_srt_rt_sz;\nextern unsigned struct_stic_xinfo_sz;\nextern unsigned struct_sun_dkctlr_sz;\nextern unsigned struct_sun_dkgeom_sz;\nextern unsigned struct_sun_dkpart_sz;\nextern unsigned struct_synth_info_sz;\nextern unsigned struct_tbrreq_sz;\nextern unsigned struct_tchars_sz;\nextern unsigned struct_termios_sz;\nextern unsigned struct_timeval_sz;\nextern unsigned struct_twe_drivecommand_sz;\nextern unsigned struct_twe_paramcommand_sz;\nextern unsigned struct_twe_usercommand_sz;\nextern unsigned struct_ukyopon_identify_sz;\nextern unsigned struct_urio_command_sz;\nextern unsigned struct_usb_alt_interface_sz;\nextern unsigned struct_usb_bulk_ra_wb_opt_sz;\nextern unsigned struct_usb_config_desc_sz;\nextern unsigned struct_usb_ctl_report_desc_sz;\nextern unsigned struct_usb_ctl_report_sz;\nextern unsigned struct_usb_ctl_request_sz;\n#if 
defined(__x86_64__)\nextern unsigned struct_nvmm_ioc_capability_sz;\nextern unsigned struct_nvmm_ioc_machine_create_sz;\nextern unsigned struct_nvmm_ioc_machine_destroy_sz;\nextern unsigned struct_nvmm_ioc_machine_configure_sz;\nextern unsigned struct_nvmm_ioc_vcpu_create_sz;\nextern unsigned struct_nvmm_ioc_vcpu_destroy_sz;\nextern unsigned struct_nvmm_ioc_vcpu_configure_sz;\nextern unsigned struct_nvmm_ioc_vcpu_setstate_sz;\nextern unsigned struct_nvmm_ioc_vcpu_getstate_sz;\nextern unsigned struct_nvmm_ioc_vcpu_inject_sz;\nextern unsigned struct_nvmm_ioc_vcpu_run_sz;\nextern unsigned struct_nvmm_ioc_gpa_map_sz;\nextern unsigned struct_nvmm_ioc_gpa_unmap_sz;\nextern unsigned struct_nvmm_ioc_hva_map_sz;\nextern unsigned struct_nvmm_ioc_hva_unmap_sz;\nextern unsigned struct_nvmm_ioc_ctl_sz;\n#endif\nextern unsigned struct_spi_ioctl_configure_sz;\nextern unsigned struct_spi_ioctl_transfer_sz;\nextern unsigned struct_autofs_daemon_request_sz;\nextern unsigned struct_autofs_daemon_done_sz;\nextern unsigned struct_sctp_connectx_addrs_sz;\nextern unsigned struct_usb_device_info_old_sz;\nextern unsigned struct_usb_device_info_sz;\nextern unsigned struct_usb_device_stats_sz;\nextern unsigned struct_usb_endpoint_desc_sz;\nextern unsigned struct_usb_full_desc_sz;\nextern unsigned struct_usb_interface_desc_sz;\nextern unsigned struct_usb_string_desc_sz;\nextern unsigned struct_utoppy_readfile_sz;\nextern unsigned struct_utoppy_rename_sz;\nextern unsigned struct_utoppy_stats_sz;\nextern unsigned struct_utoppy_writefile_sz;\nextern unsigned struct_v4l2_audio_sz;\nextern unsigned struct_v4l2_audioout_sz;\nextern unsigned struct_v4l2_buffer_sz;\nextern unsigned struct_v4l2_capability_sz;\nextern unsigned struct_v4l2_control_sz;\nextern unsigned struct_v4l2_crop_sz;\nextern unsigned struct_v4l2_cropcap_sz;\nextern unsigned struct_v4l2_fmtdesc_sz;\nextern unsigned struct_v4l2_format_sz;\nextern unsigned struct_v4l2_framebuffer_sz;\nextern unsigned struct_v4l2_frequency_sz;\nextern 
unsigned struct_v4l2_frmivalenum_sz;\nextern unsigned struct_v4l2_frmsizeenum_sz;\nextern unsigned struct_v4l2_input_sz;\nextern unsigned struct_v4l2_jpegcompression_sz;\nextern unsigned struct_v4l2_modulator_sz;\nextern unsigned struct_v4l2_output_sz;\nextern unsigned struct_v4l2_queryctrl_sz;\nextern unsigned struct_v4l2_querymenu_sz;\nextern unsigned struct_v4l2_requestbuffers_sz;\nextern unsigned struct_v4l2_standard_sz;\nextern unsigned struct_v4l2_streamparm_sz;\nextern unsigned struct_v4l2_tuner_sz;\nextern unsigned struct_vnd_ioctl_sz;\nextern unsigned struct_vnd_user_sz;\nextern unsigned struct_vt_stat_sz;\nextern unsigned struct_wdog_conf_sz;\nextern unsigned struct_wdog_mode_sz;\nextern unsigned struct_ipmi_recv_sz;\nextern unsigned struct_ipmi_req_sz;\nextern unsigned struct_ipmi_cmdspec_sz;\nextern unsigned struct_wfq_conf_sz;\nextern unsigned struct_wfq_getqid_sz;\nextern unsigned struct_wfq_getstats_sz;\nextern unsigned struct_wfq_interface_sz;\nextern unsigned struct_wfq_setweight_sz;\nextern unsigned struct_winsize_sz;\nextern unsigned struct_wscons_event_sz;\nextern unsigned struct_wsdisplay_addscreendata_sz;\nextern unsigned struct_wsdisplay_char_sz;\nextern unsigned struct_wsdisplay_cmap_sz;\nextern unsigned struct_wsdisplay_curpos_sz;\nextern unsigned struct_wsdisplay_cursor_sz;\nextern unsigned struct_wsdisplay_delscreendata_sz;\nextern unsigned struct_wsdisplay_fbinfo_sz;\nextern unsigned struct_wsdisplay_font_sz;\nextern unsigned struct_wsdisplay_kbddata_sz;\nextern unsigned struct_wsdisplay_msgattrs_sz;\nextern unsigned struct_wsdisplay_param_sz;\nextern unsigned struct_wsdisplay_scroll_data_sz;\nextern unsigned struct_wsdisplay_usefontdata_sz;\nextern unsigned struct_wsdisplayio_blit_sz;\nextern unsigned struct_wsdisplayio_bus_id_sz;\nextern unsigned struct_wsdisplayio_edid_info_sz;\nextern unsigned struct_wsdisplayio_fbinfo_sz;\nextern unsigned struct_wskbd_bell_data_sz;\nextern unsigned struct_wskbd_keyrepeat_data_sz;\nextern unsigned 
struct_wskbd_map_data_sz;\nextern unsigned struct_wskbd_scroll_data_sz;\nextern unsigned struct_wsmouse_calibcoords_sz;\nextern unsigned struct_wsmouse_id_sz;\nextern unsigned struct_wsmouse_repeat_sz;\nextern unsigned struct_wsmux_device_list_sz;\nextern unsigned struct_wsmux_device_sz;\nextern unsigned struct_xd_iocmd_sz;\n\nextern unsigned struct_scsireq_sz;\nextern unsigned struct_tone_sz;\nextern unsigned union_twe_statrequest_sz;\nextern unsigned struct_usb_device_descriptor_sz;\nextern unsigned struct_vt_mode_sz;\nextern unsigned struct__old_mixer_info_sz;\nextern unsigned struct__agp_allocate_sz;\nextern unsigned struct__agp_bind_sz;\nextern unsigned struct__agp_info_sz;\nextern unsigned struct__agp_setup_sz;\nextern unsigned struct__agp_unbind_sz;\nextern unsigned struct_atareq_sz;\nextern unsigned struct_cpustate_sz;\nextern unsigned struct_dmx_caps_sz;\nextern unsigned enum_dmx_source_sz;\nextern unsigned union_dvd_authinfo_sz;\nextern unsigned union_dvd_struct_sz;\nextern unsigned enum_v4l2_priority_sz;\nextern unsigned struct_envsys_basic_info_sz;\nextern unsigned struct_envsys_tre_data_sz;\nextern unsigned enum_fe_sec_mini_cmd_sz;\nextern unsigned enum_fe_sec_tone_mode_sz;\nextern unsigned enum_fe_sec_voltage_sz;\nextern unsigned enum_fe_status_sz;\nextern unsigned struct_gdt_ctrt_sz;\nextern unsigned struct_gdt_event_sz;\nextern unsigned struct_gdt_osv_sz;\nextern unsigned struct_gdt_rescan_sz;\nextern unsigned struct_gdt_statist_sz;\nextern unsigned struct_gdt_ucmd_sz;\nextern unsigned struct_iscsi_conn_status_parameters_sz;\nextern unsigned struct_iscsi_get_version_parameters_sz;\nextern unsigned struct_iscsi_iocommand_parameters_sz;\nextern unsigned struct_iscsi_login_parameters_sz;\nextern unsigned struct_iscsi_logout_parameters_sz;\nextern unsigned struct_iscsi_register_event_parameters_sz;\nextern unsigned struct_iscsi_remove_parameters_sz;\nextern unsigned struct_iscsi_send_targets_parameters_sz;\nextern unsigned 
struct_iscsi_set_node_name_parameters_sz;\nextern unsigned struct_iscsi_wait_event_parameters_sz;\nextern unsigned struct_isp_stats_sz;\nextern unsigned struct_lsenable_sz;\nextern unsigned struct_lsdisable_sz;\nextern unsigned struct_audio_format_query_sz;\nextern unsigned struct_mixer_ctrl_sz;\nextern unsigned struct_mixer_devinfo_sz;\nextern unsigned struct_mpu_command_rec_sz;\nextern unsigned struct_rndstat_sz;\nextern unsigned struct_rndstat_name_sz;\nextern unsigned struct_rndctl_sz;\nextern unsigned struct_rnddata_sz;\nextern unsigned struct_rndpoolstat_sz;\nextern unsigned struct_rndstat_est_sz;\nextern unsigned struct_rndstat_est_name_sz;\nextern unsigned struct_pps_params_sz;\nextern unsigned struct_pps_info_sz;\nextern unsigned struct_mixer_info_sz;\nextern unsigned struct_RF_SparetWait_sz;\nextern unsigned struct_RF_ComponentLabel_sz;\nextern unsigned struct_RF_SingleComponent_sz;\nextern unsigned struct_RF_ProgressInfo_sz;\nextern unsigned struct_nvlist_ref_sz;\nextern unsigned struct_StringList_sz;\n\n// A special value to mark ioctls that are not present on the target platform,\n// when it can not be determined without including any system headers.\nextern const unsigned IOCTL_NOT_PRESENT;\n\nextern unsigned IOCTL_AFM_ADDFMAP;\nextern unsigned IOCTL_AFM_DELFMAP;\nextern unsigned IOCTL_AFM_CLEANFMAP;\nextern unsigned IOCTL_AFM_GETFMAP;\nextern unsigned IOCTL_ALTQGTYPE;\nextern unsigned IOCTL_ALTQTBRSET;\nextern unsigned IOCTL_ALTQTBRGET;\nextern unsigned IOCTL_BLUE_IF_ATTACH;\nextern unsigned IOCTL_BLUE_IF_DETACH;\nextern unsigned IOCTL_BLUE_ENABLE;\nextern unsigned IOCTL_BLUE_DISABLE;\nextern unsigned IOCTL_BLUE_CONFIG;\nextern unsigned IOCTL_BLUE_GETSTATS;\nextern unsigned IOCTL_CBQ_IF_ATTACH;\nextern unsigned IOCTL_CBQ_IF_DETACH;\nextern unsigned IOCTL_CBQ_ENABLE;\nextern unsigned IOCTL_CBQ_DISABLE;\nextern unsigned IOCTL_CBQ_CLEAR_HIERARCHY;\nextern unsigned IOCTL_CBQ_ADD_CLASS;\nextern unsigned IOCTL_CBQ_DEL_CLASS;\nextern unsigned 
IOCTL_CBQ_MODIFY_CLASS;\nextern unsigned IOCTL_CBQ_ADD_FILTER;\nextern unsigned IOCTL_CBQ_DEL_FILTER;\nextern unsigned IOCTL_CBQ_GETSTATS;\nextern unsigned IOCTL_CDNR_IF_ATTACH;\nextern unsigned IOCTL_CDNR_IF_DETACH;\nextern unsigned IOCTL_CDNR_ENABLE;\nextern unsigned IOCTL_CDNR_DISABLE;\nextern unsigned IOCTL_CDNR_ADD_FILTER;\nextern unsigned IOCTL_CDNR_DEL_FILTER;\nextern unsigned IOCTL_CDNR_GETSTATS;\nextern unsigned IOCTL_CDNR_ADD_ELEM;\nextern unsigned IOCTL_CDNR_DEL_ELEM;\nextern unsigned IOCTL_CDNR_ADD_TBM;\nextern unsigned IOCTL_CDNR_MOD_TBM;\nextern unsigned IOCTL_CDNR_TBM_STATS;\nextern unsigned IOCTL_CDNR_ADD_TCM;\nextern unsigned IOCTL_CDNR_MOD_TCM;\nextern unsigned IOCTL_CDNR_TCM_STATS;\nextern unsigned IOCTL_CDNR_ADD_TSW;\nextern unsigned IOCTL_CDNR_MOD_TSW;\nextern unsigned IOCTL_FIFOQ_IF_ATTACH;\nextern unsigned IOCTL_FIFOQ_IF_DETACH;\nextern unsigned IOCTL_FIFOQ_ENABLE;\nextern unsigned IOCTL_FIFOQ_DISABLE;\nextern unsigned IOCTL_FIFOQ_CONFIG;\nextern unsigned IOCTL_FIFOQ_GETSTATS;\nextern unsigned IOCTL_HFSC_IF_ATTACH;\nextern unsigned IOCTL_HFSC_IF_DETACH;\nextern unsigned IOCTL_HFSC_ENABLE;\nextern unsigned IOCTL_HFSC_DISABLE;\nextern unsigned IOCTL_HFSC_CLEAR_HIERARCHY;\nextern unsigned IOCTL_HFSC_ADD_CLASS;\nextern unsigned IOCTL_HFSC_DEL_CLASS;\nextern unsigned IOCTL_HFSC_MOD_CLASS;\nextern unsigned IOCTL_HFSC_ADD_FILTER;\nextern unsigned IOCTL_HFSC_DEL_FILTER;\nextern unsigned IOCTL_HFSC_GETSTATS;\nextern unsigned IOCTL_JOBS_IF_ATTACH;\nextern unsigned IOCTL_JOBS_IF_DETACH;\nextern unsigned IOCTL_JOBS_ENABLE;\nextern unsigned IOCTL_JOBS_DISABLE;\nextern unsigned IOCTL_JOBS_CLEAR;\nextern unsigned IOCTL_JOBS_ADD_CLASS;\nextern unsigned IOCTL_JOBS_DEL_CLASS;\nextern unsigned IOCTL_JOBS_MOD_CLASS;\nextern unsigned IOCTL_JOBS_ADD_FILTER;\nextern unsigned IOCTL_JOBS_DEL_FILTER;\nextern unsigned IOCTL_JOBS_GETSTATS;\nextern unsigned IOCTL_PRIQ_IF_ATTACH;\nextern unsigned IOCTL_PRIQ_IF_DETACH;\nextern unsigned IOCTL_PRIQ_ENABLE;\nextern unsigned 
IOCTL_PRIQ_DISABLE;\nextern unsigned IOCTL_PRIQ_CLEAR;\nextern unsigned IOCTL_PRIQ_ADD_CLASS;\nextern unsigned IOCTL_PRIQ_DEL_CLASS;\nextern unsigned IOCTL_PRIQ_MOD_CLASS;\nextern unsigned IOCTL_PRIQ_ADD_FILTER;\nextern unsigned IOCTL_PRIQ_DEL_FILTER;\nextern unsigned IOCTL_PRIQ_GETSTATS;\nextern unsigned IOCTL_RED_IF_ATTACH;\nextern unsigned IOCTL_RED_IF_DETACH;\nextern unsigned IOCTL_RED_ENABLE;\nextern unsigned IOCTL_RED_DISABLE;\nextern unsigned IOCTL_RED_CONFIG;\nextern unsigned IOCTL_RED_GETSTATS;\nextern unsigned IOCTL_RED_SETDEFAULTS;\nextern unsigned IOCTL_RIO_IF_ATTACH;\nextern unsigned IOCTL_RIO_IF_DETACH;\nextern unsigned IOCTL_RIO_ENABLE;\nextern unsigned IOCTL_RIO_DISABLE;\nextern unsigned IOCTL_RIO_CONFIG;\nextern unsigned IOCTL_RIO_GETSTATS;\nextern unsigned IOCTL_RIO_SETDEFAULTS;\nextern unsigned IOCTL_WFQ_IF_ATTACH;\nextern unsigned IOCTL_WFQ_IF_DETACH;\nextern unsigned IOCTL_WFQ_ENABLE;\nextern unsigned IOCTL_WFQ_DISABLE;\nextern unsigned IOCTL_WFQ_CONFIG;\nextern unsigned IOCTL_WFQ_GET_STATS;\nextern unsigned IOCTL_WFQ_GET_QID;\nextern unsigned IOCTL_WFQ_SET_WEIGHT;\nextern unsigned IOCTL_CRIOGET;\nextern unsigned IOCTL_CIOCFSESSION;\nextern unsigned IOCTL_CIOCKEY;\nextern unsigned IOCTL_CIOCNFKEYM;\nextern unsigned IOCTL_CIOCNFSESSION;\nextern unsigned IOCTL_CIOCNCRYPTRETM;\nextern unsigned IOCTL_CIOCNCRYPTRET;\nextern unsigned IOCTL_CIOCGSESSION;\nextern unsigned IOCTL_CIOCNGSESSION;\nextern unsigned IOCTL_CIOCCRYPT;\nextern unsigned IOCTL_CIOCNCRYPTM;\nextern unsigned IOCTL_CIOCASYMFEAT;\nextern unsigned IOCTL_APM_IOC_REJECT;\nextern unsigned IOCTL_APM_IOC_STANDBY;\nextern unsigned IOCTL_APM_IOC_SUSPEND;\nextern unsigned IOCTL_OAPM_IOC_GETPOWER;\nextern unsigned IOCTL_APM_IOC_GETPOWER;\nextern unsigned IOCTL_APM_IOC_NEXTEVENT;\nextern unsigned IOCTL_APM_IOC_DEV_CTL;\nextern unsigned IOCTL_NETBSD_DM_IOCTL;\nextern unsigned IOCTL_DMIO_SETFUNC;\nextern unsigned IOCTL_DMX_START;\nextern unsigned IOCTL_DMX_STOP;\nextern unsigned 
IOCTL_DMX_SET_FILTER;\nextern unsigned IOCTL_DMX_SET_PES_FILTER;\nextern unsigned IOCTL_DMX_SET_BUFFER_SIZE;\nextern unsigned IOCTL_DMX_GET_STC;\nextern unsigned IOCTL_DMX_ADD_PID;\nextern unsigned IOCTL_DMX_REMOVE_PID;\nextern unsigned IOCTL_DMX_GET_CAPS;\nextern unsigned IOCTL_DMX_SET_SOURCE;\nextern unsigned IOCTL_FE_READ_STATUS;\nextern unsigned IOCTL_FE_READ_BER;\nextern unsigned IOCTL_FE_READ_SNR;\nextern unsigned IOCTL_FE_READ_SIGNAL_STRENGTH;\nextern unsigned IOCTL_FE_READ_UNCORRECTED_BLOCKS;\nextern unsigned IOCTL_FE_SET_FRONTEND;\nextern unsigned IOCTL_FE_GET_FRONTEND;\nextern unsigned IOCTL_FE_GET_EVENT;\nextern unsigned IOCTL_FE_GET_INFO;\nextern unsigned IOCTL_FE_DISEQC_RESET_OVERLOAD;\nextern unsigned IOCTL_FE_DISEQC_SEND_MASTER_CMD;\nextern unsigned IOCTL_FE_DISEQC_RECV_SLAVE_REPLY;\nextern unsigned IOCTL_FE_DISEQC_SEND_BURST;\nextern unsigned IOCTL_FE_SET_TONE;\nextern unsigned IOCTL_FE_SET_VOLTAGE;\nextern unsigned IOCTL_FE_ENABLE_HIGH_LNB_VOLTAGE;\nextern unsigned IOCTL_FE_SET_FRONTEND_TUNE_MODE;\nextern unsigned IOCTL_FE_DISHNETWORK_SEND_LEGACY_CMD;\nextern unsigned IOCTL_FILEMON_SET_FD;\nextern unsigned IOCTL_FILEMON_SET_PID;\nextern unsigned IOCTL_HDAUDIO_FGRP_INFO;\nextern unsigned IOCTL_HDAUDIO_FGRP_GETCONFIG;\nextern unsigned IOCTL_HDAUDIO_FGRP_SETCONFIG;\nextern unsigned IOCTL_HDAUDIO_FGRP_WIDGET_INFO;\nextern unsigned IOCTL_HDAUDIO_FGRP_CODEC_INFO;\nextern unsigned IOCTL_HDAUDIO_AFG_WIDGET_INFO;\nextern unsigned IOCTL_HDAUDIO_AFG_CODEC_INFO;\nextern unsigned IOCTL_CEC_GET_PHYS_ADDR;\nextern unsigned IOCTL_CEC_GET_LOG_ADDRS;\nextern unsigned IOCTL_CEC_SET_LOG_ADDRS;\nextern unsigned IOCTL_CEC_GET_VENDOR_ID;\nextern unsigned IOCTL_HPCFBIO_GCONF;\nextern unsigned IOCTL_HPCFBIO_SCONF;\nextern unsigned IOCTL_HPCFBIO_GDSPCONF;\nextern unsigned IOCTL_HPCFBIO_SDSPCONF;\nextern unsigned IOCTL_HPCFBIO_GOP;\nextern unsigned IOCTL_HPCFBIO_SOP;\nextern unsigned IOCTL_IOPIOCPT;\nextern unsigned IOCTL_IOPIOCGLCT;\nextern unsigned 
IOCTL_IOPIOCGSTATUS;\nextern unsigned IOCTL_IOPIOCRECONFIG;\nextern unsigned IOCTL_IOPIOCGTIDMAP;\nextern unsigned IOCTL_SIOCGATHSTATS;\nextern unsigned IOCTL_SIOCGATHDIAG;\nextern unsigned IOCTL_METEORCAPTUR;\nextern unsigned IOCTL_METEORCAPFRM;\nextern unsigned IOCTL_METEORSETGEO;\nextern unsigned IOCTL_METEORGETGEO;\nextern unsigned IOCTL_METEORSTATUS;\nextern unsigned IOCTL_METEORSHUE;\nextern unsigned IOCTL_METEORGHUE;\nextern unsigned IOCTL_METEORSFMT;\nextern unsigned IOCTL_METEORGFMT;\nextern unsigned IOCTL_METEORSINPUT;\nextern unsigned IOCTL_METEORGINPUT;\nextern unsigned IOCTL_METEORSCHCV;\nextern unsigned IOCTL_METEORGCHCV;\nextern unsigned IOCTL_METEORSCOUNT;\nextern unsigned IOCTL_METEORGCOUNT;\nextern unsigned IOCTL_METEORSFPS;\nextern unsigned IOCTL_METEORGFPS;\nextern unsigned IOCTL_METEORSSIGNAL;\nextern unsigned IOCTL_METEORGSIGNAL;\nextern unsigned IOCTL_METEORSVIDEO;\nextern unsigned IOCTL_METEORGVIDEO;\nextern unsigned IOCTL_METEORSBRIG;\nextern unsigned IOCTL_METEORGBRIG;\nextern unsigned IOCTL_METEORSCSAT;\nextern unsigned IOCTL_METEORGCSAT;\nextern unsigned IOCTL_METEORSCONT;\nextern unsigned IOCTL_METEORGCONT;\nextern unsigned IOCTL_METEORSHWS;\nextern unsigned IOCTL_METEORGHWS;\nextern unsigned IOCTL_METEORSVWS;\nextern unsigned IOCTL_METEORGVWS;\nextern unsigned IOCTL_METEORSTS;\nextern unsigned IOCTL_METEORGTS;\nextern unsigned IOCTL_TVTUNER_SETCHNL;\nextern unsigned IOCTL_TVTUNER_GETCHNL;\nextern unsigned IOCTL_TVTUNER_SETTYPE;\nextern unsigned IOCTL_TVTUNER_GETTYPE;\nextern unsigned IOCTL_TVTUNER_GETSTATUS;\nextern unsigned IOCTL_TVTUNER_SETFREQ;\nextern unsigned IOCTL_TVTUNER_GETFREQ;\nextern unsigned IOCTL_TVTUNER_SETAFC;\nextern unsigned IOCTL_TVTUNER_GETAFC;\nextern unsigned IOCTL_RADIO_SETMODE;\nextern unsigned IOCTL_RADIO_GETMODE;\nextern unsigned IOCTL_RADIO_SETFREQ;\nextern unsigned IOCTL_RADIO_GETFREQ;\nextern unsigned IOCTL_METEORSACTPIXFMT;\nextern unsigned IOCTL_METEORGACTPIXFMT;\nextern unsigned 
IOCTL_METEORGSUPPIXFMT;\nextern unsigned IOCTL_TVTUNER_GETCHNLSET;\nextern unsigned IOCTL_REMOTE_GETKEY;\nextern unsigned IOCTL_GDT_IOCTL_GENERAL;\nextern unsigned IOCTL_GDT_IOCTL_DRVERS;\nextern unsigned IOCTL_GDT_IOCTL_CTRTYPE;\nextern unsigned IOCTL_GDT_IOCTL_OSVERS;\nextern unsigned IOCTL_GDT_IOCTL_CTRCNT;\nextern unsigned IOCTL_GDT_IOCTL_EVENT;\nextern unsigned IOCTL_GDT_IOCTL_STATIST;\nextern unsigned IOCTL_GDT_IOCTL_RESCAN;\nextern unsigned IOCTL_ISP_SDBLEV;\nextern unsigned IOCTL_ISP_RESETHBA;\nextern unsigned IOCTL_ISP_RESCAN;\nextern unsigned IOCTL_ISP_SETROLE;\nextern unsigned IOCTL_ISP_GETROLE;\nextern unsigned IOCTL_ISP_GET_STATS;\nextern unsigned IOCTL_ISP_CLR_STATS;\nextern unsigned IOCTL_ISP_FC_LIP;\nextern unsigned IOCTL_ISP_FC_GETDINFO;\nextern unsigned IOCTL_ISP_GET_FW_CRASH_DUMP;\nextern unsigned IOCTL_ISP_FORCE_CRASH_DUMP;\nextern unsigned IOCTL_ISP_FC_GETHINFO;\nextern unsigned IOCTL_ISP_TSK_MGMT;\nextern unsigned IOCTL_ISP_FC_GETDLIST;\nextern unsigned IOCTL_MLXD_STATUS;\nextern unsigned IOCTL_MLXD_CHECKASYNC;\nextern unsigned IOCTL_MLXD_DETACH;\nextern unsigned IOCTL_MLX_RESCAN_DRIVES;\nextern unsigned IOCTL_MLX_PAUSE_CHANNEL;\nextern unsigned IOCTL_MLX_COMMAND;\nextern unsigned IOCTL_MLX_REBUILDASYNC;\nextern unsigned IOCTL_MLX_REBUILDSTAT;\nextern unsigned IOCTL_MLX_GET_SYSDRIVE;\nextern unsigned IOCTL_MLX_GET_CINFO;\nextern unsigned IOCTL_NVME_PASSTHROUGH_CMD;\nextern unsigned IOCTL_FWCFGIO_SET_INDEX;\nextern unsigned IOCTL_IRDA_RESET_PARAMS;\nextern unsigned IOCTL_IRDA_SET_PARAMS;\nextern unsigned IOCTL_IRDA_GET_SPEEDMASK;\nextern unsigned IOCTL_IRDA_GET_TURNAROUNDMASK;\nextern unsigned IOCTL_IRFRAMETTY_GET_DEVICE;\nextern unsigned IOCTL_IRFRAMETTY_GET_DONGLE;\nextern unsigned IOCTL_IRFRAMETTY_SET_DONGLE;\nextern unsigned IOCTL_ISV_CMD;\nextern unsigned IOCTL_WTQICMD;\nextern unsigned IOCTL_ISCSI_GET_VERSION;\nextern unsigned IOCTL_ISCSI_LOGIN;\nextern unsigned IOCTL_ISCSI_LOGOUT;\nextern unsigned IOCTL_ISCSI_ADD_CONNECTION;\nextern 
unsigned IOCTL_ISCSI_RESTORE_CONNECTION;\nextern unsigned IOCTL_ISCSI_REMOVE_CONNECTION;\nextern unsigned IOCTL_ISCSI_CONNECTION_STATUS;\nextern unsigned IOCTL_ISCSI_SEND_TARGETS;\nextern unsigned IOCTL_ISCSI_SET_NODE_NAME;\nextern unsigned IOCTL_ISCSI_IO_COMMAND;\nextern unsigned IOCTL_ISCSI_REGISTER_EVENT;\nextern unsigned IOCTL_ISCSI_DEREGISTER_EVENT;\nextern unsigned IOCTL_ISCSI_WAIT_EVENT;\nextern unsigned IOCTL_ISCSI_POLL_EVENT;\nextern unsigned IOCTL_OFIOCGET;\nextern unsigned IOCTL_OFIOCSET;\nextern unsigned IOCTL_OFIOCNEXTPROP;\nextern unsigned IOCTL_OFIOCGETOPTNODE;\nextern unsigned IOCTL_OFIOCGETNEXT;\nextern unsigned IOCTL_OFIOCGETCHILD;\nextern unsigned IOCTL_OFIOCFINDDEVICE;\nextern unsigned IOCTL_AMR_IO_VERSION;\nextern unsigned IOCTL_AMR_IO_COMMAND;\nextern unsigned IOCTL_MLYIO_COMMAND;\nextern unsigned IOCTL_MLYIO_HEALTH;\nextern unsigned IOCTL_PCI_IOC_CFGREAD;\nextern unsigned IOCTL_PCI_IOC_CFGWRITE;\nextern unsigned IOCTL_PCI_IOC_BDF_CFGREAD;\nextern unsigned IOCTL_PCI_IOC_BDF_CFGWRITE;\nextern unsigned IOCTL_PCI_IOC_BUSINFO;\nextern unsigned IOCTL_PCI_IOC_DRVNAME;\nextern unsigned IOCTL_PCI_IOC_DRVNAMEONBUS;\nextern unsigned IOCTL_TWEIO_COMMAND;\nextern unsigned IOCTL_TWEIO_STATS;\nextern unsigned IOCTL_TWEIO_AEN_POLL;\nextern unsigned IOCTL_TWEIO_AEN_WAIT;\nextern unsigned IOCTL_TWEIO_SET_PARAM;\nextern unsigned IOCTL_TWEIO_GET_PARAM;\nextern unsigned IOCTL_TWEIO_RESET;\nextern unsigned IOCTL_TWEIO_ADD_UNIT;\nextern unsigned IOCTL_TWEIO_DEL_UNIT;\nextern unsigned IOCTL_SIOCSCNWDOMAIN;\nextern unsigned IOCTL_SIOCGCNWDOMAIN;\nextern unsigned IOCTL_SIOCSCNWKEY;\nextern unsigned IOCTL_SIOCGCNWSTATUS;\nextern unsigned IOCTL_SIOCGCNWSTATS;\nextern unsigned IOCTL_SIOCGCNWTRAIL;\nextern unsigned IOCTL_SIOCGRAYSIGLEV;\nextern unsigned IOCTL_RAIDFRAME_SHUTDOWN;\nextern unsigned IOCTL_RAIDFRAME_TUR;\nextern unsigned IOCTL_RAIDFRAME_FAIL_DISK;\nextern unsigned IOCTL_RAIDFRAME_CHECK_RECON_STATUS;\nextern unsigned IOCTL_RAIDFRAME_REWRITEPARITY;\nextern 
unsigned IOCTL_RAIDFRAME_COPYBACK;\nextern unsigned IOCTL_RAIDFRAME_SPARET_WAIT;\nextern unsigned IOCTL_RAIDFRAME_SEND_SPARET;\nextern unsigned IOCTL_RAIDFRAME_ABORT_SPARET_WAIT;\nextern unsigned IOCTL_RAIDFRAME_START_ATRACE;\nextern unsigned IOCTL_RAIDFRAME_STOP_ATRACE;\nextern unsigned IOCTL_RAIDFRAME_GET_SIZE;\nextern unsigned IOCTL_RAIDFRAME_RESET_ACCTOTALS;\nextern unsigned IOCTL_RAIDFRAME_KEEP_ACCTOTALS;\nextern unsigned IOCTL_RAIDFRAME_GET_COMPONENT_LABEL;\nextern unsigned IOCTL_RAIDFRAME_SET_COMPONENT_LABEL;\nextern unsigned IOCTL_RAIDFRAME_INIT_LABELS;\nextern unsigned IOCTL_RAIDFRAME_ADD_HOT_SPARE;\nextern unsigned IOCTL_RAIDFRAME_REMOVE_HOT_SPARE;\nextern unsigned IOCTL_RAIDFRAME_REBUILD_IN_PLACE;\nextern unsigned IOCTL_RAIDFRAME_CHECK_PARITY;\nextern unsigned IOCTL_RAIDFRAME_CHECK_PARITYREWRITE_STATUS;\nextern unsigned IOCTL_RAIDFRAME_CHECK_COPYBACK_STATUS;\nextern unsigned IOCTL_RAIDFRAME_SET_AUTOCONFIG;\nextern unsigned IOCTL_RAIDFRAME_SET_ROOT;\nextern unsigned IOCTL_RAIDFRAME_DELETE_COMPONENT;\nextern unsigned IOCTL_RAIDFRAME_INCORPORATE_HOT_SPARE;\nextern unsigned IOCTL_RAIDFRAME_CHECK_RECON_STATUS_EXT;\nextern unsigned IOCTL_RAIDFRAME_CHECK_PARITYREWRITE_STATUS_EXT;\nextern unsigned IOCTL_RAIDFRAME_CHECK_COPYBACK_STATUS_EXT;\nextern unsigned IOCTL_RAIDFRAME_CONFIGURE;\nextern unsigned IOCTL_RAIDFRAME_GET_INFO;\nextern unsigned IOCTL_RAIDFRAME_PARITYMAP_STATUS;\nextern unsigned IOCTL_RAIDFRAME_PARITYMAP_GET_DISABLE;\nextern unsigned IOCTL_RAIDFRAME_PARITYMAP_SET_DISABLE;\nextern unsigned IOCTL_RAIDFRAME_PARITYMAP_SET_PARAMS;\nextern unsigned IOCTL_RAIDFRAME_SET_LAST_UNIT;\nextern unsigned IOCTL_MBPPIOCSPARAM;\nextern unsigned IOCTL_MBPPIOCGPARAM;\nextern unsigned IOCTL_MBPPIOCGSTAT;\nextern unsigned IOCTL_SESIOC_GETNOBJ;\nextern unsigned IOCTL_SESIOC_GETOBJMAP;\nextern unsigned IOCTL_SESIOC_GETENCSTAT;\nextern unsigned IOCTL_SESIOC_SETENCSTAT;\nextern unsigned IOCTL_SESIOC_GETOBJSTAT;\nextern unsigned IOCTL_SESIOC_SETOBJSTAT;\nextern unsigned 
IOCTL_SESIOC_GETTEXT;\nextern unsigned IOCTL_SESIOC_INIT;\nextern unsigned IOCTL_SUN_DKIOCGGEOM;\nextern unsigned IOCTL_SUN_DKIOCINFO;\nextern unsigned IOCTL_SUN_DKIOCGPART;\nextern unsigned IOCTL_FBIOGTYPE;\nextern unsigned IOCTL_FBIOPUTCMAP;\nextern unsigned IOCTL_FBIOGETCMAP;\nextern unsigned IOCTL_FBIOGATTR;\nextern unsigned IOCTL_FBIOSVIDEO;\nextern unsigned IOCTL_FBIOGVIDEO;\nextern unsigned IOCTL_FBIOSCURSOR;\nextern unsigned IOCTL_FBIOGCURSOR;\nextern unsigned IOCTL_FBIOSCURPOS;\nextern unsigned IOCTL_FBIOGCURPOS;\nextern unsigned IOCTL_FBIOGCURMAX;\nextern unsigned IOCTL_KIOCTRANS;\nextern unsigned IOCTL_KIOCSETKEY;\nextern unsigned IOCTL_KIOCGETKEY;\nextern unsigned IOCTL_KIOCGTRANS;\nextern unsigned IOCTL_KIOCCMD;\nextern unsigned IOCTL_KIOCTYPE;\nextern unsigned IOCTL_KIOCSDIRECT;\nextern unsigned IOCTL_KIOCSKEY;\nextern unsigned IOCTL_KIOCGKEY;\nextern unsigned IOCTL_KIOCSLED;\nextern unsigned IOCTL_KIOCGLED;\nextern unsigned IOCTL_KIOCLAYOUT;\nextern unsigned IOCTL_VUIDSFORMAT;\nextern unsigned IOCTL_VUIDGFORMAT;\nextern unsigned IOCTL_STICIO_GXINFO;\nextern unsigned IOCTL_STICIO_RESET;\nextern unsigned IOCTL_STICIO_STARTQ;\nextern unsigned IOCTL_STICIO_STOPQ;\nextern unsigned IOCTL_UKYOPON_IDENTIFY;\nextern unsigned IOCTL_URIO_SEND_COMMAND;\nextern unsigned IOCTL_URIO_RECV_COMMAND;\nextern unsigned IOCTL_USB_REQUEST;\nextern unsigned IOCTL_USB_SETDEBUG;\nextern unsigned IOCTL_USB_DISCOVER;\nextern unsigned IOCTL_USB_DEVICEINFO;\nextern unsigned IOCTL_USB_DEVICEINFO_OLD;\nextern unsigned IOCTL_USB_DEVICESTATS;\nextern unsigned IOCTL_USB_GET_REPORT_DESC;\nextern unsigned IOCTL_USB_SET_IMMED;\nextern unsigned IOCTL_USB_GET_REPORT;\nextern unsigned IOCTL_USB_SET_REPORT;\nextern unsigned IOCTL_USB_GET_REPORT_ID;\nextern unsigned IOCTL_USB_GET_CONFIG;\nextern unsigned IOCTL_USB_SET_CONFIG;\nextern unsigned IOCTL_USB_GET_ALTINTERFACE;\nextern unsigned IOCTL_USB_SET_ALTINTERFACE;\nextern unsigned IOCTL_USB_GET_NO_ALT;\nextern unsigned 
IOCTL_USB_GET_DEVICE_DESC;\nextern unsigned IOCTL_USB_GET_CONFIG_DESC;\nextern unsigned IOCTL_USB_GET_INTERFACE_DESC;\nextern unsigned IOCTL_USB_GET_ENDPOINT_DESC;\nextern unsigned IOCTL_USB_GET_FULL_DESC;\nextern unsigned IOCTL_USB_GET_STRING_DESC;\nextern unsigned IOCTL_USB_DO_REQUEST;\nextern unsigned IOCTL_USB_GET_DEVICEINFO;\nextern unsigned IOCTL_USB_GET_DEVICEINFO_OLD;\nextern unsigned IOCTL_USB_SET_SHORT_XFER;\nextern unsigned IOCTL_USB_SET_TIMEOUT;\nextern unsigned IOCTL_USB_SET_BULK_RA;\nextern unsigned IOCTL_USB_SET_BULK_WB;\nextern unsigned IOCTL_USB_SET_BULK_RA_OPT;\nextern unsigned IOCTL_USB_SET_BULK_WB_OPT;\nextern unsigned IOCTL_USB_GET_CM_OVER_DATA;\nextern unsigned IOCTL_USB_SET_CM_OVER_DATA;\nextern unsigned IOCTL_UTOPPYIOTURBO;\nextern unsigned IOCTL_UTOPPYIOCANCEL;\nextern unsigned IOCTL_UTOPPYIOREBOOT;\nextern unsigned IOCTL_UTOPPYIOSTATS;\nextern unsigned IOCTL_UTOPPYIORENAME;\nextern unsigned IOCTL_UTOPPYIOMKDIR;\nextern unsigned IOCTL_UTOPPYIODELETE;\nextern unsigned IOCTL_UTOPPYIOREADDIR;\nextern unsigned IOCTL_UTOPPYIOREADFILE;\nextern unsigned IOCTL_UTOPPYIOWRITEFILE;\nextern unsigned IOCTL_DIOSXDCMD;\nextern unsigned IOCTL_VT_OPENQRY;\nextern unsigned IOCTL_VT_SETMODE;\nextern unsigned IOCTL_VT_GETMODE;\nextern unsigned IOCTL_VT_RELDISP;\nextern unsigned IOCTL_VT_ACTIVATE;\nextern unsigned IOCTL_VT_WAITACTIVE;\nextern unsigned IOCTL_VT_GETACTIVE;\nextern unsigned IOCTL_VT_GETSTATE;\nextern unsigned IOCTL_KDGETKBENT;\nextern unsigned IOCTL_KDGKBMODE;\nextern unsigned IOCTL_KDSKBMODE;\nextern unsigned IOCTL_KDMKTONE;\nextern unsigned IOCTL_KDSETMODE;\nextern unsigned IOCTL_KDENABIO;\nextern unsigned IOCTL_KDDISABIO;\nextern unsigned IOCTL_KDGKBTYPE;\nextern unsigned IOCTL_KDGETLED;\nextern unsigned IOCTL_KDSETLED;\nextern unsigned IOCTL_KDSETRAD;\nextern unsigned IOCTL_VGAPCVTID;\nextern unsigned IOCTL_CONS_GETVERS;\nextern unsigned IOCTL_WSKBDIO_GTYPE;\nextern unsigned IOCTL_WSKBDIO_BELL;\nextern unsigned 
IOCTL_WSKBDIO_COMPLEXBELL;\nextern unsigned IOCTL_WSKBDIO_SETBELL;\nextern unsigned IOCTL_WSKBDIO_GETBELL;\nextern unsigned IOCTL_WSKBDIO_SETDEFAULTBELL;\nextern unsigned IOCTL_WSKBDIO_GETDEFAULTBELL;\nextern unsigned IOCTL_WSKBDIO_SETKEYREPEAT;\nextern unsigned IOCTL_WSKBDIO_GETKEYREPEAT;\nextern unsigned IOCTL_WSKBDIO_SETDEFAULTKEYREPEAT;\nextern unsigned IOCTL_WSKBDIO_GETDEFAULTKEYREPEAT;\nextern unsigned IOCTL_WSKBDIO_SETLEDS;\nextern unsigned IOCTL_WSKBDIO_GETLEDS;\nextern unsigned IOCTL_WSKBDIO_GETMAP;\nextern unsigned IOCTL_WSKBDIO_SETMAP;\nextern unsigned IOCTL_WSKBDIO_GETENCODING;\nextern unsigned IOCTL_WSKBDIO_SETENCODING;\nextern unsigned IOCTL_WSKBDIO_SETMODE;\nextern unsigned IOCTL_WSKBDIO_GETMODE;\nextern unsigned IOCTL_WSKBDIO_SETKEYCLICK;\nextern unsigned IOCTL_WSKBDIO_GETKEYCLICK;\nextern unsigned IOCTL_WSKBDIO_GETSCROLL;\nextern unsigned IOCTL_WSKBDIO_SETSCROLL;\nextern unsigned IOCTL_WSKBDIO_SETVERSION;\nextern unsigned IOCTL_WSMOUSEIO_GTYPE;\nextern unsigned IOCTL_WSMOUSEIO_SRES;\nextern unsigned IOCTL_WSMOUSEIO_SSCALE;\nextern unsigned IOCTL_WSMOUSEIO_SRATE;\nextern unsigned IOCTL_WSMOUSEIO_SCALIBCOORDS;\nextern unsigned IOCTL_WSMOUSEIO_GCALIBCOORDS;\nextern unsigned IOCTL_WSMOUSEIO_GETID;\nextern unsigned IOCTL_WSMOUSEIO_GETREPEAT;\nextern unsigned IOCTL_WSMOUSEIO_SETREPEAT;\nextern unsigned IOCTL_WSMOUSEIO_SETVERSION;\nextern unsigned IOCTL_WSDISPLAYIO_GTYPE;\nextern unsigned IOCTL_WSDISPLAYIO_GINFO;\nextern unsigned IOCTL_WSDISPLAYIO_GETCMAP;\nextern unsigned IOCTL_WSDISPLAYIO_PUTCMAP;\nextern unsigned IOCTL_WSDISPLAYIO_GVIDEO;\nextern unsigned IOCTL_WSDISPLAYIO_SVIDEO;\nextern unsigned IOCTL_WSDISPLAYIO_GCURPOS;\nextern unsigned IOCTL_WSDISPLAYIO_SCURPOS;\nextern unsigned IOCTL_WSDISPLAYIO_GCURMAX;\nextern unsigned IOCTL_WSDISPLAYIO_GCURSOR;\nextern unsigned IOCTL_WSDISPLAYIO_SCURSOR;\nextern unsigned IOCTL_WSDISPLAYIO_GMODE;\nextern unsigned IOCTL_WSDISPLAYIO_SMODE;\nextern unsigned IOCTL_WSDISPLAYIO_LDFONT;\nextern unsigned 
IOCTL_WSDISPLAYIO_ADDSCREEN;\nextern unsigned IOCTL_WSDISPLAYIO_DELSCREEN;\nextern unsigned IOCTL_WSDISPLAYIO_SFONT;\nextern unsigned IOCTL__O_WSDISPLAYIO_SETKEYBOARD;\nextern unsigned IOCTL_WSDISPLAYIO_GETPARAM;\nextern unsigned IOCTL_WSDISPLAYIO_SETPARAM;\nextern unsigned IOCTL_WSDISPLAYIO_GETACTIVESCREEN;\nextern unsigned IOCTL_WSDISPLAYIO_GETWSCHAR;\nextern unsigned IOCTL_WSDISPLAYIO_PUTWSCHAR;\nextern unsigned IOCTL_WSDISPLAYIO_DGSCROLL;\nextern unsigned IOCTL_WSDISPLAYIO_DSSCROLL;\nextern unsigned IOCTL_WSDISPLAYIO_GMSGATTRS;\nextern unsigned IOCTL_WSDISPLAYIO_SMSGATTRS;\nextern unsigned IOCTL_WSDISPLAYIO_GBORDER;\nextern unsigned IOCTL_WSDISPLAYIO_SBORDER;\nextern unsigned IOCTL_WSDISPLAYIO_SSPLASH;\nextern unsigned IOCTL_WSDISPLAYIO_SPROGRESS;\nextern unsigned IOCTL_WSDISPLAYIO_LINEBYTES;\nextern unsigned IOCTL_WSDISPLAYIO_SETVERSION;\nextern unsigned IOCTL_WSMUXIO_ADD_DEVICE;\nextern unsigned IOCTL_WSMUXIO_REMOVE_DEVICE;\nextern unsigned IOCTL_WSMUXIO_LIST_DEVICES;\nextern unsigned IOCTL_WSMUXIO_INJECTEVENT;\nextern unsigned IOCTL_WSDISPLAYIO_GET_BUSID;\nextern unsigned IOCTL_WSDISPLAYIO_GET_EDID;\nextern unsigned IOCTL_WSDISPLAYIO_SET_POLLING;\nextern unsigned IOCTL_WSDISPLAYIO_GET_FBINFO;\nextern unsigned IOCTL_WSDISPLAYIO_DOBLIT;\nextern unsigned IOCTL_WSDISPLAYIO_WAITBLIT;\nextern unsigned IOCTL_BIOCLOCATE;\nextern unsigned IOCTL_BIOCINQ;\nextern unsigned IOCTL_BIOCDISK_NOVOL;\nextern unsigned IOCTL_BIOCDISK;\nextern unsigned IOCTL_BIOCVOL;\nextern unsigned IOCTL_BIOCALARM;\nextern unsigned IOCTL_BIOCBLINK;\nextern unsigned IOCTL_BIOCSETSTATE;\nextern unsigned IOCTL_BIOCVOLOPS;\nextern unsigned IOCTL_MD_GETCONF;\nextern unsigned IOCTL_MD_SETCONF;\nextern unsigned IOCTL_CCDIOCSET;\nextern unsigned IOCTL_CCDIOCCLR;\nextern unsigned IOCTL_CGDIOCSET;\nextern unsigned IOCTL_CGDIOCCLR;\nextern unsigned IOCTL_CGDIOCGET;\nextern unsigned IOCTL_FSSIOCSET;\nextern unsigned IOCTL_FSSIOCGET;\nextern unsigned IOCTL_FSSIOCCLR;\nextern unsigned 
IOCTL_FSSIOFSET;\nextern unsigned IOCTL_FSSIOFGET;\nextern unsigned IOCTL_BTDEV_ATTACH;\nextern unsigned IOCTL_BTDEV_DETACH;\nextern unsigned IOCTL_BTSCO_GETINFO;\nextern unsigned IOCTL_KTTCP_IO_SEND;\nextern unsigned IOCTL_KTTCP_IO_RECV;\nextern unsigned IOCTL_IOC_LOCKSTAT_GVERSION;\nextern unsigned IOCTL_IOC_LOCKSTAT_ENABLE;\nextern unsigned IOCTL_IOC_LOCKSTAT_DISABLE;\nextern unsigned IOCTL_VNDIOCSET;\nextern unsigned IOCTL_VNDIOCCLR;\nextern unsigned IOCTL_VNDIOCGET;\nextern unsigned IOCTL_SPKRTONE;\nextern unsigned IOCTL_SPKRTUNE;\nextern unsigned IOCTL_SPKRGETVOL;\nextern unsigned IOCTL_SPKRSETVOL;\n#if defined(__x86_64__)\nextern unsigned IOCTL_NVMM_IOC_CAPABILITY;\nextern unsigned IOCTL_NVMM_IOC_MACHINE_CREATE;\nextern unsigned IOCTL_NVMM_IOC_MACHINE_DESTROY;\nextern unsigned IOCTL_NVMM_IOC_MACHINE_CONFIGURE;\nextern unsigned IOCTL_NVMM_IOC_VCPU_CREATE;\nextern unsigned IOCTL_NVMM_IOC_VCPU_DESTROY;\nextern unsigned IOCTL_NVMM_IOC_VCPU_CONFIGURE;\nextern unsigned IOCTL_NVMM_IOC_VCPU_SETSTATE;\nextern unsigned IOCTL_NVMM_IOC_VCPU_GETSTATE;\nextern unsigned IOCTL_NVMM_IOC_VCPU_INJECT;\nextern unsigned IOCTL_NVMM_IOC_VCPU_RUN;\nextern unsigned IOCTL_NVMM_IOC_GPA_MAP;\nextern unsigned IOCTL_NVMM_IOC_GPA_UNMAP;\nextern unsigned IOCTL_NVMM_IOC_HVA_MAP;\nextern unsigned IOCTL_NVMM_IOC_HVA_UNMAP;\nextern unsigned IOCTL_NVMM_IOC_CTL;\n#endif\nextern unsigned IOCTL_AUTOFSREQUEST;\nextern unsigned IOCTL_AUTOFSDONE;\nextern unsigned IOCTL_BIOCGBLEN;\nextern unsigned IOCTL_BIOCSBLEN;\nextern unsigned IOCTL_BIOCSETF;\nextern unsigned IOCTL_BIOCFLUSH;\nextern unsigned IOCTL_BIOCPROMISC;\nextern unsigned IOCTL_BIOCGDLT;\nextern unsigned IOCTL_BIOCGETIF;\nextern unsigned IOCTL_BIOCSETIF;\nextern unsigned IOCTL_BIOCGSTATS;\nextern unsigned IOCTL_BIOCGSTATSOLD;\nextern unsigned IOCTL_BIOCIMMEDIATE;\nextern unsigned IOCTL_BIOCVERSION;\nextern unsigned IOCTL_BIOCSTCPF;\nextern unsigned IOCTL_BIOCSUDPF;\nextern unsigned IOCTL_BIOCGHDRCMPLT;\nextern unsigned 
IOCTL_BIOCSHDRCMPLT;\nextern unsigned IOCTL_BIOCSDLT;\nextern unsigned IOCTL_BIOCGDLTLIST;\nextern unsigned IOCTL_BIOCGDIRECTION;\nextern unsigned IOCTL_BIOCSDIRECTION;\nextern unsigned IOCTL_BIOCSRTIMEOUT;\nextern unsigned IOCTL_BIOCGRTIMEOUT;\nextern unsigned IOCTL_BIOCGFEEDBACK;\nextern unsigned IOCTL_BIOCSFEEDBACK;\nextern unsigned IOCTL_GRESADDRS;\nextern unsigned IOCTL_GRESADDRD;\nextern unsigned IOCTL_GREGADDRS;\nextern unsigned IOCTL_GREGADDRD;\nextern unsigned IOCTL_GRESPROTO;\nextern unsigned IOCTL_GREGPROTO;\nextern unsigned IOCTL_GRESSOCK;\nextern unsigned IOCTL_GREDSOCK;\nextern unsigned IOCTL_PPPIOCGRAWIN;\nextern unsigned IOCTL_PPPIOCGFLAGS;\nextern unsigned IOCTL_PPPIOCSFLAGS;\nextern unsigned IOCTL_PPPIOCGASYNCMAP;\nextern unsigned IOCTL_PPPIOCSASYNCMAP;\nextern unsigned IOCTL_PPPIOCGUNIT;\nextern unsigned IOCTL_PPPIOCGRASYNCMAP;\nextern unsigned IOCTL_PPPIOCSRASYNCMAP;\nextern unsigned IOCTL_PPPIOCGMRU;\nextern unsigned IOCTL_PPPIOCSMRU;\nextern unsigned IOCTL_PPPIOCSMAXCID;\nextern unsigned IOCTL_PPPIOCGXASYNCMAP;\nextern unsigned IOCTL_PPPIOCSXASYNCMAP;\nextern unsigned IOCTL_PPPIOCXFERUNIT;\nextern unsigned IOCTL_PPPIOCSCOMPRESS;\nextern unsigned IOCTL_PPPIOCGNPMODE;\nextern unsigned IOCTL_PPPIOCSNPMODE;\nextern unsigned IOCTL_PPPIOCGIDLE;\nextern unsigned IOCTL_PPPIOCGMTU;\nextern unsigned IOCTL_PPPIOCSMTU;\nextern unsigned IOCTL_SIOCGPPPSTATS;\nextern unsigned IOCTL_SIOCGPPPCSTATS;\nextern unsigned IOCTL_IOC_NPF_VERSION;\nextern unsigned IOCTL_IOC_NPF_SWITCH;\nextern unsigned IOCTL_IOC_NPF_LOAD;\nextern unsigned IOCTL_IOC_NPF_TABLE;\nextern unsigned IOCTL_IOC_NPF_STATS;\nextern unsigned IOCTL_IOC_NPF_SAVE;\nextern unsigned IOCTL_IOC_NPF_RULE;\nextern unsigned IOCTL_IOC_NPF_CONN_LOOKUP;\nextern unsigned IOCTL_IOC_NPF_TABLE_REPLACE;\nextern unsigned IOCTL_PPPOESETPARMS;\nextern unsigned IOCTL_PPPOEGETPARMS;\nextern unsigned IOCTL_PPPOEGETSESSION;\nextern unsigned IOCTL_SPPPGETAUTHCFG;\nextern unsigned IOCTL_SPPPSETAUTHCFG;\nextern unsigned 
IOCTL_SPPPGETLCPCFG;\nextern unsigned IOCTL_SPPPSETLCPCFG;\nextern unsigned IOCTL_SPPPGETSTATUS;\nextern unsigned IOCTL_SPPPGETSTATUSNCP;\nextern unsigned IOCTL_SPPPGETIDLETO;\nextern unsigned IOCTL_SPPPSETIDLETO;\nextern unsigned IOCTL_SPPPGETAUTHFAILURES;\nextern unsigned IOCTL_SPPPSETAUTHFAILURE;\nextern unsigned IOCTL_SPPPSETDNSOPTS;\nextern unsigned IOCTL_SPPPGETDNSOPTS;\nextern unsigned IOCTL_SPPPGETDNSADDRS;\nextern unsigned IOCTL_SPPPSETKEEPALIVE;\nextern unsigned IOCTL_SPPPGETKEEPALIVE;\nextern unsigned IOCTL_SRT_GETNRT;\nextern unsigned IOCTL_SRT_GETRT;\nextern unsigned IOCTL_SRT_SETRT;\nextern unsigned IOCTL_SRT_DELRT;\nextern unsigned IOCTL_SRT_SFLAGS;\nextern unsigned IOCTL_SRT_GFLAGS;\nextern unsigned IOCTL_SRT_SGFLAGS;\nextern unsigned IOCTL_SRT_DEBUG;\nextern unsigned IOCTL_TAPGIFNAME;\nextern unsigned IOCTL_TUNSDEBUG;\nextern unsigned IOCTL_TUNGDEBUG;\nextern unsigned IOCTL_TUNSIFMODE;\nextern unsigned IOCTL_TUNSLMODE;\nextern unsigned IOCTL_TUNSIFHEAD;\nextern unsigned IOCTL_TUNGIFHEAD;\nextern unsigned IOCTL_DIOCSTART;\nextern unsigned IOCTL_DIOCSTOP;\nextern unsigned IOCTL_DIOCADDRULE;\nextern unsigned IOCTL_DIOCGETRULES;\nextern unsigned IOCTL_DIOCGETRULE;\nextern unsigned IOCTL_DIOCSETLCK;\nextern unsigned IOCTL_DIOCCLRSTATES;\nextern unsigned IOCTL_DIOCGETSTATE;\nextern unsigned IOCTL_DIOCSETSTATUSIF;\nextern unsigned IOCTL_DIOCGETSTATUS;\nextern unsigned IOCTL_DIOCCLRSTATUS;\nextern unsigned IOCTL_DIOCNATLOOK;\nextern unsigned IOCTL_DIOCSETDEBUG;\nextern unsigned IOCTL_DIOCGETSTATES;\nextern unsigned IOCTL_DIOCCHANGERULE;\nextern unsigned IOCTL_DIOCSETTIMEOUT;\nextern unsigned IOCTL_DIOCGETTIMEOUT;\nextern unsigned IOCTL_DIOCADDSTATE;\nextern unsigned IOCTL_DIOCCLRRULECTRS;\nextern unsigned IOCTL_DIOCGETLIMIT;\nextern unsigned IOCTL_DIOCSETLIMIT;\nextern unsigned IOCTL_DIOCKILLSTATES;\nextern unsigned IOCTL_DIOCSTARTALTQ;\nextern unsigned IOCTL_DIOCSTOPALTQ;\nextern unsigned IOCTL_DIOCADDALTQ;\nextern unsigned IOCTL_DIOCGETALTQS;\nextern 
unsigned IOCTL_DIOCGETALTQ;\nextern unsigned IOCTL_DIOCCHANGEALTQ;\nextern unsigned IOCTL_DIOCGETQSTATS;\nextern unsigned IOCTL_DIOCBEGINADDRS;\nextern unsigned IOCTL_DIOCADDADDR;\nextern unsigned IOCTL_DIOCGETADDRS;\nextern unsigned IOCTL_DIOCGETADDR;\nextern unsigned IOCTL_DIOCCHANGEADDR;\nextern unsigned IOCTL_DIOCADDSTATES;\nextern unsigned IOCTL_DIOCGETRULESETS;\nextern unsigned IOCTL_DIOCGETRULESET;\nextern unsigned IOCTL_DIOCRCLRTABLES;\nextern unsigned IOCTL_DIOCRADDTABLES;\nextern unsigned IOCTL_DIOCRDELTABLES;\nextern unsigned IOCTL_DIOCRGETTABLES;\nextern unsigned IOCTL_DIOCRGETTSTATS;\nextern unsigned IOCTL_DIOCRCLRTSTATS;\nextern unsigned IOCTL_DIOCRCLRADDRS;\nextern unsigned IOCTL_DIOCRADDADDRS;\nextern unsigned IOCTL_DIOCRDELADDRS;\nextern unsigned IOCTL_DIOCRSETADDRS;\nextern unsigned IOCTL_DIOCRGETADDRS;\nextern unsigned IOCTL_DIOCRGETASTATS;\nextern unsigned IOCTL_DIOCRCLRASTATS;\nextern unsigned IOCTL_DIOCRTSTADDRS;\nextern unsigned IOCTL_DIOCRSETTFLAGS;\nextern unsigned IOCTL_DIOCRINADEFINE;\nextern unsigned IOCTL_DIOCOSFPFLUSH;\nextern unsigned IOCTL_DIOCOSFPADD;\nextern unsigned IOCTL_DIOCOSFPGET;\nextern unsigned IOCTL_DIOCXBEGIN;\nextern unsigned IOCTL_DIOCXCOMMIT;\nextern unsigned IOCTL_DIOCXROLLBACK;\nextern unsigned IOCTL_DIOCGETSRCNODES;\nextern unsigned IOCTL_DIOCCLRSRCNODES;\nextern unsigned IOCTL_DIOCSETHOSTID;\nextern unsigned IOCTL_DIOCIGETIFACES;\nextern unsigned IOCTL_DIOCSETIFFLAG;\nextern unsigned IOCTL_DIOCCLRIFFLAG;\nextern unsigned IOCTL_DIOCKILLSRCNODES;\nextern unsigned IOCTL_SLIOCGUNIT;\nextern unsigned IOCTL_SIOCGBTINFO;\nextern unsigned IOCTL_SIOCGBTINFOA;\nextern unsigned IOCTL_SIOCNBTINFO;\nextern unsigned IOCTL_SIOCSBTFLAGS;\nextern unsigned IOCTL_SIOCSBTPOLICY;\nextern unsigned IOCTL_SIOCSBTPTYPE;\nextern unsigned IOCTL_SIOCGBTSTATS;\nextern unsigned IOCTL_SIOCZBTSTATS;\nextern unsigned IOCTL_SIOCBTDUMP;\nextern unsigned IOCTL_SIOCSBTSCOMTU;\nextern unsigned IOCTL_SIOCGBTFEAT;\nextern unsigned 
IOCTL_SIOCADNAT;\nextern unsigned IOCTL_SIOCRMNAT;\nextern unsigned IOCTL_SIOCGNATS;\nextern unsigned IOCTL_SIOCGNATL;\nextern unsigned IOCTL_SIOCPURGENAT;\nextern unsigned IOCTL_SIOCCONNECTX;\nextern unsigned IOCTL_SIOCCONNECTXDEL;\nextern unsigned IOCTL_SIOCSIFINFO_FLAGS;\nextern unsigned IOCTL_SIOCAADDRCTL_POLICY;\nextern unsigned IOCTL_SIOCDADDRCTL_POLICY;\nextern unsigned IOCTL_SMBIOC_OPENSESSION;\nextern unsigned IOCTL_SMBIOC_OPENSHARE;\nextern unsigned IOCTL_SMBIOC_REQUEST;\nextern unsigned IOCTL_SMBIOC_SETFLAGS;\nextern unsigned IOCTL_SMBIOC_LOOKUP;\nextern unsigned IOCTL_SMBIOC_READ;\nextern unsigned IOCTL_SMBIOC_WRITE;\nextern unsigned IOCTL_AGPIOC_INFO;\nextern unsigned IOCTL_AGPIOC_ACQUIRE;\nextern unsigned IOCTL_AGPIOC_RELEASE;\nextern unsigned IOCTL_AGPIOC_SETUP;\nextern unsigned IOCTL_AGPIOC_ALLOCATE;\nextern unsigned IOCTL_AGPIOC_DEALLOCATE;\nextern unsigned IOCTL_AGPIOC_BIND;\nextern unsigned IOCTL_AGPIOC_UNBIND;\nextern unsigned IOCTL_AUDIO_GETINFO;\nextern unsigned IOCTL_AUDIO_SETINFO;\nextern unsigned IOCTL_AUDIO_DRAIN;\nextern unsigned IOCTL_AUDIO_FLUSH;\nextern unsigned IOCTL_AUDIO_WSEEK;\nextern unsigned IOCTL_AUDIO_RERROR;\nextern unsigned IOCTL_AUDIO_GETDEV;\nextern unsigned IOCTL_AUDIO_GETENC;\nextern unsigned IOCTL_AUDIO_GETFD;\nextern unsigned IOCTL_AUDIO_SETFD;\nextern unsigned IOCTL_AUDIO_PERROR;\nextern unsigned IOCTL_AUDIO_GETIOFFS;\nextern unsigned IOCTL_AUDIO_GETOOFFS;\nextern unsigned IOCTL_AUDIO_GETPROPS;\nextern unsigned IOCTL_AUDIO_GETBUFINFO;\nextern unsigned IOCTL_AUDIO_SETCHAN;\nextern unsigned IOCTL_AUDIO_GETCHAN;\nextern unsigned IOCTL_AUDIO_QUERYFORMAT;\nextern unsigned IOCTL_AUDIO_GETFORMAT;\nextern unsigned IOCTL_AUDIO_SETFORMAT;\nextern unsigned IOCTL_AUDIO_MIXER_READ;\nextern unsigned IOCTL_AUDIO_MIXER_WRITE;\nextern unsigned IOCTL_AUDIO_MIXER_DEVINFO;\nextern unsigned IOCTL_ATAIOCCOMMAND;\nextern unsigned IOCTL_ATABUSIOSCAN;\nextern unsigned IOCTL_ATABUSIORESET;\nextern unsigned IOCTL_ATABUSIODETACH;\nextern unsigned 
IOCTL_CDIOCPLAYTRACKS;\nextern unsigned IOCTL_CDIOCPLAYBLOCKS;\nextern unsigned IOCTL_CDIOCREADSUBCHANNEL;\nextern unsigned IOCTL_CDIOREADTOCHEADER;\nextern unsigned IOCTL_CDIOREADTOCENTRIES;\nextern unsigned IOCTL_CDIOREADMSADDR;\nextern unsigned IOCTL_CDIOCSETPATCH;\nextern unsigned IOCTL_CDIOCGETVOL;\nextern unsigned IOCTL_CDIOCSETVOL;\nextern unsigned IOCTL_CDIOCSETMONO;\nextern unsigned IOCTL_CDIOCSETSTEREO;\nextern unsigned IOCTL_CDIOCSETMUTE;\nextern unsigned IOCTL_CDIOCSETLEFT;\nextern unsigned IOCTL_CDIOCSETRIGHT;\nextern unsigned IOCTL_CDIOCSETDEBUG;\nextern unsigned IOCTL_CDIOCCLRDEBUG;\nextern unsigned IOCTL_CDIOCPAUSE;\nextern unsigned IOCTL_CDIOCRESUME;\nextern unsigned IOCTL_CDIOCRESET;\nextern unsigned IOCTL_CDIOCSTART;\nextern unsigned IOCTL_CDIOCSTOP;\nextern unsigned IOCTL_CDIOCEJECT;\nextern unsigned IOCTL_CDIOCALLOW;\nextern unsigned IOCTL_CDIOCPREVENT;\nextern unsigned IOCTL_CDIOCCLOSE;\nextern unsigned IOCTL_CDIOCPLAYMSF;\nextern unsigned IOCTL_CDIOCLOADUNLOAD;\nextern unsigned IOCTL_CHIOMOVE;\nextern unsigned IOCTL_CHIOEXCHANGE;\nextern unsigned IOCTL_CHIOPOSITION;\nextern unsigned IOCTL_CHIOGPICKER;\nextern unsigned IOCTL_CHIOSPICKER;\nextern unsigned IOCTL_CHIOGPARAMS;\nextern unsigned IOCTL_CHIOIELEM;\nextern unsigned IOCTL_OCHIOGSTATUS;\nextern unsigned IOCTL_CHIOGSTATUS;\nextern unsigned IOCTL_CHIOSVOLTAG;\nextern unsigned IOCTL_CLOCKCTL_SETTIMEOFDAY;\nextern unsigned IOCTL_CLOCKCTL_ADJTIME;\nextern unsigned IOCTL_CLOCKCTL_CLOCK_SETTIME;\nextern unsigned IOCTL_CLOCKCTL_NTP_ADJTIME;\nextern unsigned IOCTL_IOC_CPU_SETSTATE;\nextern unsigned IOCTL_IOC_CPU_GETSTATE;\nextern unsigned IOCTL_IOC_CPU_GETCOUNT;\nextern unsigned IOCTL_IOC_CPU_MAPID;\nextern unsigned IOCTL_IOC_CPU_UCODE_GET_VERSION;\nextern unsigned IOCTL_IOC_CPU_UCODE_APPLY;\nextern unsigned IOCTL_DIOCGDINFO;\nextern unsigned IOCTL_DIOCSDINFO;\nextern unsigned IOCTL_DIOCWDINFO;\nextern unsigned IOCTL_DIOCRFORMAT;\nextern unsigned IOCTL_DIOCWFORMAT;\nextern unsigned 
IOCTL_DIOCSSTEP;\nextern unsigned IOCTL_DIOCSRETRIES;\nextern unsigned IOCTL_DIOCKLABEL;\nextern unsigned IOCTL_DIOCWLABEL;\nextern unsigned IOCTL_DIOCSBAD;\nextern unsigned IOCTL_DIOCEJECT;\nextern unsigned IOCTL_ODIOCEJECT;\nextern unsigned IOCTL_DIOCLOCK;\nextern unsigned IOCTL_DIOCGDEFLABEL;\nextern unsigned IOCTL_DIOCCLRLABEL;\nextern unsigned IOCTL_DIOCGCACHE;\nextern unsigned IOCTL_DIOCSCACHE;\nextern unsigned IOCTL_DIOCCACHESYNC;\nextern unsigned IOCTL_DIOCBSLIST;\nextern unsigned IOCTL_DIOCBSFLUSH;\nextern unsigned IOCTL_DIOCAWEDGE;\nextern unsigned IOCTL_DIOCGWEDGEINFO;\nextern unsigned IOCTL_DIOCDWEDGE;\nextern unsigned IOCTL_DIOCLWEDGES;\nextern unsigned IOCTL_DIOCGSTRATEGY;\nextern unsigned IOCTL_DIOCSSTRATEGY;\nextern unsigned IOCTL_DIOCGDISKINFO;\nextern unsigned IOCTL_DIOCTUR;\nextern unsigned IOCTL_DIOCMWEDGES;\nextern unsigned IOCTL_DIOCGSECTORSIZE;\nextern unsigned IOCTL_DIOCGMEDIASIZE;\nextern unsigned IOCTL_DIOCRMWEDGES;\nextern unsigned IOCTL_DRVDETACHDEV;\nextern unsigned IOCTL_DRVRESCANBUS;\nextern unsigned IOCTL_DRVCTLCOMMAND;\nextern unsigned IOCTL_DRVRESUMEDEV;\nextern unsigned IOCTL_DRVLISTDEV;\nextern unsigned IOCTL_DRVGETEVENT;\nextern unsigned IOCTL_DRVSUSPENDDEV;\nextern unsigned IOCTL_DVD_READ_STRUCT;\nextern unsigned IOCTL_DVD_WRITE_STRUCT;\nextern unsigned IOCTL_DVD_AUTH;\nextern unsigned IOCTL_ENVSYS_GETDICTIONARY;\nextern unsigned IOCTL_ENVSYS_SETDICTIONARY;\nextern unsigned IOCTL_ENVSYS_REMOVEPROPS;\nextern unsigned IOCTL_ENVSYS_GTREDATA;\nextern unsigned IOCTL_ENVSYS_GTREINFO;\nextern unsigned IOCTL_KFILTER_BYFILTER;\nextern unsigned IOCTL_KFILTER_BYNAME;\nextern unsigned IOCTL_FDIOCGETOPTS;\nextern unsigned IOCTL_FDIOCSETOPTS;\nextern unsigned IOCTL_FDIOCSETFORMAT;\nextern unsigned IOCTL_FDIOCGETFORMAT;\nextern unsigned IOCTL_FDIOCFORMAT_TRACK;\nextern unsigned IOCTL_FIOCLEX;\nextern unsigned IOCTL_FIONCLEX;\nextern unsigned IOCTL_FIOSEEKDATA;\nextern unsigned IOCTL_FIOSEEKHOLE;\nextern unsigned IOCTL_FIONREAD;\nextern 
unsigned IOCTL_FIONBIO;\nextern unsigned IOCTL_FIOASYNC;\nextern unsigned IOCTL_FIOSETOWN;\nextern unsigned IOCTL_FIOGETOWN;\nextern unsigned IOCTL_OFIOGETBMAP;\nextern unsigned IOCTL_FIOGETBMAP;\nextern unsigned IOCTL_FIONWRITE;\nextern unsigned IOCTL_FIONSPACE;\nextern unsigned IOCTL_GPIOINFO;\nextern unsigned IOCTL_GPIOSET;\nextern unsigned IOCTL_GPIOUNSET;\nextern unsigned IOCTL_GPIOREAD;\nextern unsigned IOCTL_GPIOWRITE;\nextern unsigned IOCTL_GPIOTOGGLE;\nextern unsigned IOCTL_GPIOATTACH;\nextern unsigned IOCTL_PTIOCNETBSD;\nextern unsigned IOCTL_PTIOCSUNOS;\nextern unsigned IOCTL_PTIOCLINUX;\nextern unsigned IOCTL_PTIOCFREEBSD;\nextern unsigned IOCTL_PTIOCULTRIX;\nextern unsigned IOCTL_TIOCHPCL;\nextern unsigned IOCTL_TIOCGETP;\nextern unsigned IOCTL_TIOCSETP;\nextern unsigned IOCTL_TIOCSETN;\nextern unsigned IOCTL_TIOCSETC;\nextern unsigned IOCTL_TIOCGETC;\nextern unsigned IOCTL_TIOCLBIS;\nextern unsigned IOCTL_TIOCLBIC;\nextern unsigned IOCTL_TIOCLSET;\nextern unsigned IOCTL_TIOCLGET;\nextern unsigned IOCTL_TIOCSLTC;\nextern unsigned IOCTL_TIOCGLTC;\nextern unsigned IOCTL_OTIOCCONS;\nextern unsigned IOCTL_JOY_SETTIMEOUT;\nextern unsigned IOCTL_JOY_GETTIMEOUT;\nextern unsigned IOCTL_JOY_SET_X_OFFSET;\nextern unsigned IOCTL_JOY_SET_Y_OFFSET;\nextern unsigned IOCTL_JOY_GET_X_OFFSET;\nextern unsigned IOCTL_JOY_GET_Y_OFFSET;\nextern unsigned IOCTL_OKIOCGSYMBOL;\nextern unsigned IOCTL_OKIOCGVALUE;\nextern unsigned IOCTL_KIOCGSIZE;\nextern unsigned IOCTL_KIOCGVALUE;\nextern unsigned IOCTL_KIOCGSYMBOL;\nextern unsigned IOCTL_LUAINFO;\nextern unsigned IOCTL_LUACREATE;\nextern unsigned IOCTL_LUADESTROY;\nextern unsigned IOCTL_LUAREQUIRE;\nextern unsigned IOCTL_LUALOAD;\nextern unsigned IOCTL_MIDI_PRETIME;\nextern unsigned IOCTL_MIDI_MPUMODE;\nextern unsigned IOCTL_MIDI_MPUCMD;\nextern unsigned IOCTL_SEQUENCER_RESET;\nextern unsigned IOCTL_SEQUENCER_SYNC;\nextern unsigned IOCTL_SEQUENCER_INFO;\nextern unsigned IOCTL_SEQUENCER_CTRLRATE;\nextern unsigned 
IOCTL_SEQUENCER_GETOUTCOUNT;\nextern unsigned IOCTL_SEQUENCER_GETINCOUNT;\nextern unsigned IOCTL_SEQUENCER_RESETSAMPLES;\nextern unsigned IOCTL_SEQUENCER_NRSYNTHS;\nextern unsigned IOCTL_SEQUENCER_NRMIDIS;\nextern unsigned IOCTL_SEQUENCER_THRESHOLD;\nextern unsigned IOCTL_SEQUENCER_MEMAVL;\nextern unsigned IOCTL_SEQUENCER_PANIC;\nextern unsigned IOCTL_SEQUENCER_OUTOFBAND;\nextern unsigned IOCTL_SEQUENCER_GETTIME;\nextern unsigned IOCTL_SEQUENCER_TMR_TIMEBASE;\nextern unsigned IOCTL_SEQUENCER_TMR_START;\nextern unsigned IOCTL_SEQUENCER_TMR_STOP;\nextern unsigned IOCTL_SEQUENCER_TMR_CONTINUE;\nextern unsigned IOCTL_SEQUENCER_TMR_TEMPO;\nextern unsigned IOCTL_SEQUENCER_TMR_SOURCE;\nextern unsigned IOCTL_SEQUENCER_TMR_METRONOME;\nextern unsigned IOCTL_SEQUENCER_TMR_SELECT;\nextern unsigned IOCTL_SPI_IOCTL_CONFIGURE;\nextern unsigned IOCTL_SPI_IOCTL_TRANSFER;\nextern unsigned IOCTL_MTIOCTOP;\nextern unsigned IOCTL_MTIOCGET;\nextern unsigned IOCTL_MTIOCIEOT;\nextern unsigned IOCTL_MTIOCEEOT;\nextern unsigned IOCTL_MTIOCRDSPOS;\nextern unsigned IOCTL_MTIOCRDHPOS;\nextern unsigned IOCTL_MTIOCSLOCATE;\nextern unsigned IOCTL_MTIOCHLOCATE;\nextern unsigned IOCTL_POWER_EVENT_RECVDICT;\nextern unsigned IOCTL_POWER_IOC_GET_TYPE;\nextern unsigned IOCTL_RIOCGINFO;\nextern unsigned IOCTL_RIOCSINFO;\nextern unsigned IOCTL_RIOCSSRCH;\nextern unsigned IOCTL_RNDGETENTCNT;\nextern unsigned IOCTL_RNDGETSRCNUM;\nextern unsigned IOCTL_RNDGETSRCNAME;\nextern unsigned IOCTL_RNDCTL;\nextern unsigned IOCTL_RNDADDDATA;\nextern unsigned IOCTL_RNDGETPOOLSTAT;\nextern unsigned IOCTL_RNDGETESTNUM;\nextern unsigned IOCTL_RNDGETESTNAME;\nextern unsigned IOCTL_SCIOCGET;\nextern unsigned IOCTL_SCIOCSET;\nextern unsigned IOCTL_SCIOCRESTART;\nextern unsigned IOCTL_SCIOC_USE_ADF;\nextern unsigned IOCTL_SCIOCCOMMAND;\nextern unsigned IOCTL_SCIOCDEBUG;\nextern unsigned IOCTL_SCIOCIDENTIFY;\nextern unsigned IOCTL_OSCIOCIDENTIFY;\nextern unsigned IOCTL_SCIOCDECONFIG;\nextern unsigned 
IOCTL_SCIOCRECONFIG;\nextern unsigned IOCTL_SCIOCRESET;\nextern unsigned IOCTL_SCBUSIOSCAN;\nextern unsigned IOCTL_SCBUSIORESET;\nextern unsigned IOCTL_SCBUSIODETACH;\nextern unsigned IOCTL_SCBUSACCEL;\nextern unsigned IOCTL_SCBUSIOLLSCAN;\nextern unsigned IOCTL_SIOCSHIWAT;\nextern unsigned IOCTL_SIOCGHIWAT;\nextern unsigned IOCTL_SIOCSLOWAT;\nextern unsigned IOCTL_SIOCGLOWAT;\nextern unsigned IOCTL_SIOCATMARK;\nextern unsigned IOCTL_SIOCSPGRP;\nextern unsigned IOCTL_SIOCGPGRP;\nextern unsigned IOCTL_SIOCPEELOFF;\nextern unsigned IOCTL_SIOCADDRT;\nextern unsigned IOCTL_SIOCDELRT;\nextern unsigned IOCTL_SIOCSIFADDR;\nextern unsigned IOCTL_SIOCGIFADDR;\nextern unsigned IOCTL_SIOCSIFDSTADDR;\nextern unsigned IOCTL_SIOCGIFDSTADDR;\nextern unsigned IOCTL_SIOCSIFFLAGS;\nextern unsigned IOCTL_SIOCGIFFLAGS;\nextern unsigned IOCTL_SIOCGIFBRDADDR;\nextern unsigned IOCTL_SIOCSIFBRDADDR;\nextern unsigned IOCTL_SIOCGIFCONF;\nextern unsigned IOCTL_SIOCGIFNETMASK;\nextern unsigned IOCTL_SIOCSIFNETMASK;\nextern unsigned IOCTL_SIOCGIFMETRIC;\nextern unsigned IOCTL_SIOCSIFMETRIC;\nextern unsigned IOCTL_SIOCDIFADDR;\nextern unsigned IOCTL_SIOCAIFADDR;\nextern unsigned IOCTL_SIOCGIFALIAS;\nextern unsigned IOCTL_SIOCGIFAFLAG_IN;\nextern unsigned IOCTL_SIOCALIFADDR;\nextern unsigned IOCTL_SIOCGLIFADDR;\nextern unsigned IOCTL_SIOCDLIFADDR;\nextern unsigned IOCTL_SIOCSIFADDRPREF;\nextern unsigned IOCTL_SIOCGIFADDRPREF;\nextern unsigned IOCTL_SIOCADDMULTI;\nextern unsigned IOCTL_SIOCDELMULTI;\nextern unsigned IOCTL_SIOCGETVIFCNT;\nextern unsigned IOCTL_SIOCGETSGCNT;\nextern unsigned IOCTL_SIOCSIFMEDIA;\nextern unsigned IOCTL_SIOCGIFMEDIA;\nextern unsigned IOCTL_SIOCSIFGENERIC;\nextern unsigned IOCTL_SIOCGIFGENERIC;\nextern unsigned IOCTL_SIOCSIFPHYADDR;\nextern unsigned IOCTL_SIOCGIFPSRCADDR;\nextern unsigned IOCTL_SIOCGIFPDSTADDR;\nextern unsigned IOCTL_SIOCDIFPHYADDR;\nextern unsigned IOCTL_SIOCSLIFPHYADDR;\nextern unsigned IOCTL_SIOCGLIFPHYADDR;\nextern unsigned 
IOCTL_SIOCSIFMTU;\nextern unsigned IOCTL_SIOCGIFMTU;\nextern unsigned IOCTL_SIOCSDRVSPEC;\nextern unsigned IOCTL_SIOCGDRVSPEC;\nextern unsigned IOCTL_SIOCIFCREATE;\nextern unsigned IOCTL_SIOCIFDESTROY;\nextern unsigned IOCTL_SIOCIFGCLONERS;\nextern unsigned IOCTL_SIOCGIFDLT;\nextern unsigned IOCTL_SIOCGIFCAP;\nextern unsigned IOCTL_SIOCSIFCAP;\nextern unsigned IOCTL_SIOCSVH;\nextern unsigned IOCTL_SIOCGVH;\nextern unsigned IOCTL_SIOCINITIFADDR;\nextern unsigned IOCTL_SIOCGIFDATA;\nextern unsigned IOCTL_SIOCZIFDATA;\nextern unsigned IOCTL_SIOCGLINKSTR;\nextern unsigned IOCTL_SIOCSLINKSTR;\nextern unsigned IOCTL_SIOCGETHERCAP;\nextern unsigned IOCTL_SIOCGIFINDEX;\nextern unsigned IOCTL_SIOCSETHERCAP;\nextern unsigned IOCTL_SIOCSIFDESCR;\nextern unsigned IOCTL_SIOCGIFDESCR;\nextern unsigned IOCTL_SIOCGUMBINFO;\nextern unsigned IOCTL_SIOCSUMBPARAM;\nextern unsigned IOCTL_SIOCGUMBPARAM;\nextern unsigned IOCTL_SIOCSETPFSYNC;\nextern unsigned IOCTL_SIOCGETPFSYNC;\nextern unsigned IOCTL_PPS_IOC_CREATE;\nextern unsigned IOCTL_PPS_IOC_DESTROY;\nextern unsigned IOCTL_PPS_IOC_SETPARAMS;\nextern unsigned IOCTL_PPS_IOC_GETPARAMS;\nextern unsigned IOCTL_PPS_IOC_GETCAP;\nextern unsigned IOCTL_PPS_IOC_FETCH;\nextern unsigned IOCTL_PPS_IOC_KCBIND;\nextern unsigned IOCTL_TIOCEXCL;\nextern unsigned IOCTL_TIOCNXCL;\nextern unsigned IOCTL_TIOCFLUSH;\nextern unsigned IOCTL_TIOCGETA;\nextern unsigned IOCTL_TIOCSETA;\nextern unsigned IOCTL_TIOCSETAW;\nextern unsigned IOCTL_TIOCSETAF;\nextern unsigned IOCTL_TIOCGETD;\nextern unsigned IOCTL_TIOCSETD;\nextern unsigned IOCTL_TIOCGLINED;\nextern unsigned IOCTL_TIOCSLINED;\nextern unsigned IOCTL_TIOCSBRK;\nextern unsigned IOCTL_TIOCCBRK;\nextern unsigned IOCTL_TIOCSDTR;\nextern unsigned IOCTL_TIOCCDTR;\nextern unsigned IOCTL_TIOCGPGRP;\nextern unsigned IOCTL_TIOCSPGRP;\nextern unsigned IOCTL_TIOCOUTQ;\nextern unsigned IOCTL_TIOCSTI;\nextern unsigned IOCTL_TIOCNOTTY;\nextern unsigned IOCTL_TIOCPKT;\nextern unsigned IOCTL_TIOCSTOP;\nextern 
unsigned IOCTL_TIOCSTART;\nextern unsigned IOCTL_TIOCMSET;\nextern unsigned IOCTL_TIOCMBIS;\nextern unsigned IOCTL_TIOCMBIC;\nextern unsigned IOCTL_TIOCMGET;\nextern unsigned IOCTL_TIOCREMOTE;\nextern unsigned IOCTL_TIOCGWINSZ;\nextern unsigned IOCTL_TIOCSWINSZ;\nextern unsigned IOCTL_TIOCUCNTL;\nextern unsigned IOCTL_TIOCSTAT;\nextern unsigned IOCTL_TIOCGSID;\nextern unsigned IOCTL_TIOCCONS;\nextern unsigned IOCTL_TIOCSCTTY;\nextern unsigned IOCTL_TIOCEXT;\nextern unsigned IOCTL_TIOCSIG;\nextern unsigned IOCTL_TIOCDRAIN;\nextern unsigned IOCTL_TIOCGFLAGS;\nextern unsigned IOCTL_TIOCSFLAGS;\nextern unsigned IOCTL_TIOCDCDTIMESTAMP;\nextern unsigned IOCTL_TIOCRCVFRAME;\nextern unsigned IOCTL_TIOCXMTFRAME;\nextern unsigned IOCTL_TIOCPTMGET;\nextern unsigned IOCTL_TIOCGRANTPT;\nextern unsigned IOCTL_TIOCPTSNAME;\nextern unsigned IOCTL_TIOCSQSIZE;\nextern unsigned IOCTL_TIOCGQSIZE;\nextern unsigned IOCTL_VERIEXEC_LOAD;\nextern unsigned IOCTL_VERIEXEC_TABLESIZE;\nextern unsigned IOCTL_VERIEXEC_DELETE;\nextern unsigned IOCTL_VERIEXEC_QUERY;\nextern unsigned IOCTL_VERIEXEC_DUMP;\nextern unsigned IOCTL_VERIEXEC_FLUSH;\nextern unsigned IOCTL_VIDIOC_QUERYCAP;\nextern unsigned IOCTL_VIDIOC_RESERVED;\nextern unsigned IOCTL_VIDIOC_ENUM_FMT;\nextern unsigned IOCTL_VIDIOC_G_FMT;\nextern unsigned IOCTL_VIDIOC_S_FMT;\nextern unsigned IOCTL_VIDIOC_REQBUFS;\nextern unsigned IOCTL_VIDIOC_QUERYBUF;\nextern unsigned IOCTL_VIDIOC_G_FBUF;\nextern unsigned IOCTL_VIDIOC_S_FBUF;\nextern unsigned IOCTL_VIDIOC_OVERLAY;\nextern unsigned IOCTL_VIDIOC_QBUF;\nextern unsigned IOCTL_VIDIOC_DQBUF;\nextern unsigned IOCTL_VIDIOC_STREAMON;\nextern unsigned IOCTL_VIDIOC_STREAMOFF;\nextern unsigned IOCTL_VIDIOC_G_PARM;\nextern unsigned IOCTL_VIDIOC_S_PARM;\nextern unsigned IOCTL_VIDIOC_G_STD;\nextern unsigned IOCTL_VIDIOC_S_STD;\nextern unsigned IOCTL_VIDIOC_ENUMSTD;\nextern unsigned IOCTL_VIDIOC_ENUMINPUT;\nextern unsigned IOCTL_VIDIOC_G_CTRL;\nextern unsigned IOCTL_VIDIOC_S_CTRL;\nextern unsigned 
IOCTL_VIDIOC_G_TUNER;\nextern unsigned IOCTL_VIDIOC_S_TUNER;\nextern unsigned IOCTL_VIDIOC_G_AUDIO;\nextern unsigned IOCTL_VIDIOC_S_AUDIO;\nextern unsigned IOCTL_VIDIOC_QUERYCTRL;\nextern unsigned IOCTL_VIDIOC_QUERYMENU;\nextern unsigned IOCTL_VIDIOC_G_INPUT;\nextern unsigned IOCTL_VIDIOC_S_INPUT;\nextern unsigned IOCTL_VIDIOC_G_OUTPUT;\nextern unsigned IOCTL_VIDIOC_S_OUTPUT;\nextern unsigned IOCTL_VIDIOC_ENUMOUTPUT;\nextern unsigned IOCTL_VIDIOC_G_AUDOUT;\nextern unsigned IOCTL_VIDIOC_S_AUDOUT;\nextern unsigned IOCTL_VIDIOC_G_MODULATOR;\nextern unsigned IOCTL_VIDIOC_S_MODULATOR;\nextern unsigned IOCTL_VIDIOC_G_FREQUENCY;\nextern unsigned IOCTL_VIDIOC_S_FREQUENCY;\nextern unsigned IOCTL_VIDIOC_CROPCAP;\nextern unsigned IOCTL_VIDIOC_G_CROP;\nextern unsigned IOCTL_VIDIOC_S_CROP;\nextern unsigned IOCTL_VIDIOC_G_JPEGCOMP;\nextern unsigned IOCTL_VIDIOC_S_JPEGCOMP;\nextern unsigned IOCTL_VIDIOC_QUERYSTD;\nextern unsigned IOCTL_VIDIOC_TRY_FMT;\nextern unsigned IOCTL_VIDIOC_ENUMAUDIO;\nextern unsigned IOCTL_VIDIOC_ENUMAUDOUT;\nextern unsigned IOCTL_VIDIOC_G_PRIORITY;\nextern unsigned IOCTL_VIDIOC_S_PRIORITY;\nextern unsigned IOCTL_VIDIOC_ENUM_FRAMESIZES;\nextern unsigned IOCTL_VIDIOC_ENUM_FRAMEINTERVALS;\nextern unsigned IOCTL_WDOGIOC_GMODE;\nextern unsigned IOCTL_WDOGIOC_SMODE;\nextern unsigned IOCTL_WDOGIOC_WHICH;\nextern unsigned IOCTL_WDOGIOC_TICKLE;\nextern unsigned IOCTL_WDOGIOC_GTICKLER;\nextern unsigned IOCTL_WDOGIOC_GWDOGS;\nextern unsigned IOCTL_KCOV_IOC_SETBUFSIZE;\nextern unsigned IOCTL_KCOV_IOC_ENABLE;\nextern unsigned IOCTL_KCOV_IOC_DISABLE;\nextern unsigned IOCTL_IPMICTL_RECEIVE_MSG_TRUNC;\nextern unsigned IOCTL_IPMICTL_RECEIVE_MSG;\nextern unsigned IOCTL_IPMICTL_SEND_COMMAND;\nextern unsigned IOCTL_IPMICTL_REGISTER_FOR_CMD;\nextern unsigned IOCTL_IPMICTL_UNREGISTER_FOR_CMD;\nextern unsigned IOCTL_IPMICTL_SET_GETS_EVENTS_CMD;\nextern unsigned IOCTL_IPMICTL_SET_MY_ADDRESS_CMD;\nextern unsigned IOCTL_IPMICTL_GET_MY_ADDRESS_CMD;\nextern unsigned 
IOCTL_IPMICTL_SET_MY_LUN_CMD;\nextern unsigned IOCTL_IPMICTL_GET_MY_LUN_CMD;\nextern unsigned IOCTL_SNDCTL_DSP_RESET;\nextern unsigned IOCTL_SNDCTL_DSP_SYNC;\nextern unsigned IOCTL_SNDCTL_DSP_SPEED;\nextern unsigned IOCTL_SOUND_PCM_READ_RATE;\nextern unsigned IOCTL_SNDCTL_DSP_STEREO;\nextern unsigned IOCTL_SNDCTL_DSP_GETBLKSIZE;\nextern unsigned IOCTL_SNDCTL_DSP_SETFMT;\nextern unsigned IOCTL_SOUND_PCM_READ_BITS;\nextern unsigned IOCTL_SNDCTL_DSP_CHANNELS;\nextern unsigned IOCTL_SOUND_PCM_READ_CHANNELS;\nextern unsigned IOCTL_SOUND_PCM_WRITE_FILTER;\nextern unsigned IOCTL_SOUND_PCM_READ_FILTER;\nextern unsigned IOCTL_SNDCTL_DSP_POST;\nextern unsigned IOCTL_SNDCTL_DSP_SUBDIVIDE;\nextern unsigned IOCTL_SNDCTL_DSP_SETFRAGMENT;\nextern unsigned IOCTL_SNDCTL_DSP_GETFMTS;\nextern unsigned IOCTL_SNDCTL_DSP_GETOSPACE;\nextern unsigned IOCTL_SNDCTL_DSP_GETISPACE;\nextern unsigned IOCTL_SNDCTL_DSP_NONBLOCK;\nextern unsigned IOCTL_SNDCTL_DSP_GETCAPS;\nextern unsigned IOCTL_SNDCTL_DSP_GETTRIGGER;\nextern unsigned IOCTL_SNDCTL_DSP_SETTRIGGER;\nextern unsigned IOCTL_SNDCTL_DSP_GETIPTR;\nextern unsigned IOCTL_SNDCTL_DSP_GETOPTR;\nextern unsigned IOCTL_SNDCTL_DSP_MAPINBUF;\nextern unsigned IOCTL_SNDCTL_DSP_MAPOUTBUF;\nextern unsigned IOCTL_SNDCTL_DSP_SETSYNCRO;\nextern unsigned IOCTL_SNDCTL_DSP_SETDUPLEX;\nextern unsigned IOCTL_SNDCTL_DSP_PROFILE;\nextern unsigned IOCTL_SNDCTL_DSP_GETODELAY;\nextern unsigned IOCTL_SOUND_MIXER_INFO;\nextern unsigned IOCTL_SOUND_OLD_MIXER_INFO;\nextern unsigned IOCTL_OSS_GETVERSION;\nextern unsigned IOCTL_SNDCTL_SYSINFO;\nextern unsigned IOCTL_SNDCTL_AUDIOINFO;\nextern unsigned IOCTL_SNDCTL_ENGINEINFO;\nextern unsigned IOCTL_SNDCTL_DSP_GETPLAYVOL;\nextern unsigned IOCTL_SNDCTL_DSP_SETPLAYVOL;\nextern unsigned IOCTL_SNDCTL_DSP_GETRECVOL;\nextern unsigned IOCTL_SNDCTL_DSP_SETRECVOL;\nextern unsigned IOCTL_SNDCTL_DSP_SKIP;\nextern unsigned IOCTL_SNDCTL_DSP_SILENCE;\n\nextern const int si_SEGV_MAPERR;\nextern const int si_SEGV_ACCERR;\n\nextern const 
unsigned SHA1_CTX_sz;\nextern const unsigned SHA1_return_length;\n\nextern const unsigned MD4_CTX_sz;\nextern const unsigned MD4_return_length;\n\nextern const unsigned RMD160_CTX_sz;\nextern const unsigned RMD160_return_length;\n\nextern const unsigned MD5_CTX_sz;\nextern const unsigned MD5_return_length;\n\nextern const unsigned fpos_t_sz;\n\nextern const unsigned MD2_CTX_sz;\nextern const unsigned MD2_return_length;\n\n#define SHA2_EXTERN(LEN)                          \\\n  extern const unsigned SHA##LEN##_CTX_sz;        \\\n  extern const unsigned SHA##LEN##_return_length; \\\n  extern const unsigned SHA##LEN##_block_length;  \\\n  extern const unsigned SHA##LEN##_digest_length\n\nSHA2_EXTERN(224);\nSHA2_EXTERN(256);\nSHA2_EXTERN(384);\nSHA2_EXTERN(512);\n\n#undef SHA2_EXTERN\n\nextern const int unvis_valid;\nextern const int unvis_validpush;\n\nstruct __sanitizer_cdbr {\n  void (*unmap)(void *, void *, uptr);\n  void *cookie;\n  u8 *mmap_base;\n  uptr mmap_size;\n\n  u8 *hash_base;\n  u8 *offset_base;\n  u8 *data_base;\n\n  u32 data_size;\n  u32 entries;\n  u32 entries_index;\n  u32 seed;\n\n  u8 offset_size;\n  u8 index_size;\n\n  u32 entries_m;\n  u32 entries_index_m;\n  u8 entries_s1, entries_s2;\n  u8 entries_index_s1, entries_index_s2;\n};\n\nstruct __sanitizer_cdbw {\n  uptr data_counter;\n  uptr data_allocated;\n  uptr data_size;\n  uptr *data_len;\n  void **data_ptr;\n  uptr hash_size;\n  void *hash;\n  uptr key_counter;\n};\n}  // namespace __sanitizer\n\n#define CHECK_TYPE_SIZE(TYPE) \\\n  COMPILER_CHECK(sizeof(__sanitizer_##TYPE) == sizeof(TYPE))\n\n#define CHECK_SIZE_AND_OFFSET(CLASS, MEMBER)                      \\\n  COMPILER_CHECK(sizeof(((__sanitizer_##CLASS *)NULL)->MEMBER) == \\\n                 sizeof(((CLASS *)NULL)->MEMBER));                \\\n  COMPILER_CHECK(offsetof(__sanitizer_##CLASS, MEMBER) ==         \\\n                 offsetof(CLASS, MEMBER))\n\n// For sigaction, which is a function and struct at the same time,\n// and thus 
requires explicit \"struct\" in sizeof() expression.\n#define CHECK_STRUCT_SIZE_AND_OFFSET(CLASS, MEMBER)                      \\\n  COMPILER_CHECK(sizeof(((struct __sanitizer_##CLASS *)NULL)->MEMBER) == \\\n                 sizeof(((struct CLASS *)NULL)->MEMBER));                \\\n  COMPILER_CHECK(offsetof(struct __sanitizer_##CLASS, MEMBER) ==         \\\n                 offsetof(struct CLASS, MEMBER))\n\n#define SIGACTION_SYMNAME __sigaction14\n\n// Compat with 9.0\nextern unsigned struct_statvfs90_sz;\n\n#endif  // SANITIZER_NETBSD\n\n#endif\n"
  },
  {
    "path": "runtime/sanitizer_common/sanitizer_platform_limits_openbsd.cpp",
    "content": ""
  },
  {
    "path": "runtime/sanitizer_common/sanitizer_platform_limits_openbsd.h",
    "content": ""
  },
  {
    "path": "runtime/sanitizer_common/sanitizer_platform_limits_posix.cpp",
    "content": "//===-- sanitizer_platform_limits_posix.cpp -------------------------------===//\n//\n// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n// See https://llvm.org/LICENSE.txt for license information.\n// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n//\n//===----------------------------------------------------------------------===//\n//\n// This file is a part of Sanitizer common code.\n//\n// Sizes and layouts of platform-specific POSIX data structures.\n//===----------------------------------------------------------------------===//\n\n#if defined(__linux__) || defined(__APPLE__)\n// Tests in this file assume that off_t-dependent data structures match the\n// libc ABI. For example, struct dirent here is what readdir() function (as\n// exported from libc) returns, and not the user-facing \"dirent\", which\n// depends on _FILE_OFFSET_BITS setting.\n// To get this \"true\" dirent definition, we undefine _FILE_OFFSET_BITS below.\n#undef _FILE_OFFSET_BITS\n#endif\n\n// Must go after undef _FILE_OFFSET_BITS.\n#include \"sanitizer_platform.h\"\n\n#if SANITIZER_LINUX || SANITIZER_MAC\n// Must go after undef _FILE_OFFSET_BITS.\n#include \"sanitizer_glibc_version.h\"\n\n#include <arpa/inet.h>\n#include <dirent.h>\n#include <grp.h>\n#include <limits.h>\n#include <net/if.h>\n#include <netdb.h>\n#include <poll.h>\n#include <pthread.h>\n#include <pwd.h>\n#include <signal.h>\n#include <stddef.h>\n#include <stdio.h>\n#include <sys/mman.h>\n#include <sys/resource.h>\n#include <sys/socket.h>\n#include <sys/stat.h>\n#include <sys/time.h>\n#include <sys/times.h>\n#include <sys/types.h>\n#include <sys/utsname.h>\n#include <termios.h>\n#include <time.h>\n#include <wchar.h>\n#include <regex.h>\n#if !SANITIZER_MAC\n#include <utmp.h>\n#endif\n\n#if !SANITIZER_IOS\n#include <net/route.h>\n#endif\n\n#if !SANITIZER_ANDROID\n#include <sys/mount.h>\n#include <sys/timeb.h>\n#include <utmpx.h>\n#endif\n\n#if SANITIZER_LINUX\n#include 
<malloc.h>\n#include <mntent.h>\n#include <netinet/ether.h>\n#include <sys/sysinfo.h>\n#include <sys/vt.h>\n#include <linux/cdrom.h>\n#include <linux/fd.h>\n#include <linux/fs.h>\n#include <linux/hdreg.h>\n#include <linux/input.h>\n#include <linux/ioctl.h>\n#include <linux/soundcard.h>\n#include <linux/sysctl.h>\n#include <linux/utsname.h>\n#include <linux/posix_types.h>\n#include <net/if_arp.h>\n#endif\n\n#if SANITIZER_IOS\n#undef IOC_DIRMASK\n#endif\n\n#if SANITIZER_LINUX\n# include <utime.h>\n# include <sys/ptrace.h>\n#    if defined(__mips64) || defined(__aarch64__) || defined(__arm__) || \\\n        defined(__hexagon__) || SANITIZER_RISCV64\n#      include <asm/ptrace.h>\n#      ifdef __arm__\ntypedef struct user_fpregs elf_fpregset_t;\n#   define ARM_VFPREGS_SIZE_ASAN (32 * 8 /*fpregs*/ + 4 /*fpscr*/)\n#   if !defined(ARM_VFPREGS_SIZE)\n#     define ARM_VFPREGS_SIZE ARM_VFPREGS_SIZE_ASAN\n#   endif\n#  endif\n# endif\n# include <semaphore.h>\n#endif\n\n#if !SANITIZER_ANDROID\n#include <ifaddrs.h>\n#include <sys/ucontext.h>\n#include <wordexp.h>\n#endif\n\n#if SANITIZER_LINUX\n#if SANITIZER_GLIBC\n#include <fstab.h>\n#include <net/if_ppp.h>\n#include <netax25/ax25.h>\n#include <netipx/ipx.h>\n#include <netrom/netrom.h>\n#include <obstack.h>\n#if HAVE_RPC_XDR_H\n# include <rpc/xdr.h>\n#endif\n#include <scsi/scsi.h>\n#else\n#include <linux/if_ppp.h>\n#include <linux/kd.h>\n#include <linux/ppp_defs.h>\n#endif  // SANITIZER_GLIBC\n\n#if SANITIZER_ANDROID\n#include <linux/mtio.h>\n#else\n#include <glob.h>\n#include <mqueue.h>\n#include <sys/kd.h>\n#include <sys/mtio.h>\n#include <sys/shm.h>\n#include <sys/statvfs.h>\n#include <sys/timex.h>\n#if defined(__mips64)\n# include <sys/procfs.h>\n#endif\n#include <sys/user.h>\n#include <linux/if_eql.h>\n#include <linux/if_plip.h>\n#include <linux/lp.h>\n#include <linux/mroute.h>\n#include <linux/mroute6.h>\n#include <linux/scc.h>\n#include <linux/serial.h>\n#include <sys/msg.h>\n#include <sys/ipc.h>\n#include 
<crypt.h>\n#endif  // SANITIZER_ANDROID\n\n#include <link.h>\n#include <sys/vfs.h>\n#include <sys/epoll.h>\n#include <linux/capability.h>\n#else\n#include <fstab.h>\n#endif // SANITIZER_LINUX\n\n#if SANITIZER_MAC\n#include <net/ethernet.h>\n#include <sys/filio.h>\n#include <sys/sockio.h>\n#endif\n\n// Include these after system headers to avoid name clashes and ambiguities.\n#  include \"sanitizer_common.h\"\n#  include \"sanitizer_internal_defs.h\"\n#  include \"sanitizer_platform_limits_posix.h\"\n\nnamespace __sanitizer {\n  unsigned struct_utsname_sz = sizeof(struct utsname);\n  unsigned struct_stat_sz = sizeof(struct stat);\n#if !SANITIZER_IOS && !(SANITIZER_MAC && TARGET_CPU_ARM64)\n  unsigned struct_stat64_sz = sizeof(struct stat64);\n#endif // !SANITIZER_IOS && !(SANITIZER_MAC && TARGET_CPU_ARM64)\n  unsigned struct_rusage_sz = sizeof(struct rusage);\n  unsigned struct_tm_sz = sizeof(struct tm);\n  unsigned struct_passwd_sz = sizeof(struct passwd);\n  unsigned struct_group_sz = sizeof(struct group);\n  unsigned siginfo_t_sz = sizeof(siginfo_t);\n  unsigned struct_sigaction_sz = sizeof(struct sigaction);\n  unsigned struct_stack_t_sz = sizeof(stack_t);\n  unsigned struct_itimerval_sz = sizeof(struct itimerval);\n  unsigned pthread_t_sz = sizeof(pthread_t);\n  unsigned pthread_mutex_t_sz = sizeof(pthread_mutex_t);\n  unsigned pthread_cond_t_sz = sizeof(pthread_cond_t);\n  unsigned pid_t_sz = sizeof(pid_t);\n  unsigned timeval_sz = sizeof(timeval);\n  unsigned uid_t_sz = sizeof(uid_t);\n  unsigned gid_t_sz = sizeof(gid_t);\n  unsigned mbstate_t_sz = sizeof(mbstate_t);\n  unsigned sigset_t_sz = sizeof(sigset_t);\n  unsigned struct_timezone_sz = sizeof(struct timezone);\n  unsigned struct_tms_sz = sizeof(struct tms);\n  unsigned struct_sigevent_sz = sizeof(struct sigevent);\n  unsigned struct_sched_param_sz = sizeof(struct sched_param);\n  unsigned struct_regex_sz = sizeof(regex_t);\n  unsigned struct_regmatch_sz = sizeof(regmatch_t);\n\n#if (SANITIZER_MAC && 
!TARGET_CPU_ARM64) && !SANITIZER_IOS\n  unsigned struct_statfs64_sz = sizeof(struct statfs64);\n#endif // (SANITIZER_MAC && !TARGET_CPU_ARM64) && !SANITIZER_IOS\n\n#if SANITIZER_GLIBC || SANITIZER_FREEBSD || SANITIZER_NETBSD || SANITIZER_MAC\n  unsigned struct_fstab_sz = sizeof(struct fstab);\n#endif  // SANITIZER_GLIBC || SANITIZER_FREEBSD || SANITIZER_NETBSD ||\n        // SANITIZER_MAC\n#if !SANITIZER_ANDROID\n  unsigned struct_statfs_sz = sizeof(struct statfs);\n  unsigned struct_sockaddr_sz = sizeof(struct sockaddr);\n\n  unsigned ucontext_t_sz(void *ctx) {\n#    if SANITIZER_GLIBC && SANITIZER_X64\n    // See kernel arch/x86/kernel/fpu/signal.c for details.\n    const auto *fpregs = static_cast<ucontext_t *>(ctx)->uc_mcontext.fpregs;\n    // The member names differ across header versions, but the actual layout\n    // is always the same.  So avoid using members, just use arithmetic.\n    const uint32_t *after_xmm =\n        reinterpret_cast<const uint32_t *>(fpregs + 1) - 24;\n    if (after_xmm[12] == FP_XSTATE_MAGIC1)\n      return reinterpret_cast<const char *>(fpregs) + after_xmm[13] -\n             static_cast<const char *>(ctx);\n#    endif\n    return sizeof(ucontext_t);\n  }\n#  endif  // !SANITIZER_ANDROID\n\n#  if SANITIZER_LINUX\n  unsigned struct_epoll_event_sz = sizeof(struct epoll_event);\n  unsigned struct_sysinfo_sz = sizeof(struct sysinfo);\n  unsigned __user_cap_header_struct_sz =\n      sizeof(struct __user_cap_header_struct);\n  unsigned __user_cap_data_struct_sz = sizeof(struct __user_cap_data_struct);\n  unsigned struct_new_utsname_sz = sizeof(struct new_utsname);\n  unsigned struct_old_utsname_sz = sizeof(struct old_utsname);\n  unsigned struct_oldold_utsname_sz = sizeof(struct oldold_utsname);\n#endif // SANITIZER_LINUX\n\n#if SANITIZER_LINUX\n  unsigned struct_rlimit_sz = sizeof(struct rlimit);\n  unsigned struct_timespec_sz = sizeof(struct timespec);\n  unsigned struct_utimbuf_sz = sizeof(struct utimbuf);\n  unsigned 
struct_itimerspec_sz = sizeof(struct itimerspec);\n#endif // SANITIZER_LINUX\n\n#if SANITIZER_LINUX && !SANITIZER_ANDROID\n  // Use pre-computed size of struct ustat to avoid <sys/ustat.h> which\n  // has been removed from glibc 2.28.\n#if defined(__aarch64__) || defined(__s390x__) || defined(__mips64) ||     \\\n    defined(__powerpc64__) || defined(__arch64__) || defined(__sparcv9) || \\\n    defined(__x86_64__) || SANITIZER_RISCV64\n#define SIZEOF_STRUCT_USTAT 32\n#    elif defined(__arm__) || defined(__i386__) || defined(__mips__) ||    \\\n        defined(__powerpc__) || defined(__s390__) || defined(__sparc__) || \\\n        defined(__hexagon__)\n#      define SIZEOF_STRUCT_USTAT 20\n#    else\n#      error Unknown size of struct ustat\n#    endif\n  unsigned struct_ustat_sz = SIZEOF_STRUCT_USTAT;\n  unsigned struct_rlimit64_sz = sizeof(struct rlimit64);\n  unsigned struct_statvfs64_sz = sizeof(struct statvfs64);\n  unsigned struct_crypt_data_sz = sizeof(struct crypt_data);\n#endif // SANITIZER_LINUX && !SANITIZER_ANDROID\n\n#if SANITIZER_LINUX && !SANITIZER_ANDROID\n  unsigned struct_timex_sz = sizeof(struct timex);\n  unsigned struct_msqid_ds_sz = sizeof(struct msqid_ds);\n  unsigned struct_mq_attr_sz = sizeof(struct mq_attr);\n  unsigned struct_statvfs_sz = sizeof(struct statvfs);\n#endif // SANITIZER_LINUX && !SANITIZER_ANDROID\n\n  const uptr sig_ign = (uptr)SIG_IGN;\n  const uptr sig_dfl = (uptr)SIG_DFL;\n  const uptr sig_err = (uptr)SIG_ERR;\n  const uptr sa_siginfo = (uptr)SA_SIGINFO;\n\n#if SANITIZER_LINUX\n  int e_tabsz = (int)E_TABSZ;\n#endif\n\n\n#if SANITIZER_LINUX && !SANITIZER_ANDROID\n  unsigned struct_shminfo_sz = sizeof(struct shminfo);\n  unsigned struct_shm_info_sz = sizeof(struct shm_info);\n  int shmctl_ipc_stat = (int)IPC_STAT;\n  int shmctl_ipc_info = (int)IPC_INFO;\n  int shmctl_shm_info = (int)SHM_INFO;\n  int shmctl_shm_stat = (int)SHM_STAT;\n#endif\n\n#if !SANITIZER_MAC && !SANITIZER_FREEBSD\n  unsigned struct_utmp_sz = 
sizeof(struct utmp);\n#endif\n#if !SANITIZER_ANDROID\n  unsigned struct_utmpx_sz = sizeof(struct utmpx);\n#endif\n\n  int map_fixed = MAP_FIXED;\n\n  int af_inet = (int)AF_INET;\n  int af_inet6 = (int)AF_INET6;\n\n  uptr __sanitizer_in_addr_sz(int af) {\n    if (af == AF_INET)\n      return sizeof(struct in_addr);\n    else if (af == AF_INET6)\n      return sizeof(struct in6_addr);\n    else\n      return 0;\n  }\n\n#if SANITIZER_LINUX\nunsigned struct_ElfW_Phdr_sz = sizeof(ElfW(Phdr));\n#elif SANITIZER_FREEBSD\nunsigned struct_ElfW_Phdr_sz = sizeof(Elf_Phdr);\n#endif\n\n#if SANITIZER_GLIBC\n  int glob_nomatch = GLOB_NOMATCH;\n  int glob_altdirfunc = GLOB_ALTDIRFUNC;\n#endif\n\n#  if !SANITIZER_ANDROID\n  const int wordexp_wrde_dooffs = WRDE_DOOFFS;\n#  endif  // !SANITIZER_ANDROID\n\n#if SANITIZER_LINUX && !SANITIZER_ANDROID &&                               \\\n    (defined(__i386) || defined(__x86_64) || defined(__mips64) ||          \\\n     defined(__powerpc64__) || defined(__aarch64__) || defined(__arm__) || \\\n     defined(__s390__) || SANITIZER_RISCV64)\n#if defined(__mips64) || defined(__powerpc64__) || defined(__arm__)\n  unsigned struct_user_regs_struct_sz = sizeof(struct pt_regs);\n  unsigned struct_user_fpregs_struct_sz = sizeof(elf_fpregset_t);\n#elif SANITIZER_RISCV64\n  unsigned struct_user_regs_struct_sz = sizeof(struct user_regs_struct);\n  unsigned struct_user_fpregs_struct_sz = sizeof(struct __riscv_q_ext_state);\n#elif defined(__aarch64__)\n  unsigned struct_user_regs_struct_sz = sizeof(struct user_pt_regs);\n  unsigned struct_user_fpregs_struct_sz = sizeof(struct user_fpsimd_state);\n#elif defined(__s390__)\n  unsigned struct_user_regs_struct_sz = sizeof(struct _user_regs_struct);\n  unsigned struct_user_fpregs_struct_sz = sizeof(struct _user_fpregs_struct);\n#else\n  unsigned struct_user_regs_struct_sz = sizeof(struct user_regs_struct);\n  unsigned struct_user_fpregs_struct_sz = sizeof(struct user_fpregs_struct);\n#endif // __mips64 || 
__powerpc64__ || __aarch64__\n#if defined(__x86_64) || defined(__mips64) || defined(__powerpc64__) || \\\n    defined(__aarch64__) || defined(__arm__) || defined(__s390__) ||    \\\n    SANITIZER_RISCV64\n  unsigned struct_user_fpxregs_struct_sz = 0;\n#else\n  unsigned struct_user_fpxregs_struct_sz = sizeof(struct user_fpxregs_struct);\n#endif // __x86_64 || __mips64 || __powerpc64__ || __aarch64__ || __arm__\n// || __s390__\n#ifdef __arm__\n  unsigned struct_user_vfpregs_struct_sz = ARM_VFPREGS_SIZE;\n#else\n  unsigned struct_user_vfpregs_struct_sz = 0;\n#endif\n\n  int ptrace_peektext = PTRACE_PEEKTEXT;\n  int ptrace_peekdata = PTRACE_PEEKDATA;\n  int ptrace_peekuser = PTRACE_PEEKUSER;\n#if (defined(PTRACE_GETREGS) && defined(PTRACE_SETREGS)) || \\\n    (defined(PT_GETREGS) && defined(PT_SETREGS))\n  int ptrace_getregs = PTRACE_GETREGS;\n  int ptrace_setregs = PTRACE_SETREGS;\n#else\n  int ptrace_getregs = -1;\n  int ptrace_setregs = -1;\n#endif\n#if (defined(PTRACE_GETFPREGS) && defined(PTRACE_SETFPREGS)) || \\\n    (defined(PT_GETFPREGS) && defined(PT_SETFPREGS))\n  int ptrace_getfpregs = PTRACE_GETFPREGS;\n  int ptrace_setfpregs = PTRACE_SETFPREGS;\n#else\n  int ptrace_getfpregs = -1;\n  int ptrace_setfpregs = -1;\n#endif\n#if (defined(PTRACE_GETFPXREGS) && defined(PTRACE_SETFPXREGS)) || \\\n    (defined(PT_GETFPXREGS) && defined(PT_SETFPXREGS))\n  int ptrace_getfpxregs = PTRACE_GETFPXREGS;\n  int ptrace_setfpxregs = PTRACE_SETFPXREGS;\n#else\n  int ptrace_getfpxregs = -1;\n  int ptrace_setfpxregs = -1;\n#endif // PTRACE_GETFPXREGS/PTRACE_SETFPXREGS\n#if defined(PTRACE_GETVFPREGS) && defined(PTRACE_SETVFPREGS)\n  int ptrace_getvfpregs = PTRACE_GETVFPREGS;\n  int ptrace_setvfpregs = PTRACE_SETVFPREGS;\n#else\n  int ptrace_getvfpregs = -1;\n  int ptrace_setvfpregs = -1;\n#endif\n  int ptrace_geteventmsg = PTRACE_GETEVENTMSG;\n#if (defined(PTRACE_GETSIGINFO) && defined(PTRACE_SETSIGINFO)) ||              \\\n    (defined(PT_GETSIGINFO) && 
defined(PT_SETSIGINFO))\n  int ptrace_getsiginfo = PTRACE_GETSIGINFO;\n  int ptrace_setsiginfo = PTRACE_SETSIGINFO;\n#else\n  int ptrace_getsiginfo = -1;\n  int ptrace_setsiginfo = -1;\n#endif // PTRACE_GETSIGINFO/PTRACE_SETSIGINFO\n#if defined(PTRACE_GETREGSET) && defined(PTRACE_SETREGSET)\n  int ptrace_getregset = PTRACE_GETREGSET;\n  int ptrace_setregset = PTRACE_SETREGSET;\n#else\n  int ptrace_getregset = -1;\n  int ptrace_setregset = -1;\n#endif // PTRACE_GETREGSET/PTRACE_SETREGSET\n#endif\n\n  unsigned path_max = PATH_MAX;\n\n  // ioctl arguments\n  unsigned struct_ifreq_sz = sizeof(struct ifreq);\n  unsigned struct_termios_sz = sizeof(struct termios);\n  unsigned struct_winsize_sz = sizeof(struct winsize);\n\n#if SANITIZER_LINUX\n  unsigned struct_arpreq_sz = sizeof(struct arpreq);\n  unsigned struct_cdrom_msf_sz = sizeof(struct cdrom_msf);\n  unsigned struct_cdrom_multisession_sz = sizeof(struct cdrom_multisession);\n  unsigned struct_cdrom_read_audio_sz = sizeof(struct cdrom_read_audio);\n  unsigned struct_cdrom_subchnl_sz = sizeof(struct cdrom_subchnl);\n  unsigned struct_cdrom_ti_sz = sizeof(struct cdrom_ti);\n  unsigned struct_cdrom_tocentry_sz = sizeof(struct cdrom_tocentry);\n  unsigned struct_cdrom_tochdr_sz = sizeof(struct cdrom_tochdr);\n  unsigned struct_cdrom_volctrl_sz = sizeof(struct cdrom_volctrl);\n  unsigned struct_ff_effect_sz = sizeof(struct ff_effect);\n  unsigned struct_floppy_drive_params_sz = sizeof(struct floppy_drive_params);\n  unsigned struct_floppy_drive_struct_sz = sizeof(struct floppy_drive_struct);\n  unsigned struct_floppy_fdc_state_sz = sizeof(struct floppy_fdc_state);\n  unsigned struct_floppy_max_errors_sz = sizeof(struct floppy_max_errors);\n  unsigned struct_floppy_raw_cmd_sz = sizeof(struct floppy_raw_cmd);\n  unsigned struct_floppy_struct_sz = sizeof(struct floppy_struct);\n  unsigned struct_floppy_write_errors_sz = sizeof(struct floppy_write_errors);\n  unsigned struct_format_descr_sz = sizeof(struct format_descr);\n  
unsigned struct_hd_driveid_sz = sizeof(struct hd_driveid);\n  unsigned struct_hd_geometry_sz = sizeof(struct hd_geometry);\n  unsigned struct_input_absinfo_sz = sizeof(struct input_absinfo);\n  unsigned struct_input_id_sz = sizeof(struct input_id);\n  unsigned struct_mtpos_sz = sizeof(struct mtpos);\n  unsigned struct_rtentry_sz = sizeof(struct rtentry);\n#if SANITIZER_GLIBC || SANITIZER_ANDROID\n  unsigned struct_termio_sz = sizeof(struct termio);\n#endif\n  unsigned struct_vt_consize_sz = sizeof(struct vt_consize);\n  unsigned struct_vt_sizes_sz = sizeof(struct vt_sizes);\n  unsigned struct_vt_stat_sz = sizeof(struct vt_stat);\n#endif // SANITIZER_LINUX\n\n#if SANITIZER_LINUX\n#if SOUND_VERSION >= 0x040000\n  unsigned struct_copr_buffer_sz = 0;\n  unsigned struct_copr_debug_buf_sz = 0;\n  unsigned struct_copr_msg_sz = 0;\n#else\n  unsigned struct_copr_buffer_sz = sizeof(struct copr_buffer);\n  unsigned struct_copr_debug_buf_sz = sizeof(struct copr_debug_buf);\n  unsigned struct_copr_msg_sz = sizeof(struct copr_msg);\n#endif\n  unsigned struct_midi_info_sz = sizeof(struct midi_info);\n  unsigned struct_mtget_sz = sizeof(struct mtget);\n  unsigned struct_mtop_sz = sizeof(struct mtop);\n  unsigned struct_sbi_instrument_sz = sizeof(struct sbi_instrument);\n  unsigned struct_seq_event_rec_sz = sizeof(struct seq_event_rec);\n  unsigned struct_synth_info_sz = sizeof(struct synth_info);\n  unsigned struct_vt_mode_sz = sizeof(struct vt_mode);\n#endif // SANITIZER_LINUX\n\n#if SANITIZER_GLIBC\n  unsigned struct_ax25_parms_struct_sz = sizeof(struct ax25_parms_struct);\n#if EV_VERSION > (0x010000)\n  unsigned struct_input_keymap_entry_sz = sizeof(struct input_keymap_entry);\n#else\n  unsigned struct_input_keymap_entry_sz = 0;\n#endif\n  unsigned struct_ipx_config_data_sz = sizeof(struct ipx_config_data);\n  unsigned struct_kbdiacrs_sz = sizeof(struct kbdiacrs);\n  unsigned struct_kbentry_sz = sizeof(struct kbentry);\n  unsigned struct_kbkeycode_sz = sizeof(struct 
kbkeycode);\n  unsigned struct_kbsentry_sz = sizeof(struct kbsentry);\n  unsigned struct_mtconfiginfo_sz = sizeof(struct mtconfiginfo);\n  unsigned struct_nr_parms_struct_sz = sizeof(struct nr_parms_struct);\n  unsigned struct_scc_modem_sz = sizeof(struct scc_modem);\n  unsigned struct_scc_stat_sz = sizeof(struct scc_stat);\n  unsigned struct_serial_multiport_struct_sz\n      = sizeof(struct serial_multiport_struct);\n  unsigned struct_serial_struct_sz = sizeof(struct serial_struct);\n  unsigned struct_sockaddr_ax25_sz = sizeof(struct sockaddr_ax25);\n  unsigned struct_unimapdesc_sz = sizeof(struct unimapdesc);\n  unsigned struct_unimapinit_sz = sizeof(struct unimapinit);\n\n  unsigned struct_audio_buf_info_sz = sizeof(struct audio_buf_info);\n  unsigned struct_ppp_stats_sz = sizeof(struct ppp_stats);\n#endif  // SANITIZER_GLIBC\n\n#if !SANITIZER_ANDROID && !SANITIZER_MAC\n  unsigned struct_sioc_sg_req_sz = sizeof(struct sioc_sg_req);\n  unsigned struct_sioc_vif_req_sz = sizeof(struct sioc_vif_req);\n#endif\n\n  const unsigned long __sanitizer_bufsiz = BUFSIZ;\n\n  const unsigned IOCTL_NOT_PRESENT = 0;\n\n  unsigned IOCTL_FIOASYNC = FIOASYNC;\n  unsigned IOCTL_FIOCLEX = FIOCLEX;\n  unsigned IOCTL_FIOGETOWN = FIOGETOWN;\n  unsigned IOCTL_FIONBIO = FIONBIO;\n  unsigned IOCTL_FIONCLEX = FIONCLEX;\n  unsigned IOCTL_FIOSETOWN = FIOSETOWN;\n  unsigned IOCTL_SIOCADDMULTI = SIOCADDMULTI;\n  unsigned IOCTL_SIOCATMARK = SIOCATMARK;\n  unsigned IOCTL_SIOCDELMULTI = SIOCDELMULTI;\n  unsigned IOCTL_SIOCGIFADDR = SIOCGIFADDR;\n  unsigned IOCTL_SIOCGIFBRDADDR = SIOCGIFBRDADDR;\n  unsigned IOCTL_SIOCGIFCONF = SIOCGIFCONF;\n  unsigned IOCTL_SIOCGIFDSTADDR = SIOCGIFDSTADDR;\n  unsigned IOCTL_SIOCGIFFLAGS = SIOCGIFFLAGS;\n  unsigned IOCTL_SIOCGIFMETRIC = SIOCGIFMETRIC;\n  unsigned IOCTL_SIOCGIFMTU = SIOCGIFMTU;\n  unsigned IOCTL_SIOCGIFNETMASK = SIOCGIFNETMASK;\n  unsigned IOCTL_SIOCGPGRP = SIOCGPGRP;\n  unsigned IOCTL_SIOCSIFADDR = SIOCSIFADDR;\n  unsigned IOCTL_SIOCSIFBRDADDR = 
SIOCSIFBRDADDR;\n  unsigned IOCTL_SIOCSIFDSTADDR = SIOCSIFDSTADDR;\n  unsigned IOCTL_SIOCSIFFLAGS = SIOCSIFFLAGS;\n  unsigned IOCTL_SIOCSIFMETRIC = SIOCSIFMETRIC;\n  unsigned IOCTL_SIOCSIFMTU = SIOCSIFMTU;\n  unsigned IOCTL_SIOCSIFNETMASK = SIOCSIFNETMASK;\n  unsigned IOCTL_SIOCSPGRP = SIOCSPGRP;\n  unsigned IOCTL_TIOCCONS = TIOCCONS;\n  unsigned IOCTL_TIOCEXCL = TIOCEXCL;\n  unsigned IOCTL_TIOCGETD = TIOCGETD;\n  unsigned IOCTL_TIOCGPGRP = TIOCGPGRP;\n  unsigned IOCTL_TIOCGWINSZ = TIOCGWINSZ;\n  unsigned IOCTL_TIOCMBIC = TIOCMBIC;\n  unsigned IOCTL_TIOCMBIS = TIOCMBIS;\n  unsigned IOCTL_TIOCMGET = TIOCMGET;\n  unsigned IOCTL_TIOCMSET = TIOCMSET;\n  unsigned IOCTL_TIOCNOTTY = TIOCNOTTY;\n  unsigned IOCTL_TIOCNXCL = TIOCNXCL;\n  unsigned IOCTL_TIOCOUTQ = TIOCOUTQ;\n  unsigned IOCTL_TIOCPKT = TIOCPKT;\n  unsigned IOCTL_TIOCSCTTY = TIOCSCTTY;\n  unsigned IOCTL_TIOCSETD = TIOCSETD;\n  unsigned IOCTL_TIOCSPGRP = TIOCSPGRP;\n  unsigned IOCTL_TIOCSTI = TIOCSTI;\n  unsigned IOCTL_TIOCSWINSZ = TIOCSWINSZ;\n#if SANITIZER_LINUX && !SANITIZER_ANDROID\n  unsigned IOCTL_SIOCGETSGCNT = SIOCGETSGCNT;\n  unsigned IOCTL_SIOCGETVIFCNT = SIOCGETVIFCNT;\n#endif\n\n#if SANITIZER_LINUX\n  unsigned IOCTL_EVIOCGABS = EVIOCGABS(0);\n  unsigned IOCTL_EVIOCGBIT = EVIOCGBIT(0, 0);\n  unsigned IOCTL_EVIOCGEFFECTS = EVIOCGEFFECTS;\n  unsigned IOCTL_EVIOCGID = EVIOCGID;\n  unsigned IOCTL_EVIOCGKEY = EVIOCGKEY(0);\n  unsigned IOCTL_EVIOCGKEYCODE = EVIOCGKEYCODE;\n  unsigned IOCTL_EVIOCGLED = EVIOCGLED(0);\n  unsigned IOCTL_EVIOCGNAME = EVIOCGNAME(0);\n  unsigned IOCTL_EVIOCGPHYS = EVIOCGPHYS(0);\n  unsigned IOCTL_EVIOCGRAB = EVIOCGRAB;\n  unsigned IOCTL_EVIOCGREP = EVIOCGREP;\n  unsigned IOCTL_EVIOCGSND = EVIOCGSND(0);\n  unsigned IOCTL_EVIOCGSW = EVIOCGSW(0);\n  unsigned IOCTL_EVIOCGUNIQ = EVIOCGUNIQ(0);\n  unsigned IOCTL_EVIOCGVERSION = EVIOCGVERSION;\n  unsigned IOCTL_EVIOCRMFF = EVIOCRMFF;\n  unsigned IOCTL_EVIOCSABS = EVIOCSABS(0);\n  unsigned IOCTL_EVIOCSFF = EVIOCSFF;\n  unsigned 
IOCTL_EVIOCSKEYCODE = EVIOCSKEYCODE;\n  unsigned IOCTL_EVIOCSREP = EVIOCSREP;\n  unsigned IOCTL_BLKFLSBUF = BLKFLSBUF;\n  unsigned IOCTL_BLKGETSIZE = BLKGETSIZE;\n  unsigned IOCTL_BLKRAGET = BLKRAGET;\n  unsigned IOCTL_BLKRASET = BLKRASET;\n  unsigned IOCTL_BLKROGET = BLKROGET;\n  unsigned IOCTL_BLKROSET = BLKROSET;\n  unsigned IOCTL_BLKRRPART = BLKRRPART;\n  unsigned IOCTL_CDROMAUDIOBUFSIZ = CDROMAUDIOBUFSIZ;\n  unsigned IOCTL_CDROMEJECT = CDROMEJECT;\n  unsigned IOCTL_CDROMEJECT_SW = CDROMEJECT_SW;\n  unsigned IOCTL_CDROMMULTISESSION = CDROMMULTISESSION;\n  unsigned IOCTL_CDROMPAUSE = CDROMPAUSE;\n  unsigned IOCTL_CDROMPLAYMSF = CDROMPLAYMSF;\n  unsigned IOCTL_CDROMPLAYTRKIND = CDROMPLAYTRKIND;\n  unsigned IOCTL_CDROMREADAUDIO = CDROMREADAUDIO;\n  unsigned IOCTL_CDROMREADCOOKED = CDROMREADCOOKED;\n  unsigned IOCTL_CDROMREADMODE1 = CDROMREADMODE1;\n  unsigned IOCTL_CDROMREADMODE2 = CDROMREADMODE2;\n  unsigned IOCTL_CDROMREADRAW = CDROMREADRAW;\n  unsigned IOCTL_CDROMREADTOCENTRY = CDROMREADTOCENTRY;\n  unsigned IOCTL_CDROMREADTOCHDR = CDROMREADTOCHDR;\n  unsigned IOCTL_CDROMRESET = CDROMRESET;\n  unsigned IOCTL_CDROMRESUME = CDROMRESUME;\n  unsigned IOCTL_CDROMSEEK = CDROMSEEK;\n  unsigned IOCTL_CDROMSTART = CDROMSTART;\n  unsigned IOCTL_CDROMSTOP = CDROMSTOP;\n  unsigned IOCTL_CDROMSUBCHNL = CDROMSUBCHNL;\n  unsigned IOCTL_CDROMVOLCTRL = CDROMVOLCTRL;\n  unsigned IOCTL_CDROMVOLREAD = CDROMVOLREAD;\n  unsigned IOCTL_CDROM_GET_UPC = CDROM_GET_UPC;\n  unsigned IOCTL_FDCLRPRM = FDCLRPRM;\n  unsigned IOCTL_FDDEFPRM = FDDEFPRM;\n  unsigned IOCTL_FDFLUSH = FDFLUSH;\n  unsigned IOCTL_FDFMTBEG = FDFMTBEG;\n  unsigned IOCTL_FDFMTEND = FDFMTEND;\n  unsigned IOCTL_FDFMTTRK = FDFMTTRK;\n  unsigned IOCTL_FDGETDRVPRM = FDGETDRVPRM;\n  unsigned IOCTL_FDGETDRVSTAT = FDGETDRVSTAT;\n  unsigned IOCTL_FDGETDRVTYP = FDGETDRVTYP;\n  unsigned IOCTL_FDGETFDCSTAT = FDGETFDCSTAT;\n  unsigned IOCTL_FDGETMAXERRS = FDGETMAXERRS;\n  unsigned IOCTL_FDGETPRM = FDGETPRM;\n  unsigned 
IOCTL_FDMSGOFF = FDMSGOFF;\n  unsigned IOCTL_FDMSGON = FDMSGON;\n  unsigned IOCTL_FDPOLLDRVSTAT = FDPOLLDRVSTAT;\n  unsigned IOCTL_FDRAWCMD = FDRAWCMD;\n  unsigned IOCTL_FDRESET = FDRESET;\n  unsigned IOCTL_FDSETDRVPRM = FDSETDRVPRM;\n  unsigned IOCTL_FDSETEMSGTRESH = FDSETEMSGTRESH;\n  unsigned IOCTL_FDSETMAXERRS = FDSETMAXERRS;\n  unsigned IOCTL_FDSETPRM = FDSETPRM;\n  unsigned IOCTL_FDTWADDLE = FDTWADDLE;\n  unsigned IOCTL_FDWERRORCLR = FDWERRORCLR;\n  unsigned IOCTL_FDWERRORGET = FDWERRORGET;\n  unsigned IOCTL_HDIO_DRIVE_CMD = HDIO_DRIVE_CMD;\n  unsigned IOCTL_HDIO_GETGEO = HDIO_GETGEO;\n  unsigned IOCTL_HDIO_GET_32BIT = HDIO_GET_32BIT;\n  unsigned IOCTL_HDIO_GET_DMA = HDIO_GET_DMA;\n  unsigned IOCTL_HDIO_GET_IDENTITY = HDIO_GET_IDENTITY;\n  unsigned IOCTL_HDIO_GET_KEEPSETTINGS = HDIO_GET_KEEPSETTINGS;\n  unsigned IOCTL_HDIO_GET_MULTCOUNT = HDIO_GET_MULTCOUNT;\n  unsigned IOCTL_HDIO_GET_NOWERR = HDIO_GET_NOWERR;\n  unsigned IOCTL_HDIO_GET_UNMASKINTR = HDIO_GET_UNMASKINTR;\n  unsigned IOCTL_HDIO_SET_32BIT = HDIO_SET_32BIT;\n  unsigned IOCTL_HDIO_SET_DMA = HDIO_SET_DMA;\n  unsigned IOCTL_HDIO_SET_KEEPSETTINGS = HDIO_SET_KEEPSETTINGS;\n  unsigned IOCTL_HDIO_SET_MULTCOUNT = HDIO_SET_MULTCOUNT;\n  unsigned IOCTL_HDIO_SET_NOWERR = HDIO_SET_NOWERR;\n  unsigned IOCTL_HDIO_SET_UNMASKINTR = HDIO_SET_UNMASKINTR;\n  unsigned IOCTL_MTIOCPOS = MTIOCPOS;\n  unsigned IOCTL_PPPIOCGASYNCMAP = PPPIOCGASYNCMAP;\n  unsigned IOCTL_PPPIOCGDEBUG = PPPIOCGDEBUG;\n  unsigned IOCTL_PPPIOCGFLAGS = PPPIOCGFLAGS;\n  unsigned IOCTL_PPPIOCGUNIT = PPPIOCGUNIT;\n  unsigned IOCTL_PPPIOCGXASYNCMAP = PPPIOCGXASYNCMAP;\n  unsigned IOCTL_PPPIOCSASYNCMAP = PPPIOCSASYNCMAP;\n  unsigned IOCTL_PPPIOCSDEBUG = PPPIOCSDEBUG;\n  unsigned IOCTL_PPPIOCSFLAGS = PPPIOCSFLAGS;\n  unsigned IOCTL_PPPIOCSMAXCID = PPPIOCSMAXCID;\n  unsigned IOCTL_PPPIOCSMRU = PPPIOCSMRU;\n  unsigned IOCTL_PPPIOCSXASYNCMAP = PPPIOCSXASYNCMAP;\n  unsigned IOCTL_SIOCADDRT = SIOCADDRT;\n  unsigned IOCTL_SIOCDARP = SIOCDARP;\n  unsigned 
IOCTL_SIOCDELRT = SIOCDELRT;\n  unsigned IOCTL_SIOCDRARP = SIOCDRARP;\n  unsigned IOCTL_SIOCGARP = SIOCGARP;\n  unsigned IOCTL_SIOCGIFENCAP = SIOCGIFENCAP;\n  unsigned IOCTL_SIOCGIFHWADDR = SIOCGIFHWADDR;\n  unsigned IOCTL_SIOCGIFMAP = SIOCGIFMAP;\n  unsigned IOCTL_SIOCGIFMEM = SIOCGIFMEM;\n  unsigned IOCTL_SIOCGIFNAME = SIOCGIFNAME;\n  unsigned IOCTL_SIOCGIFSLAVE = SIOCGIFSLAVE;\n  unsigned IOCTL_SIOCGRARP = SIOCGRARP;\n  unsigned IOCTL_SIOCGSTAMP = SIOCGSTAMP;\n  unsigned IOCTL_SIOCSARP = SIOCSARP;\n  unsigned IOCTL_SIOCSIFENCAP = SIOCSIFENCAP;\n  unsigned IOCTL_SIOCSIFHWADDR = SIOCSIFHWADDR;\n  unsigned IOCTL_SIOCSIFLINK = SIOCSIFLINK;\n  unsigned IOCTL_SIOCSIFMAP = SIOCSIFMAP;\n  unsigned IOCTL_SIOCSIFMEM = SIOCSIFMEM;\n  unsigned IOCTL_SIOCSIFSLAVE = SIOCSIFSLAVE;\n  unsigned IOCTL_SIOCSRARP = SIOCSRARP;\n# if SOUND_VERSION >= 0x040000\n  unsigned IOCTL_SNDCTL_COPR_HALT = IOCTL_NOT_PRESENT;\n  unsigned IOCTL_SNDCTL_COPR_LOAD = IOCTL_NOT_PRESENT;\n  unsigned IOCTL_SNDCTL_COPR_RCODE = IOCTL_NOT_PRESENT;\n  unsigned IOCTL_SNDCTL_COPR_RCVMSG = IOCTL_NOT_PRESENT;\n  unsigned IOCTL_SNDCTL_COPR_RDATA = IOCTL_NOT_PRESENT;\n  unsigned IOCTL_SNDCTL_COPR_RESET = IOCTL_NOT_PRESENT;\n  unsigned IOCTL_SNDCTL_COPR_RUN = IOCTL_NOT_PRESENT;\n  unsigned IOCTL_SNDCTL_COPR_SENDMSG = IOCTL_NOT_PRESENT;\n  unsigned IOCTL_SNDCTL_COPR_WCODE = IOCTL_NOT_PRESENT;\n  unsigned IOCTL_SNDCTL_COPR_WDATA = IOCTL_NOT_PRESENT;\n  unsigned IOCTL_SOUND_PCM_READ_BITS = IOCTL_NOT_PRESENT;\n  unsigned IOCTL_SOUND_PCM_READ_CHANNELS = IOCTL_NOT_PRESENT;\n  unsigned IOCTL_SOUND_PCM_READ_FILTER = IOCTL_NOT_PRESENT;\n  unsigned IOCTL_SOUND_PCM_READ_RATE = IOCTL_NOT_PRESENT;\n  unsigned IOCTL_SOUND_PCM_WRITE_CHANNELS = IOCTL_NOT_PRESENT;\n  unsigned IOCTL_SOUND_PCM_WRITE_FILTER = IOCTL_NOT_PRESENT;\n# else  // SOUND_VERSION\n  unsigned IOCTL_SNDCTL_COPR_HALT = SNDCTL_COPR_HALT;\n  unsigned IOCTL_SNDCTL_COPR_LOAD = SNDCTL_COPR_LOAD;\n  unsigned IOCTL_SNDCTL_COPR_RCODE = SNDCTL_COPR_RCODE;\n  unsigned 
IOCTL_SNDCTL_COPR_RCVMSG = SNDCTL_COPR_RCVMSG;\n  unsigned IOCTL_SNDCTL_COPR_RDATA = SNDCTL_COPR_RDATA;\n  unsigned IOCTL_SNDCTL_COPR_RESET = SNDCTL_COPR_RESET;\n  unsigned IOCTL_SNDCTL_COPR_RUN = SNDCTL_COPR_RUN;\n  unsigned IOCTL_SNDCTL_COPR_SENDMSG = SNDCTL_COPR_SENDMSG;\n  unsigned IOCTL_SNDCTL_COPR_WCODE = SNDCTL_COPR_WCODE;\n  unsigned IOCTL_SNDCTL_COPR_WDATA = SNDCTL_COPR_WDATA;\n  unsigned IOCTL_SOUND_PCM_READ_BITS = SOUND_PCM_READ_BITS;\n  unsigned IOCTL_SOUND_PCM_READ_CHANNELS = SOUND_PCM_READ_CHANNELS;\n  unsigned IOCTL_SOUND_PCM_READ_FILTER = SOUND_PCM_READ_FILTER;\n  unsigned IOCTL_SOUND_PCM_READ_RATE = SOUND_PCM_READ_RATE;\n  unsigned IOCTL_SOUND_PCM_WRITE_CHANNELS = SOUND_PCM_WRITE_CHANNELS;\n  unsigned IOCTL_SOUND_PCM_WRITE_FILTER = SOUND_PCM_WRITE_FILTER;\n#endif // SOUND_VERSION\n  unsigned IOCTL_TCFLSH = TCFLSH;\n  unsigned IOCTL_TCGETA = TCGETA;\n  unsigned IOCTL_TCGETS = TCGETS;\n  unsigned IOCTL_TCSBRK = TCSBRK;\n  unsigned IOCTL_TCSBRKP = TCSBRKP;\n  unsigned IOCTL_TCSETA = TCSETA;\n  unsigned IOCTL_TCSETAF = TCSETAF;\n  unsigned IOCTL_TCSETAW = TCSETAW;\n  unsigned IOCTL_TCSETS = TCSETS;\n  unsigned IOCTL_TCSETSF = TCSETSF;\n  unsigned IOCTL_TCSETSW = TCSETSW;\n  unsigned IOCTL_TCXONC = TCXONC;\n  unsigned IOCTL_TIOCGLCKTRMIOS = TIOCGLCKTRMIOS;\n  unsigned IOCTL_TIOCGSOFTCAR = TIOCGSOFTCAR;\n  unsigned IOCTL_TIOCINQ = TIOCINQ;\n  unsigned IOCTL_TIOCLINUX = TIOCLINUX;\n  unsigned IOCTL_TIOCSERCONFIG = TIOCSERCONFIG;\n  unsigned IOCTL_TIOCSERGETLSR = TIOCSERGETLSR;\n  unsigned IOCTL_TIOCSERGWILD = TIOCSERGWILD;\n  unsigned IOCTL_TIOCSERSWILD = TIOCSERSWILD;\n  unsigned IOCTL_TIOCSLCKTRMIOS = TIOCSLCKTRMIOS;\n  unsigned IOCTL_TIOCSSOFTCAR = TIOCSSOFTCAR;\n  unsigned IOCTL_VT_DISALLOCATE = VT_DISALLOCATE;\n  unsigned IOCTL_VT_GETSTATE = VT_GETSTATE;\n  unsigned IOCTL_VT_RESIZE = VT_RESIZE;\n  unsigned IOCTL_VT_RESIZEX = VT_RESIZEX;\n  unsigned IOCTL_VT_SENDSIG = VT_SENDSIG;\n  unsigned IOCTL_MTIOCGET = MTIOCGET;\n  unsigned IOCTL_MTIOCTOP = 
MTIOCTOP;\n  unsigned IOCTL_SNDCTL_DSP_GETBLKSIZE = SNDCTL_DSP_GETBLKSIZE;\n  unsigned IOCTL_SNDCTL_DSP_GETFMTS = SNDCTL_DSP_GETFMTS;\n  unsigned IOCTL_SNDCTL_DSP_NONBLOCK = SNDCTL_DSP_NONBLOCK;\n  unsigned IOCTL_SNDCTL_DSP_POST = SNDCTL_DSP_POST;\n  unsigned IOCTL_SNDCTL_DSP_RESET = SNDCTL_DSP_RESET;\n  unsigned IOCTL_SNDCTL_DSP_SETFMT = SNDCTL_DSP_SETFMT;\n  unsigned IOCTL_SNDCTL_DSP_SETFRAGMENT = SNDCTL_DSP_SETFRAGMENT;\n  unsigned IOCTL_SNDCTL_DSP_SPEED = SNDCTL_DSP_SPEED;\n  unsigned IOCTL_SNDCTL_DSP_STEREO = SNDCTL_DSP_STEREO;\n  unsigned IOCTL_SNDCTL_DSP_SUBDIVIDE = SNDCTL_DSP_SUBDIVIDE;\n  unsigned IOCTL_SNDCTL_DSP_SYNC = SNDCTL_DSP_SYNC;\n  unsigned IOCTL_SNDCTL_FM_4OP_ENABLE = SNDCTL_FM_4OP_ENABLE;\n  unsigned IOCTL_SNDCTL_FM_LOAD_INSTR = SNDCTL_FM_LOAD_INSTR;\n  unsigned IOCTL_SNDCTL_MIDI_INFO = SNDCTL_MIDI_INFO;\n  unsigned IOCTL_SNDCTL_MIDI_PRETIME = SNDCTL_MIDI_PRETIME;\n  unsigned IOCTL_SNDCTL_SEQ_CTRLRATE = SNDCTL_SEQ_CTRLRATE;\n  unsigned IOCTL_SNDCTL_SEQ_GETINCOUNT = SNDCTL_SEQ_GETINCOUNT;\n  unsigned IOCTL_SNDCTL_SEQ_GETOUTCOUNT = SNDCTL_SEQ_GETOUTCOUNT;\n  unsigned IOCTL_SNDCTL_SEQ_NRMIDIS = SNDCTL_SEQ_NRMIDIS;\n  unsigned IOCTL_SNDCTL_SEQ_NRSYNTHS = SNDCTL_SEQ_NRSYNTHS;\n  unsigned IOCTL_SNDCTL_SEQ_OUTOFBAND = SNDCTL_SEQ_OUTOFBAND;\n  unsigned IOCTL_SNDCTL_SEQ_PANIC = SNDCTL_SEQ_PANIC;\n  unsigned IOCTL_SNDCTL_SEQ_PERCMODE = SNDCTL_SEQ_PERCMODE;\n  unsigned IOCTL_SNDCTL_SEQ_RESET = SNDCTL_SEQ_RESET;\n  unsigned IOCTL_SNDCTL_SEQ_RESETSAMPLES = SNDCTL_SEQ_RESETSAMPLES;\n  unsigned IOCTL_SNDCTL_SEQ_SYNC = SNDCTL_SEQ_SYNC;\n  unsigned IOCTL_SNDCTL_SEQ_TESTMIDI = SNDCTL_SEQ_TESTMIDI;\n  unsigned IOCTL_SNDCTL_SEQ_THRESHOLD = SNDCTL_SEQ_THRESHOLD;\n  unsigned IOCTL_SNDCTL_SYNTH_INFO = SNDCTL_SYNTH_INFO;\n  unsigned IOCTL_SNDCTL_SYNTH_MEMAVL = SNDCTL_SYNTH_MEMAVL;\n  unsigned IOCTL_SNDCTL_TMR_CONTINUE = SNDCTL_TMR_CONTINUE;\n  unsigned IOCTL_SNDCTL_TMR_METRONOME = SNDCTL_TMR_METRONOME;\n  unsigned IOCTL_SNDCTL_TMR_SELECT = SNDCTL_TMR_SELECT;\n  
unsigned IOCTL_SNDCTL_TMR_SOURCE = SNDCTL_TMR_SOURCE;\n  unsigned IOCTL_SNDCTL_TMR_START = SNDCTL_TMR_START;\n  unsigned IOCTL_SNDCTL_TMR_STOP = SNDCTL_TMR_STOP;\n  unsigned IOCTL_SNDCTL_TMR_TEMPO = SNDCTL_TMR_TEMPO;\n  unsigned IOCTL_SNDCTL_TMR_TIMEBASE = SNDCTL_TMR_TIMEBASE;\n  unsigned IOCTL_SOUND_MIXER_READ_ALTPCM = SOUND_MIXER_READ_ALTPCM;\n  unsigned IOCTL_SOUND_MIXER_READ_BASS = SOUND_MIXER_READ_BASS;\n  unsigned IOCTL_SOUND_MIXER_READ_CAPS = SOUND_MIXER_READ_CAPS;\n  unsigned IOCTL_SOUND_MIXER_READ_CD = SOUND_MIXER_READ_CD;\n  unsigned IOCTL_SOUND_MIXER_READ_DEVMASK = SOUND_MIXER_READ_DEVMASK;\n  unsigned IOCTL_SOUND_MIXER_READ_ENHANCE = SOUND_MIXER_READ_ENHANCE;\n  unsigned IOCTL_SOUND_MIXER_READ_IGAIN = SOUND_MIXER_READ_IGAIN;\n  unsigned IOCTL_SOUND_MIXER_READ_IMIX = SOUND_MIXER_READ_IMIX;\n  unsigned IOCTL_SOUND_MIXER_READ_LINE = SOUND_MIXER_READ_LINE;\n  unsigned IOCTL_SOUND_MIXER_READ_LINE1 = SOUND_MIXER_READ_LINE1;\n  unsigned IOCTL_SOUND_MIXER_READ_LINE2 = SOUND_MIXER_READ_LINE2;\n  unsigned IOCTL_SOUND_MIXER_READ_LINE3 = SOUND_MIXER_READ_LINE3;\n  unsigned IOCTL_SOUND_MIXER_READ_LOUD = SOUND_MIXER_READ_LOUD;\n  unsigned IOCTL_SOUND_MIXER_READ_MIC = SOUND_MIXER_READ_MIC;\n  unsigned IOCTL_SOUND_MIXER_READ_MUTE = SOUND_MIXER_READ_MUTE;\n  unsigned IOCTL_SOUND_MIXER_READ_OGAIN = SOUND_MIXER_READ_OGAIN;\n  unsigned IOCTL_SOUND_MIXER_READ_PCM = SOUND_MIXER_READ_PCM;\n  unsigned IOCTL_SOUND_MIXER_READ_RECLEV = SOUND_MIXER_READ_RECLEV;\n  unsigned IOCTL_SOUND_MIXER_READ_RECMASK = SOUND_MIXER_READ_RECMASK;\n  unsigned IOCTL_SOUND_MIXER_READ_RECSRC = SOUND_MIXER_READ_RECSRC;\n  unsigned IOCTL_SOUND_MIXER_READ_SPEAKER = SOUND_MIXER_READ_SPEAKER;\n  unsigned IOCTL_SOUND_MIXER_READ_STEREODEVS = SOUND_MIXER_READ_STEREODEVS;\n  unsigned IOCTL_SOUND_MIXER_READ_SYNTH = SOUND_MIXER_READ_SYNTH;\n  unsigned IOCTL_SOUND_MIXER_READ_TREBLE = SOUND_MIXER_READ_TREBLE;\n  unsigned IOCTL_SOUND_MIXER_READ_VOLUME = SOUND_MIXER_READ_VOLUME;\n  unsigned 
IOCTL_SOUND_MIXER_WRITE_ALTPCM = SOUND_MIXER_WRITE_ALTPCM;\n  unsigned IOCTL_SOUND_MIXER_WRITE_BASS = SOUND_MIXER_WRITE_BASS;\n  unsigned IOCTL_SOUND_MIXER_WRITE_CD = SOUND_MIXER_WRITE_CD;\n  unsigned IOCTL_SOUND_MIXER_WRITE_ENHANCE = SOUND_MIXER_WRITE_ENHANCE;\n  unsigned IOCTL_SOUND_MIXER_WRITE_IGAIN = SOUND_MIXER_WRITE_IGAIN;\n  unsigned IOCTL_SOUND_MIXER_WRITE_IMIX = SOUND_MIXER_WRITE_IMIX;\n  unsigned IOCTL_SOUND_MIXER_WRITE_LINE = SOUND_MIXER_WRITE_LINE;\n  unsigned IOCTL_SOUND_MIXER_WRITE_LINE1 = SOUND_MIXER_WRITE_LINE1;\n  unsigned IOCTL_SOUND_MIXER_WRITE_LINE2 = SOUND_MIXER_WRITE_LINE2;\n  unsigned IOCTL_SOUND_MIXER_WRITE_LINE3 = SOUND_MIXER_WRITE_LINE3;\n  unsigned IOCTL_SOUND_MIXER_WRITE_LOUD = SOUND_MIXER_WRITE_LOUD;\n  unsigned IOCTL_SOUND_MIXER_WRITE_MIC = SOUND_MIXER_WRITE_MIC;\n  unsigned IOCTL_SOUND_MIXER_WRITE_MUTE = SOUND_MIXER_WRITE_MUTE;\n  unsigned IOCTL_SOUND_MIXER_WRITE_OGAIN = SOUND_MIXER_WRITE_OGAIN;\n  unsigned IOCTL_SOUND_MIXER_WRITE_PCM = SOUND_MIXER_WRITE_PCM;\n  unsigned IOCTL_SOUND_MIXER_WRITE_RECLEV = SOUND_MIXER_WRITE_RECLEV;\n  unsigned IOCTL_SOUND_MIXER_WRITE_RECSRC = SOUND_MIXER_WRITE_RECSRC;\n  unsigned IOCTL_SOUND_MIXER_WRITE_SPEAKER = SOUND_MIXER_WRITE_SPEAKER;\n  unsigned IOCTL_SOUND_MIXER_WRITE_SYNTH = SOUND_MIXER_WRITE_SYNTH;\n  unsigned IOCTL_SOUND_MIXER_WRITE_TREBLE = SOUND_MIXER_WRITE_TREBLE;\n  unsigned IOCTL_SOUND_MIXER_WRITE_VOLUME = SOUND_MIXER_WRITE_VOLUME;\n  unsigned IOCTL_VT_ACTIVATE = VT_ACTIVATE;\n  unsigned IOCTL_VT_GETMODE = VT_GETMODE;\n  unsigned IOCTL_VT_OPENQRY = VT_OPENQRY;\n  unsigned IOCTL_VT_RELDISP = VT_RELDISP;\n  unsigned IOCTL_VT_SETMODE = VT_SETMODE;\n  unsigned IOCTL_VT_WAITACTIVE = VT_WAITACTIVE;\n#endif // SANITIZER_LINUX\n\n#if SANITIZER_LINUX && !SANITIZER_ANDROID\n  unsigned IOCTL_EQL_EMANCIPATE = EQL_EMANCIPATE;\n  unsigned IOCTL_EQL_ENSLAVE = EQL_ENSLAVE;\n  unsigned IOCTL_EQL_GETMASTRCFG = EQL_GETMASTRCFG;\n  unsigned IOCTL_EQL_GETSLAVECFG = EQL_GETSLAVECFG;\n  unsigned 
IOCTL_EQL_SETMASTRCFG = EQL_SETMASTRCFG;\n  unsigned IOCTL_EQL_SETSLAVECFG = EQL_SETSLAVECFG;\n#if EV_VERSION > (0x010000)\n  unsigned IOCTL_EVIOCGKEYCODE_V2 = EVIOCGKEYCODE_V2;\n  unsigned IOCTL_EVIOCGPROP = EVIOCGPROP(0);\n  unsigned IOCTL_EVIOCSKEYCODE_V2 = EVIOCSKEYCODE_V2;\n#else\n  unsigned IOCTL_EVIOCGKEYCODE_V2 = IOCTL_NOT_PRESENT;\n  unsigned IOCTL_EVIOCGPROP = IOCTL_NOT_PRESENT;\n  unsigned IOCTL_EVIOCSKEYCODE_V2 = IOCTL_NOT_PRESENT;\n#endif\n  unsigned IOCTL_FS_IOC_GETFLAGS = FS_IOC_GETFLAGS;\n  unsigned IOCTL_FS_IOC_GETVERSION = FS_IOC_GETVERSION;\n  unsigned IOCTL_FS_IOC_SETFLAGS = FS_IOC_SETFLAGS;\n  unsigned IOCTL_FS_IOC_SETVERSION = FS_IOC_SETVERSION;\n  unsigned IOCTL_GIO_CMAP = GIO_CMAP;\n  unsigned IOCTL_GIO_FONT = GIO_FONT;\n  unsigned IOCTL_GIO_UNIMAP = GIO_UNIMAP;\n  unsigned IOCTL_GIO_UNISCRNMAP = GIO_UNISCRNMAP;\n  unsigned IOCTL_KDADDIO = KDADDIO;\n  unsigned IOCTL_KDDELIO = KDDELIO;\n  unsigned IOCTL_KDGETKEYCODE = KDGETKEYCODE;\n  unsigned IOCTL_KDGKBDIACR = KDGKBDIACR;\n  unsigned IOCTL_KDGKBENT = KDGKBENT;\n  unsigned IOCTL_KDGKBLED = KDGKBLED;\n  unsigned IOCTL_KDGKBMETA = KDGKBMETA;\n  unsigned IOCTL_KDGKBSENT = KDGKBSENT;\n  unsigned IOCTL_KDMAPDISP = KDMAPDISP;\n  unsigned IOCTL_KDSETKEYCODE = KDSETKEYCODE;\n  unsigned IOCTL_KDSIGACCEPT = KDSIGACCEPT;\n  unsigned IOCTL_KDSKBDIACR = KDSKBDIACR;\n  unsigned IOCTL_KDSKBENT = KDSKBENT;\n  unsigned IOCTL_KDSKBLED = KDSKBLED;\n  unsigned IOCTL_KDSKBMETA = KDSKBMETA;\n  unsigned IOCTL_KDSKBSENT = KDSKBSENT;\n  unsigned IOCTL_KDUNMAPDISP = KDUNMAPDISP;\n  unsigned IOCTL_LPABORT = LPABORT;\n  unsigned IOCTL_LPABORTOPEN = LPABORTOPEN;\n  unsigned IOCTL_LPCAREFUL = LPCAREFUL;\n  unsigned IOCTL_LPCHAR = LPCHAR;\n  unsigned IOCTL_LPGETIRQ = LPGETIRQ;\n  unsigned IOCTL_LPGETSTATUS = LPGETSTATUS;\n  unsigned IOCTL_LPRESET = LPRESET;\n  unsigned IOCTL_LPSETIRQ = LPSETIRQ;\n  unsigned IOCTL_LPTIME = LPTIME;\n  unsigned IOCTL_LPWAIT = LPWAIT;\n  unsigned IOCTL_MTIOCGETCONFIG = MTIOCGETCONFIG;\n  
unsigned IOCTL_MTIOCSETCONFIG = MTIOCSETCONFIG;\n  unsigned IOCTL_PIO_CMAP = PIO_CMAP;\n  unsigned IOCTL_PIO_FONT = PIO_FONT;\n  unsigned IOCTL_PIO_UNIMAP = PIO_UNIMAP;\n  unsigned IOCTL_PIO_UNIMAPCLR = PIO_UNIMAPCLR;\n  unsigned IOCTL_PIO_UNISCRNMAP = PIO_UNISCRNMAP;\n#if SANITIZER_GLIBC\n  unsigned IOCTL_SCSI_IOCTL_GET_IDLUN = SCSI_IOCTL_GET_IDLUN;\n  unsigned IOCTL_SCSI_IOCTL_PROBE_HOST = SCSI_IOCTL_PROBE_HOST;\n  unsigned IOCTL_SCSI_IOCTL_TAGGED_DISABLE = SCSI_IOCTL_TAGGED_DISABLE;\n  unsigned IOCTL_SCSI_IOCTL_TAGGED_ENABLE = SCSI_IOCTL_TAGGED_ENABLE;\n  unsigned IOCTL_SIOCAIPXITFCRT = SIOCAIPXITFCRT;\n  unsigned IOCTL_SIOCAIPXPRISLT = SIOCAIPXPRISLT;\n  unsigned IOCTL_SIOCAX25ADDUID = SIOCAX25ADDUID;\n  unsigned IOCTL_SIOCAX25DELUID = SIOCAX25DELUID;\n  unsigned IOCTL_SIOCAX25GETPARMS = SIOCAX25GETPARMS;\n  unsigned IOCTL_SIOCAX25GETUID = SIOCAX25GETUID;\n  unsigned IOCTL_SIOCAX25NOUID = SIOCAX25NOUID;\n  unsigned IOCTL_SIOCAX25SETPARMS = SIOCAX25SETPARMS;\n  unsigned IOCTL_SIOCDEVPLIP = SIOCDEVPLIP;\n  unsigned IOCTL_SIOCIPXCFGDATA = SIOCIPXCFGDATA;\n  unsigned IOCTL_SIOCNRDECOBS = SIOCNRDECOBS;\n  unsigned IOCTL_SIOCNRGETPARMS = SIOCNRGETPARMS;\n  unsigned IOCTL_SIOCNRRTCTL = SIOCNRRTCTL;\n  unsigned IOCTL_SIOCNRSETPARMS = SIOCNRSETPARMS;\n#endif\n  unsigned IOCTL_TIOCGSERIAL = TIOCGSERIAL;\n  unsigned IOCTL_TIOCSERGETMULTI = TIOCSERGETMULTI;\n  unsigned IOCTL_TIOCSERSETMULTI = TIOCSERSETMULTI;\n  unsigned IOCTL_TIOCSSERIAL = TIOCSSERIAL;\n#endif // SANITIZER_LINUX && !SANITIZER_ANDROID\n\n#if SANITIZER_LINUX && !SANITIZER_ANDROID\n  unsigned IOCTL_GIO_SCRNMAP = GIO_SCRNMAP;\n  unsigned IOCTL_KDDISABIO = KDDISABIO;\n  unsigned IOCTL_KDENABIO = KDENABIO;\n  unsigned IOCTL_KDGETLED = KDGETLED;\n  unsigned IOCTL_KDGETMODE = KDGETMODE;\n  unsigned IOCTL_KDGKBMODE = KDGKBMODE;\n  unsigned IOCTL_KDGKBTYPE = KDGKBTYPE;\n  unsigned IOCTL_KDMKTONE = KDMKTONE;\n  unsigned IOCTL_KDSETLED = KDSETLED;\n  unsigned IOCTL_KDSETMODE = KDSETMODE;\n  unsigned IOCTL_KDSKBMODE = 
KDSKBMODE;\n  unsigned IOCTL_KIOCSOUND = KIOCSOUND;\n  unsigned IOCTL_PIO_SCRNMAP = PIO_SCRNMAP;\n  unsigned IOCTL_SNDCTL_DSP_GETISPACE = SNDCTL_DSP_GETISPACE;\n  unsigned IOCTL_SNDCTL_DSP_GETOSPACE = SNDCTL_DSP_GETOSPACE;\n#endif // (SANITIZER_LINUX || SANITIZER_FREEBSD) && !SANITIZER_ANDROID\n\n  const int si_SEGV_MAPERR = SEGV_MAPERR;\n  const int si_SEGV_ACCERR = SEGV_ACCERR;\n} // namespace __sanitizer\n\nusing namespace __sanitizer;\n\nCOMPILER_CHECK(sizeof(__sanitizer_pthread_attr_t) >= sizeof(pthread_attr_t));\n\nCOMPILER_CHECK(sizeof(socklen_t) == sizeof(unsigned));\nCHECK_TYPE_SIZE(pthread_key_t);\n\n#if SANITIZER_LINUX\n// FIXME: We define those on Linux and Mac, but only check on Linux.\nCOMPILER_CHECK(IOC_NRBITS == _IOC_NRBITS);\nCOMPILER_CHECK(IOC_TYPEBITS == _IOC_TYPEBITS);\nCOMPILER_CHECK(IOC_SIZEBITS == _IOC_SIZEBITS);\nCOMPILER_CHECK(IOC_DIRBITS == _IOC_DIRBITS);\nCOMPILER_CHECK(IOC_NRMASK == _IOC_NRMASK);\nCOMPILER_CHECK(IOC_TYPEMASK == _IOC_TYPEMASK);\nCOMPILER_CHECK(IOC_SIZEMASK == _IOC_SIZEMASK);\nCOMPILER_CHECK(IOC_DIRMASK == _IOC_DIRMASK);\nCOMPILER_CHECK(IOC_NRSHIFT == _IOC_NRSHIFT);\nCOMPILER_CHECK(IOC_TYPESHIFT == _IOC_TYPESHIFT);\nCOMPILER_CHECK(IOC_SIZESHIFT == _IOC_SIZESHIFT);\nCOMPILER_CHECK(IOC_DIRSHIFT == _IOC_DIRSHIFT);\nCOMPILER_CHECK(IOC_NONE == _IOC_NONE);\nCOMPILER_CHECK(IOC_WRITE == _IOC_WRITE);\nCOMPILER_CHECK(IOC_READ == _IOC_READ);\nCOMPILER_CHECK(EVIOC_ABS_MAX == ABS_MAX);\nCOMPILER_CHECK(EVIOC_EV_MAX == EV_MAX);\nCOMPILER_CHECK(IOC_SIZE(0x12345678) == _IOC_SIZE(0x12345678));\nCOMPILER_CHECK(IOC_DIR(0x12345678) == _IOC_DIR(0x12345678));\nCOMPILER_CHECK(IOC_NR(0x12345678) == _IOC_NR(0x12345678));\nCOMPILER_CHECK(IOC_TYPE(0x12345678) == _IOC_TYPE(0x12345678));\n#endif // SANITIZER_LINUX\n\n#if SANITIZER_LINUX || SANITIZER_FREEBSD\n// There are more undocumented fields in dl_phdr_info that we are not interested\n// in.\nCOMPILER_CHECK(sizeof(__sanitizer_dl_phdr_info) <= 
sizeof(dl_phdr_info));\nCHECK_SIZE_AND_OFFSET(dl_phdr_info, dlpi_addr);\nCHECK_SIZE_AND_OFFSET(dl_phdr_info, dlpi_name);\nCHECK_SIZE_AND_OFFSET(dl_phdr_info, dlpi_phdr);\nCHECK_SIZE_AND_OFFSET(dl_phdr_info, dlpi_phnum);\n#endif // SANITIZER_LINUX || SANITIZER_FREEBSD\n\n#if SANITIZER_GLIBC || SANITIZER_FREEBSD\nCHECK_TYPE_SIZE(glob_t);\nCHECK_SIZE_AND_OFFSET(glob_t, gl_pathc);\nCHECK_SIZE_AND_OFFSET(glob_t, gl_pathv);\nCHECK_SIZE_AND_OFFSET(glob_t, gl_offs);\nCHECK_SIZE_AND_OFFSET(glob_t, gl_flags);\nCHECK_SIZE_AND_OFFSET(glob_t, gl_closedir);\nCHECK_SIZE_AND_OFFSET(glob_t, gl_readdir);\nCHECK_SIZE_AND_OFFSET(glob_t, gl_opendir);\nCHECK_SIZE_AND_OFFSET(glob_t, gl_lstat);\nCHECK_SIZE_AND_OFFSET(glob_t, gl_stat);\n#endif  // SANITIZER_GLIBC || SANITIZER_FREEBSD\n\nCHECK_TYPE_SIZE(addrinfo);\nCHECK_SIZE_AND_OFFSET(addrinfo, ai_flags);\nCHECK_SIZE_AND_OFFSET(addrinfo, ai_family);\nCHECK_SIZE_AND_OFFSET(addrinfo, ai_socktype);\nCHECK_SIZE_AND_OFFSET(addrinfo, ai_protocol);\nCHECK_SIZE_AND_OFFSET(addrinfo, ai_protocol);\nCHECK_SIZE_AND_OFFSET(addrinfo, ai_addrlen);\nCHECK_SIZE_AND_OFFSET(addrinfo, ai_canonname);\nCHECK_SIZE_AND_OFFSET(addrinfo, ai_addr);\n\nCHECK_TYPE_SIZE(hostent);\nCHECK_SIZE_AND_OFFSET(hostent, h_name);\nCHECK_SIZE_AND_OFFSET(hostent, h_aliases);\nCHECK_SIZE_AND_OFFSET(hostent, h_addrtype);\nCHECK_SIZE_AND_OFFSET(hostent, h_length);\nCHECK_SIZE_AND_OFFSET(hostent, h_addr_list);\n\nCHECK_TYPE_SIZE(iovec);\nCHECK_SIZE_AND_OFFSET(iovec, iov_base);\nCHECK_SIZE_AND_OFFSET(iovec, iov_len);\n\n// In POSIX, int msg_iovlen; socklen_t msg_controllen; socklen_t cmsg_len; but\n// many implementations don't conform to the standard. 
Since we pick the\n// non-conforming glibc definition, exclude the checks for musl (incompatible\n// sizes but compatible offsets).\nCHECK_TYPE_SIZE(msghdr);\nCHECK_SIZE_AND_OFFSET(msghdr, msg_name);\nCHECK_SIZE_AND_OFFSET(msghdr, msg_namelen);\nCHECK_SIZE_AND_OFFSET(msghdr, msg_iov);\n#if SANITIZER_GLIBC || SANITIZER_ANDROID\nCHECK_SIZE_AND_OFFSET(msghdr, msg_iovlen);\n#endif\nCHECK_SIZE_AND_OFFSET(msghdr, msg_control);\n#if SANITIZER_GLIBC || SANITIZER_ANDROID\nCHECK_SIZE_AND_OFFSET(msghdr, msg_controllen);\n#endif\nCHECK_SIZE_AND_OFFSET(msghdr, msg_flags);\n\nCHECK_TYPE_SIZE(cmsghdr);\n#if SANITIZER_GLIBC || SANITIZER_ANDROID\nCHECK_SIZE_AND_OFFSET(cmsghdr, cmsg_len);\n#endif\nCHECK_SIZE_AND_OFFSET(cmsghdr, cmsg_level);\nCHECK_SIZE_AND_OFFSET(cmsghdr, cmsg_type);\n\n#if SANITIZER_LINUX && (__ANDROID_API__ >= 21 || __GLIBC_PREREQ (2, 14))\nCHECK_TYPE_SIZE(mmsghdr);\nCHECK_SIZE_AND_OFFSET(mmsghdr, msg_hdr);\nCHECK_SIZE_AND_OFFSET(mmsghdr, msg_len);\n#endif\n\nCOMPILER_CHECK(sizeof(__sanitizer_dirent) <= sizeof(dirent));\nCHECK_SIZE_AND_OFFSET(dirent, d_ino);\n#if SANITIZER_MAC\nCHECK_SIZE_AND_OFFSET(dirent, d_seekoff);\n#elif SANITIZER_FREEBSD\n// There is no 'd_off' field on FreeBSD.\n#else\nCHECK_SIZE_AND_OFFSET(dirent, d_off);\n#endif\nCHECK_SIZE_AND_OFFSET(dirent, d_reclen);\n\n#if SANITIZER_LINUX && !SANITIZER_ANDROID\nCOMPILER_CHECK(sizeof(__sanitizer_dirent64) <= sizeof(dirent64));\nCHECK_SIZE_AND_OFFSET(dirent64, d_ino);\nCHECK_SIZE_AND_OFFSET(dirent64, d_off);\nCHECK_SIZE_AND_OFFSET(dirent64, d_reclen);\n#endif\n\nCHECK_TYPE_SIZE(ifconf);\nCHECK_SIZE_AND_OFFSET(ifconf, ifc_len);\nCHECK_SIZE_AND_OFFSET(ifconf, ifc_ifcu);\n\nCHECK_TYPE_SIZE(pollfd);\nCHECK_SIZE_AND_OFFSET(pollfd, fd);\nCHECK_SIZE_AND_OFFSET(pollfd, events);\nCHECK_SIZE_AND_OFFSET(pollfd, revents);\n\nCHECK_TYPE_SIZE(nfds_t);\n\nCHECK_TYPE_SIZE(sigset_t);\n\nCOMPILER_CHECK(sizeof(__sanitizer_sigaction) == sizeof(struct sigaction));\n// Can't write checks for sa_handler and sa_sigaction due 
to them being\n// preprocessor macros.\nCHECK_STRUCT_SIZE_AND_OFFSET(sigaction, sa_mask);\n#if !defined(__s390x__) || __GLIBC_PREREQ (2, 20)\n// On s390x glibc 2.19 and earlier sa_flags was unsigned long, and sa_resv\n// didn't exist.\nCHECK_STRUCT_SIZE_AND_OFFSET(sigaction, sa_flags);\n#endif\n#if SANITIZER_LINUX && (!SANITIZER_ANDROID || !SANITIZER_MIPS32)\nCHECK_STRUCT_SIZE_AND_OFFSET(sigaction, sa_restorer);\n#endif\n\n#if SANITIZER_LINUX\nCHECK_TYPE_SIZE(__sysctl_args);\nCHECK_SIZE_AND_OFFSET(__sysctl_args, name);\nCHECK_SIZE_AND_OFFSET(__sysctl_args, nlen);\nCHECK_SIZE_AND_OFFSET(__sysctl_args, oldval);\nCHECK_SIZE_AND_OFFSET(__sysctl_args, oldlenp);\nCHECK_SIZE_AND_OFFSET(__sysctl_args, newval);\nCHECK_SIZE_AND_OFFSET(__sysctl_args, newlen);\n\nCHECK_TYPE_SIZE(__kernel_uid_t);\nCHECK_TYPE_SIZE(__kernel_gid_t);\n\n#if SANITIZER_USES_UID16_SYSCALLS\nCHECK_TYPE_SIZE(__kernel_old_uid_t);\nCHECK_TYPE_SIZE(__kernel_old_gid_t);\n#endif\n\nCHECK_TYPE_SIZE(__kernel_off_t);\nCHECK_TYPE_SIZE(__kernel_loff_t);\nCHECK_TYPE_SIZE(__kernel_fd_set);\n#endif\n\n#if !SANITIZER_ANDROID\nCHECK_TYPE_SIZE(wordexp_t);\nCHECK_SIZE_AND_OFFSET(wordexp_t, we_wordc);\nCHECK_SIZE_AND_OFFSET(wordexp_t, we_wordv);\nCHECK_SIZE_AND_OFFSET(wordexp_t, we_offs);\n#endif\n\nCHECK_TYPE_SIZE(tm);\nCHECK_SIZE_AND_OFFSET(tm, tm_sec);\nCHECK_SIZE_AND_OFFSET(tm, tm_min);\nCHECK_SIZE_AND_OFFSET(tm, tm_hour);\nCHECK_SIZE_AND_OFFSET(tm, tm_mday);\nCHECK_SIZE_AND_OFFSET(tm, tm_mon);\nCHECK_SIZE_AND_OFFSET(tm, tm_year);\nCHECK_SIZE_AND_OFFSET(tm, tm_wday);\nCHECK_SIZE_AND_OFFSET(tm, tm_yday);\nCHECK_SIZE_AND_OFFSET(tm, tm_isdst);\nCHECK_SIZE_AND_OFFSET(tm, tm_gmtoff);\nCHECK_SIZE_AND_OFFSET(tm, tm_zone);\n\n#if SANITIZER_LINUX\nCHECK_TYPE_SIZE(mntent);\nCHECK_SIZE_AND_OFFSET(mntent, mnt_fsname);\nCHECK_SIZE_AND_OFFSET(mntent, mnt_dir);\nCHECK_SIZE_AND_OFFSET(mntent, mnt_type);\nCHECK_SIZE_AND_OFFSET(mntent, mnt_opts);\nCHECK_SIZE_AND_OFFSET(mntent, mnt_freq);\nCHECK_SIZE_AND_OFFSET(mntent, 
mnt_passno);\n#endif\n\nCHECK_TYPE_SIZE(ether_addr);\n\n#if SANITIZER_GLIBC || SANITIZER_FREEBSD\nCHECK_TYPE_SIZE(ipc_perm);\n# if SANITIZER_FREEBSD\nCHECK_SIZE_AND_OFFSET(ipc_perm, key);\nCHECK_SIZE_AND_OFFSET(ipc_perm, seq);\n# else\nCHECK_SIZE_AND_OFFSET(ipc_perm, __key);\nCHECK_SIZE_AND_OFFSET(ipc_perm, __seq);\n# endif\nCHECK_SIZE_AND_OFFSET(ipc_perm, uid);\nCHECK_SIZE_AND_OFFSET(ipc_perm, gid);\nCHECK_SIZE_AND_OFFSET(ipc_perm, cuid);\nCHECK_SIZE_AND_OFFSET(ipc_perm, cgid);\n#if !SANITIZER_LINUX || __GLIBC_PREREQ (2, 31)\n/* glibc 2.30 and earlier provided 16-bit mode field instead of 32-bit\n   on many architectures.  */\nCHECK_SIZE_AND_OFFSET(ipc_perm, mode);\n#endif\n\nCHECK_TYPE_SIZE(shmid_ds);\nCHECK_SIZE_AND_OFFSET(shmid_ds, shm_perm);\nCHECK_SIZE_AND_OFFSET(shmid_ds, shm_segsz);\nCHECK_SIZE_AND_OFFSET(shmid_ds, shm_atime);\nCHECK_SIZE_AND_OFFSET(shmid_ds, shm_dtime);\nCHECK_SIZE_AND_OFFSET(shmid_ds, shm_ctime);\nCHECK_SIZE_AND_OFFSET(shmid_ds, shm_cpid);\nCHECK_SIZE_AND_OFFSET(shmid_ds, shm_lpid);\nCHECK_SIZE_AND_OFFSET(shmid_ds, shm_nattch);\n#endif\n\nCHECK_TYPE_SIZE(clock_t);\n\n#if SANITIZER_LINUX\nCHECK_TYPE_SIZE(clockid_t);\n#endif\n\n#if !SANITIZER_ANDROID\nCHECK_TYPE_SIZE(ifaddrs);\nCHECK_SIZE_AND_OFFSET(ifaddrs, ifa_next);\nCHECK_SIZE_AND_OFFSET(ifaddrs, ifa_name);\nCHECK_SIZE_AND_OFFSET(ifaddrs, ifa_addr);\nCHECK_SIZE_AND_OFFSET(ifaddrs, ifa_netmask);\n#if SANITIZER_LINUX || SANITIZER_FREEBSD\n// Compare against the union, because we can't reach into the union in a\n// compliant way.\n#ifdef ifa_dstaddr\n#undef ifa_dstaddr\n#endif\n# if SANITIZER_FREEBSD\nCHECK_SIZE_AND_OFFSET(ifaddrs, ifa_dstaddr);\n# else\nCOMPILER_CHECK(sizeof(((__sanitizer_ifaddrs *)nullptr)->ifa_dstaddr) ==\n               sizeof(((ifaddrs *)nullptr)->ifa_ifu));\nCOMPILER_CHECK(offsetof(__sanitizer_ifaddrs, ifa_dstaddr) ==\n               offsetof(ifaddrs, ifa_ifu));\n# endif // SANITIZER_FREEBSD\n#else\nCHECK_SIZE_AND_OFFSET(ifaddrs, ifa_dstaddr);\n#endif // 
SANITIZER_LINUX\nCHECK_SIZE_AND_OFFSET(ifaddrs, ifa_data);\n#endif\n\n#if SANITIZER_GLIBC || SANITIZER_ANDROID\nCOMPILER_CHECK(sizeof(__sanitizer_struct_mallinfo) == sizeof(struct mallinfo));\n#endif\n\n#if !SANITIZER_ANDROID\nCHECK_TYPE_SIZE(timeb);\nCHECK_SIZE_AND_OFFSET(timeb, time);\nCHECK_SIZE_AND_OFFSET(timeb, millitm);\nCHECK_SIZE_AND_OFFSET(timeb, timezone);\nCHECK_SIZE_AND_OFFSET(timeb, dstflag);\n#endif\n\nCHECK_TYPE_SIZE(passwd);\nCHECK_SIZE_AND_OFFSET(passwd, pw_name);\nCHECK_SIZE_AND_OFFSET(passwd, pw_passwd);\nCHECK_SIZE_AND_OFFSET(passwd, pw_uid);\nCHECK_SIZE_AND_OFFSET(passwd, pw_gid);\nCHECK_SIZE_AND_OFFSET(passwd, pw_dir);\nCHECK_SIZE_AND_OFFSET(passwd, pw_shell);\n\n#if !SANITIZER_ANDROID\nCHECK_SIZE_AND_OFFSET(passwd, pw_gecos);\n#endif\n\n#if SANITIZER_MAC\nCHECK_SIZE_AND_OFFSET(passwd, pw_change);\nCHECK_SIZE_AND_OFFSET(passwd, pw_expire);\nCHECK_SIZE_AND_OFFSET(passwd, pw_class);\n#endif\n\n\nCHECK_TYPE_SIZE(group);\nCHECK_SIZE_AND_OFFSET(group, gr_name);\nCHECK_SIZE_AND_OFFSET(group, gr_passwd);\nCHECK_SIZE_AND_OFFSET(group, gr_gid);\nCHECK_SIZE_AND_OFFSET(group, gr_mem);\n\n#if HAVE_RPC_XDR_H\nCHECK_TYPE_SIZE(XDR);\nCHECK_SIZE_AND_OFFSET(XDR, x_op);\nCHECK_SIZE_AND_OFFSET(XDR, x_ops);\nCHECK_SIZE_AND_OFFSET(XDR, x_public);\nCHECK_SIZE_AND_OFFSET(XDR, x_private);\nCHECK_SIZE_AND_OFFSET(XDR, x_base);\nCHECK_SIZE_AND_OFFSET(XDR, x_handy);\nCOMPILER_CHECK(__sanitizer_XDR_ENCODE == XDR_ENCODE);\nCOMPILER_CHECK(__sanitizer_XDR_DECODE == XDR_DECODE);\nCOMPILER_CHECK(__sanitizer_XDR_FREE == XDR_FREE);\n#endif\n\n#if SANITIZER_GLIBC\nCOMPILER_CHECK(sizeof(__sanitizer_FILE) <= sizeof(FILE));\nCHECK_SIZE_AND_OFFSET(FILE, _flags);\nCHECK_SIZE_AND_OFFSET(FILE, _IO_read_ptr);\nCHECK_SIZE_AND_OFFSET(FILE, _IO_read_end);\nCHECK_SIZE_AND_OFFSET(FILE, _IO_read_base);\nCHECK_SIZE_AND_OFFSET(FILE, _IO_write_ptr);\nCHECK_SIZE_AND_OFFSET(FILE, _IO_write_end);\nCHECK_SIZE_AND_OFFSET(FILE, _IO_write_base);\nCHECK_SIZE_AND_OFFSET(FILE, 
_IO_buf_base);\nCHECK_SIZE_AND_OFFSET(FILE, _IO_buf_end);\nCHECK_SIZE_AND_OFFSET(FILE, _IO_save_base);\nCHECK_SIZE_AND_OFFSET(FILE, _IO_backup_base);\nCHECK_SIZE_AND_OFFSET(FILE, _IO_save_end);\nCHECK_SIZE_AND_OFFSET(FILE, _markers);\nCHECK_SIZE_AND_OFFSET(FILE, _chain);\nCHECK_SIZE_AND_OFFSET(FILE, _fileno);\n\nCOMPILER_CHECK(sizeof(__sanitizer__obstack_chunk) <= sizeof(_obstack_chunk));\nCHECK_SIZE_AND_OFFSET(_obstack_chunk, limit);\nCHECK_SIZE_AND_OFFSET(_obstack_chunk, prev);\nCHECK_TYPE_SIZE(obstack);\nCHECK_SIZE_AND_OFFSET(obstack, chunk_size);\nCHECK_SIZE_AND_OFFSET(obstack, chunk);\nCHECK_SIZE_AND_OFFSET(obstack, object_base);\nCHECK_SIZE_AND_OFFSET(obstack, next_free);\n\nCHECK_TYPE_SIZE(cookie_io_functions_t);\nCHECK_SIZE_AND_OFFSET(cookie_io_functions_t, read);\nCHECK_SIZE_AND_OFFSET(cookie_io_functions_t, write);\nCHECK_SIZE_AND_OFFSET(cookie_io_functions_t, seek);\nCHECK_SIZE_AND_OFFSET(cookie_io_functions_t, close);\n#endif  // SANITIZER_GLIBC\n\n#if SANITIZER_LINUX || SANITIZER_FREEBSD\nCHECK_TYPE_SIZE(sem_t);\n#endif\n\n#if SANITIZER_LINUX && defined(__arm__)\nCOMPILER_CHECK(ARM_VFPREGS_SIZE == ARM_VFPREGS_SIZE_ASAN);\n#endif\n\n#endif // SANITIZER_LINUX || SANITIZER_FREEBSD || SANITIZER_MAC\n"
  },
  {
    "path": "runtime/sanitizer_common/sanitizer_platform_limits_posix.h",
    "content": "//===-- sanitizer_platform_limits_posix.h ---------------------------------===//\n//\n// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n// See https://llvm.org/LICENSE.txt for license information.\n// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n//\n//===----------------------------------------------------------------------===//\n//\n// This file is a part of Sanitizer common code.\n//\n// Sizes and layouts of platform-specific POSIX data structures.\n//===----------------------------------------------------------------------===//\n\n#ifndef SANITIZER_PLATFORM_LIMITS_POSIX_H\n#define SANITIZER_PLATFORM_LIMITS_POSIX_H\n\n#if SANITIZER_LINUX || SANITIZER_MAC\n\n#include \"sanitizer_internal_defs.h\"\n#include \"sanitizer_platform.h\"\n\n#if defined(__sparc__)\n// FIXME: This can't be included from tsan which does not support sparc yet.\n#include \"sanitizer_glibc_version.h\"\n#endif\n\n# define GET_LINK_MAP_BY_DLOPEN_HANDLE(handle) ((link_map*)(handle))\n\nnamespace __sanitizer {\nextern unsigned struct_utsname_sz;\nextern unsigned struct_stat_sz;\n#if !SANITIZER_IOS\nextern unsigned struct_stat64_sz;\n#endif\nextern unsigned struct_rusage_sz;\nextern unsigned siginfo_t_sz;\nextern unsigned struct_itimerval_sz;\nextern unsigned pthread_t_sz;\nextern unsigned pthread_mutex_t_sz;\nextern unsigned pthread_cond_t_sz;\nextern unsigned pid_t_sz;\nextern unsigned timeval_sz;\nextern unsigned uid_t_sz;\nextern unsigned gid_t_sz;\nextern unsigned mbstate_t_sz;\nextern unsigned struct_timezone_sz;\nextern unsigned struct_tms_sz;\nextern unsigned struct_itimerspec_sz;\nextern unsigned struct_sigevent_sz;\nextern unsigned struct_stack_t_sz;\nextern unsigned struct_sched_param_sz;\nextern unsigned struct_statfs64_sz;\nextern unsigned struct_regex_sz;\nextern unsigned struct_regmatch_sz;\n\n#if !SANITIZER_ANDROID\nextern unsigned struct_fstab_sz;\nextern unsigned struct_statfs_sz;\nextern unsigned 
struct_sockaddr_sz;\nunsigned ucontext_t_sz(void *uctx);\n#  endif  // !SANITIZER_ANDROID\n\n#  if SANITIZER_LINUX\n\n#    if defined(__x86_64__)\nconst unsigned struct_kernel_stat_sz = 144;\nconst unsigned struct_kernel_stat64_sz = 0;\n#elif defined(__i386__)\nconst unsigned struct_kernel_stat_sz = 64;\nconst unsigned struct_kernel_stat64_sz = 96;\n#elif defined(__arm__)\nconst unsigned struct_kernel_stat_sz = 64;\nconst unsigned struct_kernel_stat64_sz = 104;\n#elif defined(__aarch64__)\nconst unsigned struct_kernel_stat_sz = 128;\nconst unsigned struct_kernel_stat64_sz = 104;\n#elif defined(__powerpc__) && !defined(__powerpc64__)\nconst unsigned struct_kernel_stat_sz = 72;\nconst unsigned struct_kernel_stat64_sz = 104;\n#elif defined(__powerpc64__)\nconst unsigned struct_kernel_stat_sz = 144;\nconst unsigned struct_kernel_stat64_sz = 104;\n#elif defined(__mips__)\nconst unsigned struct_kernel_stat_sz = SANITIZER_ANDROID\n                                           ? FIRST_32_SECOND_64(104, 128)\n                                           : FIRST_32_SECOND_64(160, 216);\nconst unsigned struct_kernel_stat64_sz = 104;\n#elif defined(__s390__) && !defined(__s390x__)\nconst unsigned struct_kernel_stat_sz = 64;\nconst unsigned struct_kernel_stat64_sz = 104;\n#elif defined(__s390x__)\nconst unsigned struct_kernel_stat_sz = 144;\nconst unsigned struct_kernel_stat64_sz = 0;\n#elif defined(__sparc__) && defined(__arch64__)\nconst unsigned struct___old_kernel_stat_sz = 0;\nconst unsigned struct_kernel_stat_sz = 104;\nconst unsigned struct_kernel_stat64_sz = 144;\n#elif defined(__sparc__) && !defined(__arch64__)\nconst unsigned struct___old_kernel_stat_sz = 0;\nconst unsigned struct_kernel_stat_sz = 64;\nconst unsigned struct_kernel_stat64_sz = 104;\n#elif SANITIZER_RISCV64\nconst unsigned struct_kernel_stat_sz = 128;\nconst unsigned struct_kernel_stat64_sz = 0;  // RISCV64 does not use stat64\n#    elif defined(__hexagon__)\nconst unsigned struct_kernel_stat_sz = 
128;\nconst unsigned struct_kernel_stat64_sz = 0;\n#    endif\nstruct __sanitizer_perf_event_attr {\n  unsigned type;\n  unsigned size;\n  // More fields that vary with the kernel version.\n};\n\nextern unsigned struct_epoll_event_sz;\nextern unsigned struct_sysinfo_sz;\nextern unsigned __user_cap_header_struct_sz;\nextern unsigned __user_cap_data_struct_sz;\nextern unsigned struct_new_utsname_sz;\nextern unsigned struct_old_utsname_sz;\nextern unsigned struct_oldold_utsname_sz;\n\nconst unsigned struct_kexec_segment_sz = 4 * sizeof(unsigned long);\n#endif  // SANITIZER_LINUX\n\n#if SANITIZER_LINUX\n\n#if defined(__powerpc64__) || defined(__s390__)\nconst unsigned struct___old_kernel_stat_sz = 0;\n#elif !defined(__sparc__)\nconst unsigned struct___old_kernel_stat_sz = 32;\n#endif\n\nextern unsigned struct_rlimit_sz;\nextern unsigned struct_utimbuf_sz;\nextern unsigned struct_timespec_sz;\n\nstruct __sanitizer_iocb {\n  u64 aio_data;\n  u32 aio_key_or_aio_reserved1;  // Simply crazy.\n  u32 aio_reserved1_or_aio_key;  // Luckily, we don't need these.\n  u16 aio_lio_opcode;\n  s16 aio_reqprio;\n  u32 aio_fildes;\n  u64 aio_buf;\n  u64 aio_nbytes;\n  s64 aio_offset;\n  u64 aio_reserved2;\n  u64 aio_reserved3;\n};\n\nstruct __sanitizer_io_event {\n  u64 data;\n  u64 obj;\n  u64 res;\n  u64 res2;\n};\n\nconst unsigned iocb_cmd_pread = 0;\nconst unsigned iocb_cmd_pwrite = 1;\nconst unsigned iocb_cmd_preadv = 7;\nconst unsigned iocb_cmd_pwritev = 8;\n\nstruct __sanitizer___sysctl_args {\n  int *name;\n  int nlen;\n  void *oldval;\n  uptr *oldlenp;\n  void *newval;\n  uptr newlen;\n  unsigned long ___unused[4];\n};\n\nconst unsigned old_sigset_t_sz = sizeof(unsigned long);\n\nstruct __sanitizer_sem_t {\n#if SANITIZER_ANDROID && defined(_LP64)\n  int data[4];\n#elif SANITIZER_ANDROID && !defined(_LP64)\n  int data;\n#elif SANITIZER_LINUX\n  uptr data[4];\n#endif\n};\n#endif // SANITIZER_LINUX\n\n#if SANITIZER_ANDROID\nstruct __sanitizer_struct_mallinfo {\n  uptr 
v[10];\n};\n#endif\n\n#if SANITIZER_LINUX && !SANITIZER_ANDROID\nstruct __sanitizer_struct_mallinfo {\n  int v[10];\n};\n\nextern unsigned struct_ustat_sz;\nextern unsigned struct_rlimit64_sz;\nextern unsigned struct_statvfs64_sz;\n\nstruct __sanitizer_ipc_perm {\n  int __key;\n  int uid;\n  int gid;\n  int cuid;\n  int cgid;\n#ifdef __powerpc__\n  unsigned mode;\n  unsigned __seq;\n  u64 __unused1;\n  u64 __unused2;\n#elif defined(__sparc__)\n  unsigned mode;\n  unsigned short __pad2;\n  unsigned short __seq;\n  unsigned long long __unused1;\n  unsigned long long __unused2;\n#else\n  unsigned int mode;\n  unsigned short __seq;\n  unsigned short __pad2;\n#if defined(__x86_64__) && !defined(_LP64)\n  u64 __unused1;\n  u64 __unused2;\n#else\n  unsigned long __unused1;\n  unsigned long __unused2;\n#endif\n#endif\n};\n\nstruct __sanitizer_shmid_ds {\n  __sanitizer_ipc_perm shm_perm;\n#if defined(__sparc__)\n#if !defined(__arch64__)\n  u32 __pad1;\n#endif\n  long shm_atime;\n#if !defined(__arch64__)\n  u32 __pad2;\n#endif\n  long shm_dtime;\n#if !defined(__arch64__)\n  u32 __pad3;\n#endif\n  long shm_ctime;\n  uptr shm_segsz;\n  int shm_cpid;\n  int shm_lpid;\n  unsigned long shm_nattch;\n  unsigned long __glibc_reserved1;\n  unsigned long __glibc_reserved2;\n#else\n#ifndef __powerpc__\n  uptr shm_segsz;\n#elif !defined(__powerpc64__)\n  uptr __unused0;\n#endif\n#if defined(__x86_64__) && !defined(_LP64)\n  u64 shm_atime;\n  u64 shm_dtime;\n  u64 shm_ctime;\n#else\n  uptr shm_atime;\n#if !defined(_LP64) && !defined(__mips__)\n  uptr __unused1;\n#endif\n  uptr shm_dtime;\n#if !defined(_LP64) && !defined(__mips__)\n  uptr __unused2;\n#endif\n  uptr shm_ctime;\n#if !defined(_LP64) && !defined(__mips__)\n  uptr __unused3;\n#endif\n#endif\n#ifdef __powerpc__\n  uptr shm_segsz;\n#endif\n  int shm_cpid;\n  int shm_lpid;\n#if defined(__x86_64__) && !defined(_LP64)\n  u64 shm_nattch;\n  u64 __unused4;\n  u64 __unused5;\n#else\n  uptr shm_nattch;\n  uptr __unused4;\n  uptr 
__unused5;\n#endif\n#endif\n};\n#endif\n\n#if SANITIZER_LINUX && !SANITIZER_ANDROID\nextern unsigned struct_msqid_ds_sz;\nextern unsigned struct_mq_attr_sz;\nextern unsigned struct_timex_sz;\nextern unsigned struct_statvfs_sz;\nextern unsigned struct_crypt_data_sz;\n#endif  // SANITIZER_LINUX && !SANITIZER_ANDROID\n\nstruct __sanitizer_iovec {\n  void *iov_base;\n  uptr iov_len;\n};\n\n#if !SANITIZER_ANDROID\nstruct __sanitizer_ifaddrs {\n  struct __sanitizer_ifaddrs *ifa_next;\n  char *ifa_name;\n  unsigned int ifa_flags;\n  void *ifa_addr;     // (struct sockaddr *)\n  void *ifa_netmask;  // (struct sockaddr *)\n  // This is a union on Linux.\n# ifdef ifa_dstaddr\n# undef ifa_dstaddr\n# endif\n  void *ifa_dstaddr;  // (struct sockaddr *)\n  void *ifa_data;\n};\n#endif  // !SANITIZER_ANDROID\n\n#if SANITIZER_MAC\ntypedef unsigned long __sanitizer_pthread_key_t;\n#else\ntypedef unsigned __sanitizer_pthread_key_t;\n#endif\n\n#if SANITIZER_LINUX && !SANITIZER_ANDROID\n\nstruct __sanitizer_XDR {\n  int x_op;\n  void *x_ops;\n  uptr x_public;\n  uptr x_private;\n  uptr x_base;\n  unsigned x_handy;\n};\n\nconst int __sanitizer_XDR_ENCODE = 0;\nconst int __sanitizer_XDR_DECODE = 1;\nconst int __sanitizer_XDR_FREE = 2;\n#endif\n\nstruct __sanitizer_passwd {\n  char *pw_name;\n  char *pw_passwd;\n  int pw_uid;\n  int pw_gid;\n#if SANITIZER_MAC\n  long pw_change;\n  char *pw_class;\n#endif\n#if !(SANITIZER_ANDROID && (SANITIZER_WORDSIZE == 32))\n  char *pw_gecos;\n#endif\n  char *pw_dir;\n  char *pw_shell;\n#if SANITIZER_MAC\n  long pw_expire;\n#endif\n};\n\nstruct __sanitizer_group {\n  char *gr_name;\n  char *gr_passwd;\n  int gr_gid;\n  char **gr_mem;\n};\n\n#  if (defined(__x86_64__) && !defined(_LP64)) || defined(__hexagon__)\ntypedef long long __sanitizer_time_t;\n#else\ntypedef long __sanitizer_time_t;\n#endif\n\ntypedef long __sanitizer_suseconds_t;\n\nstruct __sanitizer_timeval {\n  __sanitizer_time_t tv_sec;\n  __sanitizer_suseconds_t tv_usec;\n};\n\nstruct 
__sanitizer_itimerval {\n  struct __sanitizer_timeval it_interval;\n  struct __sanitizer_timeval it_value;\n};\n\nstruct __sanitizer_timeb {\n  __sanitizer_time_t time;\n  unsigned short millitm;\n  short timezone;\n  short dstflag;\n};\n\nstruct __sanitizer_ether_addr {\n  u8 octet[6];\n};\n\nstruct __sanitizer_tm {\n  int tm_sec;\n  int tm_min;\n  int tm_hour;\n  int tm_mday;\n  int tm_mon;\n  int tm_year;\n  int tm_wday;\n  int tm_yday;\n  int tm_isdst;\n  long int tm_gmtoff;\n  const char *tm_zone;\n};\n\n#if SANITIZER_LINUX\nstruct __sanitizer_mntent {\n  char *mnt_fsname;\n  char *mnt_dir;\n  char *mnt_type;\n  char *mnt_opts;\n  int mnt_freq;\n  int mnt_passno;\n};\n\nstruct __sanitizer_file_handle {\n  unsigned int handle_bytes;\n  int handle_type;\n  unsigned char f_handle[1];  // variable sized\n};\n#endif\n\n#if SANITIZER_MAC\nstruct __sanitizer_msghdr {\n  void *msg_name;\n  unsigned msg_namelen;\n  struct __sanitizer_iovec *msg_iov;\n  unsigned msg_iovlen;\n  void *msg_control;\n  unsigned msg_controllen;\n  int msg_flags;\n};\nstruct __sanitizer_cmsghdr {\n  unsigned cmsg_len;\n  int cmsg_level;\n  int cmsg_type;\n};\n#else\n// In POSIX, int msg_iovlen; socklen_t msg_controllen; socklen_t cmsg_len; but\n// many implementations don't conform to the standard.\nstruct __sanitizer_msghdr {\n  void *msg_name;\n  unsigned msg_namelen;\n  struct __sanitizer_iovec *msg_iov;\n  uptr msg_iovlen;\n  void *msg_control;\n  uptr msg_controllen;\n  int msg_flags;\n};\nstruct __sanitizer_cmsghdr {\n  uptr cmsg_len;\n  int cmsg_level;\n  int cmsg_type;\n};\n#endif\n\n#if SANITIZER_LINUX\nstruct __sanitizer_mmsghdr {\n  __sanitizer_msghdr msg_hdr;\n  unsigned int msg_len;\n};\n#endif\n\n#if SANITIZER_MAC\nstruct __sanitizer_dirent {\n  unsigned long long d_ino;\n  unsigned long long d_seekoff;\n  unsigned short d_reclen;\n  // more fields that we don't care about\n};\n#  elif SANITIZER_ANDROID || defined(__x86_64__) || defined(__hexagon__)\nstruct __sanitizer_dirent 
{\n  unsigned long long d_ino;\n  unsigned long long d_off;\n  unsigned short d_reclen;\n  // more fields that we don't care about\n};\n#  else\nstruct __sanitizer_dirent {\n  uptr d_ino;\n  uptr d_off;\n  unsigned short d_reclen;\n  // more fields that we don't care about\n};\n#  endif\n\n#  if SANITIZER_LINUX && !SANITIZER_ANDROID\nstruct __sanitizer_dirent64 {\n  unsigned long long d_ino;\n  unsigned long long d_off;\n  unsigned short d_reclen;\n  // more fields that we don't care about\n};\n#endif\n\n#if defined(__x86_64__) && !defined(_LP64)\ntypedef long long __sanitizer_clock_t;\n#else\ntypedef long __sanitizer_clock_t;\n#endif\n\n#if SANITIZER_LINUX\ntypedef int __sanitizer_clockid_t;\n#endif\n\n#if SANITIZER_LINUX\n#    if defined(_LP64) || defined(__x86_64__) || defined(__powerpc__) || \\\n        defined(__mips__) || defined(__hexagon__)\ntypedef unsigned __sanitizer___kernel_uid_t;\ntypedef unsigned __sanitizer___kernel_gid_t;\n#else\ntypedef unsigned short __sanitizer___kernel_uid_t;\ntypedef unsigned short __sanitizer___kernel_gid_t;\n#endif\n#if defined(__x86_64__) && !defined(_LP64)\ntypedef long long __sanitizer___kernel_off_t;\n#else\ntypedef long __sanitizer___kernel_off_t;\n#endif\n\n#if defined(__powerpc__) || defined(__mips__)\ntypedef unsigned int __sanitizer___kernel_old_uid_t;\ntypedef unsigned int __sanitizer___kernel_old_gid_t;\n#else\ntypedef unsigned short __sanitizer___kernel_old_uid_t;\ntypedef unsigned short __sanitizer___kernel_old_gid_t;\n#endif\n\ntypedef long long __sanitizer___kernel_loff_t;\ntypedef struct {\n  unsigned long fds_bits[1024 / (8 * sizeof(long))];\n} __sanitizer___kernel_fd_set;\n#endif\n\n// This thing depends on the platform. We are only interested in the upper\n// limit. 
Verified with a compiler assert in .cpp.\nunion __sanitizer_pthread_attr_t {\n  char size[128];\n  void *align;\n};\n\n#if SANITIZER_ANDROID\n# if SANITIZER_MIPS\ntypedef unsigned long __sanitizer_sigset_t[16 / sizeof(unsigned long)];\n# else\ntypedef unsigned long __sanitizer_sigset_t;\n# endif\n#elif SANITIZER_MAC\ntypedef unsigned __sanitizer_sigset_t;\n#elif SANITIZER_LINUX\nstruct __sanitizer_sigset_t {\n  // The size is determined by looking at sizeof of real sigset_t on linux.\n  uptr val[128 / sizeof(uptr)];\n};\n#endif\n\nstruct __sanitizer_siginfo {\n  // The size is determined by looking at sizeof of real siginfo_t on linux.\n  u64 opaque[128 / sizeof(u64)];\n};\n\nusing __sanitizer_sighandler_ptr = void (*)(int sig);\nusing __sanitizer_sigactionhandler_ptr = void (*)(int sig,\n                                                  __sanitizer_siginfo *siginfo,\n                                                  void *uctx);\n\n// Linux system headers define the 'sa_handler' and 'sa_sigaction' macros.\n#if SANITIZER_ANDROID && (SANITIZER_WORDSIZE == 64)\nstruct __sanitizer_sigaction {\n  unsigned sa_flags;\n  union {\n    __sanitizer_sigactionhandler_ptr sigaction;\n    __sanitizer_sighandler_ptr handler;\n  };\n  __sanitizer_sigset_t sa_mask;\n  void (*sa_restorer)();\n};\n#elif SANITIZER_ANDROID && SANITIZER_MIPS32  // check this before WORDSIZE == 32\nstruct __sanitizer_sigaction {\n  unsigned sa_flags;\n  union {\n    __sanitizer_sigactionhandler_ptr sigaction;\n    __sanitizer_sighandler_ptr handler;\n  };\n  __sanitizer_sigset_t sa_mask;\n};\n#elif SANITIZER_ANDROID && (SANITIZER_WORDSIZE == 32)\nstruct __sanitizer_sigaction {\n  union {\n    __sanitizer_sigactionhandler_ptr sigaction;\n    __sanitizer_sighandler_ptr handler;\n  };\n  __sanitizer_sigset_t sa_mask;\n  uptr sa_flags;\n  void (*sa_restorer)();\n};\n#else // !SANITIZER_ANDROID\nstruct __sanitizer_sigaction {\n#if defined(__mips__) && !SANITIZER_FREEBSD\n  unsigned int sa_flags;\n#endif\n  
union {\n    __sanitizer_sigactionhandler_ptr sigaction;\n    __sanitizer_sighandler_ptr handler;\n  };\n#if SANITIZER_FREEBSD\n  int sa_flags;\n  __sanitizer_sigset_t sa_mask;\n#else\n#if defined(__s390x__)\n  int sa_resv;\n#else\n  __sanitizer_sigset_t sa_mask;\n#endif\n#ifndef __mips__\n#if defined(__sparc__)\n#if __GLIBC_PREREQ (2, 20)\n  // On sparc glibc 2.19 and earlier sa_flags was unsigned long.\n#if defined(__arch64__)\n  // To maintain ABI compatibility on sparc64 when switching to an int,\n  // __glibc_reserved0 was added.\n  int __glibc_reserved0;\n#endif\n  int sa_flags;\n#else\n  unsigned long sa_flags;\n#endif\n#else\n  int sa_flags;\n#endif\n#endif\n#endif\n#if SANITIZER_LINUX\n  void (*sa_restorer)();\n#endif\n#if defined(__mips__) && (SANITIZER_WORDSIZE == 32)\n  int sa_resv[1];\n#endif\n#if defined(__s390x__)\n  __sanitizer_sigset_t sa_mask;\n#endif\n};\n#endif // !SANITIZER_ANDROID\n\n#if defined(__mips__)\n#define __SANITIZER_KERNEL_NSIG 128\n#else\n#define __SANITIZER_KERNEL_NSIG 64\n#endif\n\nstruct __sanitizer_kernel_sigset_t {\n  uptr sig[__SANITIZER_KERNEL_NSIG / (sizeof(uptr) * 8)];\n};\n\n// Linux system headers define the 'sa_handler' and 'sa_sigaction' macros.\n#if SANITIZER_MIPS\nstruct __sanitizer_kernel_sigaction_t {\n  unsigned int sa_flags;\n  union {\n    void (*handler)(int signo);\n    void (*sigaction)(int signo, __sanitizer_siginfo *info, void *ctx);\n  };\n  __sanitizer_kernel_sigset_t sa_mask;\n  void (*sa_restorer)(void);\n};\n#else\nstruct __sanitizer_kernel_sigaction_t {\n  union {\n    void (*handler)(int signo);\n    void (*sigaction)(int signo, __sanitizer_siginfo *info, void *ctx);\n  };\n  unsigned long sa_flags;\n  void (*sa_restorer)(void);\n  __sanitizer_kernel_sigset_t sa_mask;\n};\n#endif\n\nextern const uptr sig_ign;\nextern const uptr sig_dfl;\nextern const uptr sig_err;\nextern const uptr sa_siginfo;\n\n#if SANITIZER_LINUX\nextern int e_tabsz;\n#endif\n\nextern int af_inet;\nextern int af_inet6;\nuptr 
__sanitizer_in_addr_sz(int af);\n\n#if SANITIZER_LINUX\nstruct __sanitizer_dl_phdr_info {\n  uptr dlpi_addr;\n  const char *dlpi_name;\n  const void *dlpi_phdr;\n  short dlpi_phnum;\n};\n\nextern unsigned struct_ElfW_Phdr_sz;\n#endif\n\nstruct __sanitizer_protoent {\n  char *p_name;\n  char **p_aliases;\n  int p_proto;\n};\n\nstruct __sanitizer_netent {\n  char *n_name;\n  char **n_aliases;\n  int n_addrtype;\n  u32 n_net;\n};\n\nstruct __sanitizer_addrinfo {\n  int ai_flags;\n  int ai_family;\n  int ai_socktype;\n  int ai_protocol;\n#if SANITIZER_ANDROID || SANITIZER_MAC\n  unsigned ai_addrlen;\n  char *ai_canonname;\n  void *ai_addr;\n#else // LINUX\n  unsigned ai_addrlen;\n  void *ai_addr;\n  char *ai_canonname;\n#endif\n  struct __sanitizer_addrinfo *ai_next;\n};\n\nstruct __sanitizer_hostent {\n  char *h_name;\n  char **h_aliases;\n  int h_addrtype;\n  int h_length;\n  char **h_addr_list;\n};\n\nstruct __sanitizer_pollfd {\n  int fd;\n  short events;\n  short revents;\n};\n\n#if SANITIZER_ANDROID || SANITIZER_MAC\ntypedef unsigned __sanitizer_nfds_t;\n#else\ntypedef unsigned long __sanitizer_nfds_t;\n#endif\n\n#if !SANITIZER_ANDROID\n# if SANITIZER_LINUX\nstruct __sanitizer_glob_t {\n  uptr gl_pathc;\n  char **gl_pathv;\n  uptr gl_offs;\n  int gl_flags;\n\n  void (*gl_closedir)(void *dirp);\n  void *(*gl_readdir)(void *dirp);\n  void *(*gl_opendir)(const char *);\n  int (*gl_lstat)(const char *, void *);\n  int (*gl_stat)(const char *, void *);\n};\n# endif  // SANITIZER_LINUX\n\n# if SANITIZER_LINUX\nextern int glob_nomatch;\nextern int glob_altdirfunc;\n# endif\n#endif  // !SANITIZER_ANDROID\n\nextern unsigned path_max;\n\n#  if !SANITIZER_ANDROID\nextern const int wordexp_wrde_dooffs;\n#  endif  // !SANITIZER_ANDROID\n\nstruct __sanitizer_wordexp_t {\n  uptr we_wordc;\n  char **we_wordv;\n  uptr we_offs;\n};\n\n#if SANITIZER_LINUX && !SANITIZER_ANDROID\nstruct __sanitizer_FILE {\n  int _flags;\n  char *_IO_read_ptr;\n  char *_IO_read_end;\n  char 
*_IO_read_base;\n  char *_IO_write_base;\n  char *_IO_write_ptr;\n  char *_IO_write_end;\n  char *_IO_buf_base;\n  char *_IO_buf_end;\n  char *_IO_save_base;\n  char *_IO_backup_base;\n  char *_IO_save_end;\n  void *_markers;\n  __sanitizer_FILE *_chain;\n  int _fileno;\n};\n# define SANITIZER_HAS_STRUCT_FILE 1\n#else\ntypedef void __sanitizer_FILE;\n# define SANITIZER_HAS_STRUCT_FILE 0\n#endif\n\n#if SANITIZER_LINUX && !SANITIZER_ANDROID &&                               \\\n    (defined(__i386) || defined(__x86_64) || defined(__mips64) ||          \\\n     defined(__powerpc64__) || defined(__aarch64__) || defined(__arm__) || \\\n     defined(__s390__) || SANITIZER_RISCV64)\nextern unsigned struct_user_regs_struct_sz;\nextern unsigned struct_user_fpregs_struct_sz;\nextern unsigned struct_user_fpxregs_struct_sz;\nextern unsigned struct_user_vfpregs_struct_sz;\n\nextern int ptrace_peektext;\nextern int ptrace_peekdata;\nextern int ptrace_peekuser;\nextern int ptrace_getregs;\nextern int ptrace_setregs;\nextern int ptrace_getfpregs;\nextern int ptrace_setfpregs;\nextern int ptrace_getfpxregs;\nextern int ptrace_setfpxregs;\nextern int ptrace_getvfpregs;\nextern int ptrace_setvfpregs;\nextern int ptrace_getsiginfo;\nextern int ptrace_setsiginfo;\nextern int ptrace_getregset;\nextern int ptrace_setregset;\nextern int ptrace_geteventmsg;\n#endif\n\n#if SANITIZER_LINUX  && !SANITIZER_ANDROID\nextern unsigned struct_shminfo_sz;\nextern unsigned struct_shm_info_sz;\nextern int shmctl_ipc_stat;\nextern int shmctl_ipc_info;\nextern int shmctl_shm_info;\nextern int shmctl_shm_stat;\n#endif\n\n#if !SANITIZER_MAC && !SANITIZER_FREEBSD\nextern unsigned struct_utmp_sz;\n#endif\n#if !SANITIZER_ANDROID\nextern unsigned struct_utmpx_sz;\n#endif\n\nextern int map_fixed;\n\n// ioctl arguments\nstruct __sanitizer_ifconf {\n  int ifc_len;\n  union {\n    void *ifcu_req;\n  } ifc_ifcu;\n#if SANITIZER_MAC\n} __attribute__((packed));\n#else\n};\n#endif\n\n#if SANITIZER_LINUX && 
!SANITIZER_ANDROID\nstruct __sanitizer__obstack_chunk {\n  char *limit;\n  struct __sanitizer__obstack_chunk *prev;\n};\n\nstruct __sanitizer_obstack {\n  long chunk_size;\n  struct __sanitizer__obstack_chunk *chunk;\n  char *object_base;\n  char *next_free;\n  uptr more_fields[7];\n};\n\ntypedef uptr (*__sanitizer_cookie_io_read)(void *cookie, char *buf, uptr size);\ntypedef uptr (*__sanitizer_cookie_io_write)(void *cookie, const char *buf,\n                                            uptr size);\ntypedef int (*__sanitizer_cookie_io_seek)(void *cookie, u64 *offset,\n                                          int whence);\ntypedef int (*__sanitizer_cookie_io_close)(void *cookie);\n\nstruct __sanitizer_cookie_io_functions_t {\n  __sanitizer_cookie_io_read read;\n  __sanitizer_cookie_io_write write;\n  __sanitizer_cookie_io_seek seek;\n  __sanitizer_cookie_io_close close;\n};\n#endif\n\n#define IOC_NRBITS 8\n#define IOC_TYPEBITS 8\n#if defined(__powerpc__) || defined(__powerpc64__) || defined(__mips__) || \\\n    defined(__sparc__)\n#define IOC_SIZEBITS 13\n#define IOC_DIRBITS 3\n#define IOC_NONE 1U\n#define IOC_WRITE 4U\n#define IOC_READ 2U\n#else\n#define IOC_SIZEBITS 14\n#define IOC_DIRBITS 2\n#define IOC_NONE 0U\n#define IOC_WRITE 1U\n#define IOC_READ 2U\n#endif\n#define IOC_NRMASK ((1 << IOC_NRBITS) - 1)\n#define IOC_TYPEMASK ((1 << IOC_TYPEBITS) - 1)\n#define IOC_SIZEMASK ((1 << IOC_SIZEBITS) - 1)\n#if defined(IOC_DIRMASK)\n#undef IOC_DIRMASK\n#endif\n#define IOC_DIRMASK ((1 << IOC_DIRBITS) - 1)\n#define IOC_NRSHIFT 0\n#define IOC_TYPESHIFT (IOC_NRSHIFT + IOC_NRBITS)\n#define IOC_SIZESHIFT (IOC_TYPESHIFT + IOC_TYPEBITS)\n#define IOC_DIRSHIFT (IOC_SIZESHIFT + IOC_SIZEBITS)\n#define EVIOC_EV_MAX 0x1f\n#define EVIOC_ABS_MAX 0x3f\n\n#define IOC_DIR(nr) (((nr) >> IOC_DIRSHIFT) & IOC_DIRMASK)\n#define IOC_TYPE(nr) (((nr) >> IOC_TYPESHIFT) & IOC_TYPEMASK)\n#define IOC_NR(nr) (((nr) >> IOC_NRSHIFT) & IOC_NRMASK)\n\n#if defined(__sparc__)\n// In sparc the 14 bits SIZE 
field overlaps with the\n// least significant bit of DIR, so either IOC_READ or\n// IOC_WRITE shall be 1 in order to get a non-zero SIZE.\n#define IOC_SIZE(nr) \\\n  ((((((nr) >> 29) & 0x7) & (4U | 2U)) == 0) ? 0 : (((nr) >> 16) & 0x3fff))\n#else\n#define IOC_SIZE(nr) (((nr) >> IOC_SIZESHIFT) & IOC_SIZEMASK)\n#endif\n\nextern unsigned struct_ifreq_sz;\nextern unsigned struct_termios_sz;\nextern unsigned struct_winsize_sz;\n\n#if SANITIZER_LINUX\nextern unsigned struct_arpreq_sz;\nextern unsigned struct_cdrom_msf_sz;\nextern unsigned struct_cdrom_multisession_sz;\nextern unsigned struct_cdrom_read_audio_sz;\nextern unsigned struct_cdrom_subchnl_sz;\nextern unsigned struct_cdrom_ti_sz;\nextern unsigned struct_cdrom_tocentry_sz;\nextern unsigned struct_cdrom_tochdr_sz;\nextern unsigned struct_cdrom_volctrl_sz;\nextern unsigned struct_ff_effect_sz;\nextern unsigned struct_floppy_drive_params_sz;\nextern unsigned struct_floppy_drive_struct_sz;\nextern unsigned struct_floppy_fdc_state_sz;\nextern unsigned struct_floppy_max_errors_sz;\nextern unsigned struct_floppy_raw_cmd_sz;\nextern unsigned struct_floppy_struct_sz;\nextern unsigned struct_floppy_write_errors_sz;\nextern unsigned struct_format_descr_sz;\nextern unsigned struct_hd_driveid_sz;\nextern unsigned struct_hd_geometry_sz;\nextern unsigned struct_input_absinfo_sz;\nextern unsigned struct_input_id_sz;\nextern unsigned struct_mtpos_sz;\nextern unsigned struct_termio_sz;\nextern unsigned struct_vt_consize_sz;\nextern unsigned struct_vt_sizes_sz;\nextern unsigned struct_vt_stat_sz;\n#endif  // SANITIZER_LINUX\n\n#if SANITIZER_LINUX\nextern unsigned struct_copr_buffer_sz;\nextern unsigned struct_copr_debug_buf_sz;\nextern unsigned struct_copr_msg_sz;\nextern unsigned struct_midi_info_sz;\nextern unsigned struct_mtget_sz;\nextern unsigned struct_mtop_sz;\nextern unsigned struct_rtentry_sz;\nextern unsigned struct_sbi_instrument_sz;\nextern unsigned struct_seq_event_rec_sz;\nextern unsigned 
struct_synth_info_sz;\nextern unsigned struct_vt_mode_sz;\n#endif // SANITIZER_LINUX\n\n#if SANITIZER_LINUX && !SANITIZER_ANDROID\nextern unsigned struct_ax25_parms_struct_sz;\nextern unsigned struct_input_keymap_entry_sz;\nextern unsigned struct_ipx_config_data_sz;\nextern unsigned struct_kbdiacrs_sz;\nextern unsigned struct_kbentry_sz;\nextern unsigned struct_kbkeycode_sz;\nextern unsigned struct_kbsentry_sz;\nextern unsigned struct_mtconfiginfo_sz;\nextern unsigned struct_nr_parms_struct_sz;\nextern unsigned struct_scc_modem_sz;\nextern unsigned struct_scc_stat_sz;\nextern unsigned struct_serial_multiport_struct_sz;\nextern unsigned struct_serial_struct_sz;\nextern unsigned struct_sockaddr_ax25_sz;\nextern unsigned struct_unimapdesc_sz;\nextern unsigned struct_unimapinit_sz;\n#endif  // SANITIZER_LINUX && !SANITIZER_ANDROID\n\nextern const unsigned long __sanitizer_bufsiz;\n\n#if SANITIZER_LINUX && !SANITIZER_ANDROID\nextern unsigned struct_audio_buf_info_sz;\nextern unsigned struct_ppp_stats_sz;\n#endif  // (SANITIZER_LINUX || SANITIZER_FREEBSD) && !SANITIZER_ANDROID\n\n#if !SANITIZER_ANDROID && !SANITIZER_MAC\nextern unsigned struct_sioc_sg_req_sz;\nextern unsigned struct_sioc_vif_req_sz;\n#endif\n\n// ioctl request identifiers\n\n// A special value to mark ioctls that are not present on the target platform,\n// when it can not be determined without including any system headers.\nextern const unsigned IOCTL_NOT_PRESENT;\n\nextern unsigned IOCTL_FIOASYNC;\nextern unsigned IOCTL_FIOCLEX;\nextern unsigned IOCTL_FIOGETOWN;\nextern unsigned IOCTL_FIONBIO;\nextern unsigned IOCTL_FIONCLEX;\nextern unsigned IOCTL_FIOSETOWN;\nextern unsigned IOCTL_SIOCADDMULTI;\nextern unsigned IOCTL_SIOCATMARK;\nextern unsigned IOCTL_SIOCDELMULTI;\nextern unsigned IOCTL_SIOCGIFADDR;\nextern unsigned IOCTL_SIOCGIFBRDADDR;\nextern unsigned IOCTL_SIOCGIFCONF;\nextern unsigned IOCTL_SIOCGIFDSTADDR;\nextern unsigned IOCTL_SIOCGIFFLAGS;\nextern unsigned IOCTL_SIOCGIFMETRIC;\nextern unsigned 
IOCTL_SIOCGIFMTU;\nextern unsigned IOCTL_SIOCGIFNETMASK;\nextern unsigned IOCTL_SIOCGPGRP;\nextern unsigned IOCTL_SIOCSIFADDR;\nextern unsigned IOCTL_SIOCSIFBRDADDR;\nextern unsigned IOCTL_SIOCSIFDSTADDR;\nextern unsigned IOCTL_SIOCSIFFLAGS;\nextern unsigned IOCTL_SIOCSIFMETRIC;\nextern unsigned IOCTL_SIOCSIFMTU;\nextern unsigned IOCTL_SIOCSIFNETMASK;\nextern unsigned IOCTL_SIOCSPGRP;\nextern unsigned IOCTL_TIOCCONS;\nextern unsigned IOCTL_TIOCEXCL;\nextern unsigned IOCTL_TIOCGETD;\nextern unsigned IOCTL_TIOCGPGRP;\nextern unsigned IOCTL_TIOCGWINSZ;\nextern unsigned IOCTL_TIOCMBIC;\nextern unsigned IOCTL_TIOCMBIS;\nextern unsigned IOCTL_TIOCMGET;\nextern unsigned IOCTL_TIOCMSET;\nextern unsigned IOCTL_TIOCNOTTY;\nextern unsigned IOCTL_TIOCNXCL;\nextern unsigned IOCTL_TIOCOUTQ;\nextern unsigned IOCTL_TIOCPKT;\nextern unsigned IOCTL_TIOCSCTTY;\nextern unsigned IOCTL_TIOCSETD;\nextern unsigned IOCTL_TIOCSPGRP;\nextern unsigned IOCTL_TIOCSTI;\nextern unsigned IOCTL_TIOCSWINSZ;\n#if SANITIZER_LINUX && !SANITIZER_ANDROID\nextern unsigned IOCTL_SIOCGETSGCNT;\nextern unsigned IOCTL_SIOCGETVIFCNT;\n#endif\n#if SANITIZER_LINUX\nextern unsigned IOCTL_EVIOCGABS;\nextern unsigned IOCTL_EVIOCGBIT;\nextern unsigned IOCTL_EVIOCGEFFECTS;\nextern unsigned IOCTL_EVIOCGID;\nextern unsigned IOCTL_EVIOCGKEY;\nextern unsigned IOCTL_EVIOCGKEYCODE;\nextern unsigned IOCTL_EVIOCGLED;\nextern unsigned IOCTL_EVIOCGNAME;\nextern unsigned IOCTL_EVIOCGPHYS;\nextern unsigned IOCTL_EVIOCGRAB;\nextern unsigned IOCTL_EVIOCGREP;\nextern unsigned IOCTL_EVIOCGSND;\nextern unsigned IOCTL_EVIOCGSW;\nextern unsigned IOCTL_EVIOCGUNIQ;\nextern unsigned IOCTL_EVIOCGVERSION;\nextern unsigned IOCTL_EVIOCRMFF;\nextern unsigned IOCTL_EVIOCSABS;\nextern unsigned IOCTL_EVIOCSFF;\nextern unsigned IOCTL_EVIOCSKEYCODE;\nextern unsigned IOCTL_EVIOCSREP;\nextern unsigned IOCTL_BLKFLSBUF;\nextern unsigned IOCTL_BLKGETSIZE;\nextern unsigned IOCTL_BLKRAGET;\nextern unsigned IOCTL_BLKRASET;\nextern unsigned 
IOCTL_BLKROGET;\nextern unsigned IOCTL_BLKROSET;\nextern unsigned IOCTL_BLKRRPART;\nextern unsigned IOCTL_CDROMAUDIOBUFSIZ;\nextern unsigned IOCTL_CDROMEJECT;\nextern unsigned IOCTL_CDROMEJECT_SW;\nextern unsigned IOCTL_CDROMMULTISESSION;\nextern unsigned IOCTL_CDROMPAUSE;\nextern unsigned IOCTL_CDROMPLAYMSF;\nextern unsigned IOCTL_CDROMPLAYTRKIND;\nextern unsigned IOCTL_CDROMREADAUDIO;\nextern unsigned IOCTL_CDROMREADCOOKED;\nextern unsigned IOCTL_CDROMREADMODE1;\nextern unsigned IOCTL_CDROMREADMODE2;\nextern unsigned IOCTL_CDROMREADRAW;\nextern unsigned IOCTL_CDROMREADTOCENTRY;\nextern unsigned IOCTL_CDROMREADTOCHDR;\nextern unsigned IOCTL_CDROMRESET;\nextern unsigned IOCTL_CDROMRESUME;\nextern unsigned IOCTL_CDROMSEEK;\nextern unsigned IOCTL_CDROMSTART;\nextern unsigned IOCTL_CDROMSTOP;\nextern unsigned IOCTL_CDROMSUBCHNL;\nextern unsigned IOCTL_CDROMVOLCTRL;\nextern unsigned IOCTL_CDROMVOLREAD;\nextern unsigned IOCTL_CDROM_GET_UPC;\nextern unsigned IOCTL_FDCLRPRM;\nextern unsigned IOCTL_FDDEFPRM;\nextern unsigned IOCTL_FDFLUSH;\nextern unsigned IOCTL_FDFMTBEG;\nextern unsigned IOCTL_FDFMTEND;\nextern unsigned IOCTL_FDFMTTRK;\nextern unsigned IOCTL_FDGETDRVPRM;\nextern unsigned IOCTL_FDGETDRVSTAT;\nextern unsigned IOCTL_FDGETDRVTYP;\nextern unsigned IOCTL_FDGETFDCSTAT;\nextern unsigned IOCTL_FDGETMAXERRS;\nextern unsigned IOCTL_FDGETPRM;\nextern unsigned IOCTL_FDMSGOFF;\nextern unsigned IOCTL_FDMSGON;\nextern unsigned IOCTL_FDPOLLDRVSTAT;\nextern unsigned IOCTL_FDRAWCMD;\nextern unsigned IOCTL_FDRESET;\nextern unsigned IOCTL_FDSETDRVPRM;\nextern unsigned IOCTL_FDSETEMSGTRESH;\nextern unsigned IOCTL_FDSETMAXERRS;\nextern unsigned IOCTL_FDSETPRM;\nextern unsigned IOCTL_FDTWADDLE;\nextern unsigned IOCTL_FDWERRORCLR;\nextern unsigned IOCTL_FDWERRORGET;\nextern unsigned IOCTL_HDIO_DRIVE_CMD;\nextern unsigned IOCTL_HDIO_GETGEO;\nextern unsigned IOCTL_HDIO_GET_32BIT;\nextern unsigned IOCTL_HDIO_GET_DMA;\nextern unsigned IOCTL_HDIO_GET_IDENTITY;\nextern unsigned 
IOCTL_HDIO_GET_KEEPSETTINGS;\nextern unsigned IOCTL_HDIO_GET_MULTCOUNT;\nextern unsigned IOCTL_HDIO_GET_NOWERR;\nextern unsigned IOCTL_HDIO_GET_UNMASKINTR;\nextern unsigned IOCTL_HDIO_SET_32BIT;\nextern unsigned IOCTL_HDIO_SET_DMA;\nextern unsigned IOCTL_HDIO_SET_KEEPSETTINGS;\nextern unsigned IOCTL_HDIO_SET_MULTCOUNT;\nextern unsigned IOCTL_HDIO_SET_NOWERR;\nextern unsigned IOCTL_HDIO_SET_UNMASKINTR;\nextern unsigned IOCTL_MTIOCPOS;\nextern unsigned IOCTL_PPPIOCGASYNCMAP;\nextern unsigned IOCTL_PPPIOCGDEBUG;\nextern unsigned IOCTL_PPPIOCGFLAGS;\nextern unsigned IOCTL_PPPIOCGUNIT;\nextern unsigned IOCTL_PPPIOCGXASYNCMAP;\nextern unsigned IOCTL_PPPIOCSASYNCMAP;\nextern unsigned IOCTL_PPPIOCSDEBUG;\nextern unsigned IOCTL_PPPIOCSFLAGS;\nextern unsigned IOCTL_PPPIOCSMAXCID;\nextern unsigned IOCTL_PPPIOCSMRU;\nextern unsigned IOCTL_PPPIOCSXASYNCMAP;\nextern unsigned IOCTL_SIOCDARP;\nextern unsigned IOCTL_SIOCDRARP;\nextern unsigned IOCTL_SIOCGARP;\nextern unsigned IOCTL_SIOCGIFENCAP;\nextern unsigned IOCTL_SIOCGIFHWADDR;\nextern unsigned IOCTL_SIOCGIFMAP;\nextern unsigned IOCTL_SIOCGIFMEM;\nextern unsigned IOCTL_SIOCGIFNAME;\nextern unsigned IOCTL_SIOCGIFSLAVE;\nextern unsigned IOCTL_SIOCGRARP;\nextern unsigned IOCTL_SIOCGSTAMP;\nextern unsigned IOCTL_SIOCSARP;\nextern unsigned IOCTL_SIOCSIFENCAP;\nextern unsigned IOCTL_SIOCSIFHWADDR;\nextern unsigned IOCTL_SIOCSIFLINK;\nextern unsigned IOCTL_SIOCSIFMAP;\nextern unsigned IOCTL_SIOCSIFMEM;\nextern unsigned IOCTL_SIOCSIFSLAVE;\nextern unsigned IOCTL_SIOCSRARP;\nextern unsigned IOCTL_SNDCTL_COPR_HALT;\nextern unsigned IOCTL_SNDCTL_COPR_LOAD;\nextern unsigned IOCTL_SNDCTL_COPR_RCODE;\nextern unsigned IOCTL_SNDCTL_COPR_RCVMSG;\nextern unsigned IOCTL_SNDCTL_COPR_RDATA;\nextern unsigned IOCTL_SNDCTL_COPR_RESET;\nextern unsigned IOCTL_SNDCTL_COPR_RUN;\nextern unsigned IOCTL_SNDCTL_COPR_SENDMSG;\nextern unsigned IOCTL_SNDCTL_COPR_WCODE;\nextern unsigned IOCTL_SNDCTL_COPR_WDATA;\nextern unsigned IOCTL_TCFLSH;\nextern unsigned 
IOCTL_TCGETA;\nextern unsigned IOCTL_TCGETS;\nextern unsigned IOCTL_TCSBRK;\nextern unsigned IOCTL_TCSBRKP;\nextern unsigned IOCTL_TCSETA;\nextern unsigned IOCTL_TCSETAF;\nextern unsigned IOCTL_TCSETAW;\nextern unsigned IOCTL_TCSETS;\nextern unsigned IOCTL_TCSETSF;\nextern unsigned IOCTL_TCSETSW;\nextern unsigned IOCTL_TCXONC;\nextern unsigned IOCTL_TIOCGLCKTRMIOS;\nextern unsigned IOCTL_TIOCGSOFTCAR;\nextern unsigned IOCTL_TIOCINQ;\nextern unsigned IOCTL_TIOCLINUX;\nextern unsigned IOCTL_TIOCSERCONFIG;\nextern unsigned IOCTL_TIOCSERGETLSR;\nextern unsigned IOCTL_TIOCSERGWILD;\nextern unsigned IOCTL_TIOCSERSWILD;\nextern unsigned IOCTL_TIOCSLCKTRMIOS;\nextern unsigned IOCTL_TIOCSSOFTCAR;\nextern unsigned IOCTL_VT_DISALLOCATE;\nextern unsigned IOCTL_VT_GETSTATE;\nextern unsigned IOCTL_VT_RESIZE;\nextern unsigned IOCTL_VT_RESIZEX;\nextern unsigned IOCTL_VT_SENDSIG;\nextern unsigned IOCTL_MTIOCGET;\nextern unsigned IOCTL_MTIOCTOP;\nextern unsigned IOCTL_SIOCADDRT;\nextern unsigned IOCTL_SIOCDELRT;\nextern unsigned IOCTL_SNDCTL_DSP_GETBLKSIZE;\nextern unsigned IOCTL_SNDCTL_DSP_GETFMTS;\nextern unsigned IOCTL_SNDCTL_DSP_NONBLOCK;\nextern unsigned IOCTL_SNDCTL_DSP_POST;\nextern unsigned IOCTL_SNDCTL_DSP_RESET;\nextern unsigned IOCTL_SNDCTL_DSP_SETFMT;\nextern unsigned IOCTL_SNDCTL_DSP_SETFRAGMENT;\nextern unsigned IOCTL_SNDCTL_DSP_SPEED;\nextern unsigned IOCTL_SNDCTL_DSP_STEREO;\nextern unsigned IOCTL_SNDCTL_DSP_SUBDIVIDE;\nextern unsigned IOCTL_SNDCTL_DSP_SYNC;\nextern unsigned IOCTL_SNDCTL_FM_4OP_ENABLE;\nextern unsigned IOCTL_SNDCTL_FM_LOAD_INSTR;\nextern unsigned IOCTL_SNDCTL_MIDI_INFO;\nextern unsigned IOCTL_SNDCTL_MIDI_PRETIME;\nextern unsigned IOCTL_SNDCTL_SEQ_CTRLRATE;\nextern unsigned IOCTL_SNDCTL_SEQ_GETINCOUNT;\nextern unsigned IOCTL_SNDCTL_SEQ_GETOUTCOUNT;\nextern unsigned IOCTL_SNDCTL_SEQ_NRMIDIS;\nextern unsigned IOCTL_SNDCTL_SEQ_NRSYNTHS;\nextern unsigned IOCTL_SNDCTL_SEQ_OUTOFBAND;\nextern unsigned IOCTL_SNDCTL_SEQ_PANIC;\nextern unsigned 
IOCTL_SNDCTL_SEQ_PERCMODE;\nextern unsigned IOCTL_SNDCTL_SEQ_RESET;\nextern unsigned IOCTL_SNDCTL_SEQ_RESETSAMPLES;\nextern unsigned IOCTL_SNDCTL_SEQ_SYNC;\nextern unsigned IOCTL_SNDCTL_SEQ_TESTMIDI;\nextern unsigned IOCTL_SNDCTL_SEQ_THRESHOLD;\nextern unsigned IOCTL_SNDCTL_SYNTH_INFO;\nextern unsigned IOCTL_SNDCTL_SYNTH_MEMAVL;\nextern unsigned IOCTL_SNDCTL_TMR_CONTINUE;\nextern unsigned IOCTL_SNDCTL_TMR_METRONOME;\nextern unsigned IOCTL_SNDCTL_TMR_SELECT;\nextern unsigned IOCTL_SNDCTL_TMR_SOURCE;\nextern unsigned IOCTL_SNDCTL_TMR_START;\nextern unsigned IOCTL_SNDCTL_TMR_STOP;\nextern unsigned IOCTL_SNDCTL_TMR_TEMPO;\nextern unsigned IOCTL_SNDCTL_TMR_TIMEBASE;\nextern unsigned IOCTL_SOUND_MIXER_READ_ALTPCM;\nextern unsigned IOCTL_SOUND_MIXER_READ_BASS;\nextern unsigned IOCTL_SOUND_MIXER_READ_CAPS;\nextern unsigned IOCTL_SOUND_MIXER_READ_CD;\nextern unsigned IOCTL_SOUND_MIXER_READ_DEVMASK;\nextern unsigned IOCTL_SOUND_MIXER_READ_ENHANCE;\nextern unsigned IOCTL_SOUND_MIXER_READ_IGAIN;\nextern unsigned IOCTL_SOUND_MIXER_READ_IMIX;\nextern unsigned IOCTL_SOUND_MIXER_READ_LINE1;\nextern unsigned IOCTL_SOUND_MIXER_READ_LINE2;\nextern unsigned IOCTL_SOUND_MIXER_READ_LINE3;\nextern unsigned IOCTL_SOUND_MIXER_READ_LINE;\nextern unsigned IOCTL_SOUND_MIXER_READ_LOUD;\nextern unsigned IOCTL_SOUND_MIXER_READ_MIC;\nextern unsigned IOCTL_SOUND_MIXER_READ_MUTE;\nextern unsigned IOCTL_SOUND_MIXER_READ_OGAIN;\nextern unsigned IOCTL_SOUND_MIXER_READ_PCM;\nextern unsigned IOCTL_SOUND_MIXER_READ_RECLEV;\nextern unsigned IOCTL_SOUND_MIXER_READ_RECMASK;\nextern unsigned IOCTL_SOUND_MIXER_READ_RECSRC;\nextern unsigned IOCTL_SOUND_MIXER_READ_SPEAKER;\nextern unsigned IOCTL_SOUND_MIXER_READ_STEREODEVS;\nextern unsigned IOCTL_SOUND_MIXER_READ_SYNTH;\nextern unsigned IOCTL_SOUND_MIXER_READ_TREBLE;\nextern unsigned IOCTL_SOUND_MIXER_READ_VOLUME;\nextern unsigned IOCTL_SOUND_MIXER_WRITE_ALTPCM;\nextern unsigned IOCTL_SOUND_MIXER_WRITE_BASS;\nextern unsigned IOCTL_SOUND_MIXER_WRITE_CD;\nextern 
unsigned IOCTL_SOUND_MIXER_WRITE_ENHANCE;\nextern unsigned IOCTL_SOUND_MIXER_WRITE_IGAIN;\nextern unsigned IOCTL_SOUND_MIXER_WRITE_IMIX;\nextern unsigned IOCTL_SOUND_MIXER_WRITE_LINE1;\nextern unsigned IOCTL_SOUND_MIXER_WRITE_LINE2;\nextern unsigned IOCTL_SOUND_MIXER_WRITE_LINE3;\nextern unsigned IOCTL_SOUND_MIXER_WRITE_LINE;\nextern unsigned IOCTL_SOUND_MIXER_WRITE_LOUD;\nextern unsigned IOCTL_SOUND_MIXER_WRITE_MIC;\nextern unsigned IOCTL_SOUND_MIXER_WRITE_MUTE;\nextern unsigned IOCTL_SOUND_MIXER_WRITE_OGAIN;\nextern unsigned IOCTL_SOUND_MIXER_WRITE_PCM;\nextern unsigned IOCTL_SOUND_MIXER_WRITE_RECLEV;\nextern unsigned IOCTL_SOUND_MIXER_WRITE_RECSRC;\nextern unsigned IOCTL_SOUND_MIXER_WRITE_SPEAKER;\nextern unsigned IOCTL_SOUND_MIXER_WRITE_SYNTH;\nextern unsigned IOCTL_SOUND_MIXER_WRITE_TREBLE;\nextern unsigned IOCTL_SOUND_MIXER_WRITE_VOLUME;\nextern unsigned IOCTL_SOUND_PCM_READ_BITS;\nextern unsigned IOCTL_SOUND_PCM_READ_CHANNELS;\nextern unsigned IOCTL_SOUND_PCM_READ_FILTER;\nextern unsigned IOCTL_SOUND_PCM_READ_RATE;\nextern unsigned IOCTL_SOUND_PCM_WRITE_CHANNELS;\nextern unsigned IOCTL_SOUND_PCM_WRITE_FILTER;\nextern unsigned IOCTL_VT_ACTIVATE;\nextern unsigned IOCTL_VT_GETMODE;\nextern unsigned IOCTL_VT_OPENQRY;\nextern unsigned IOCTL_VT_RELDISP;\nextern unsigned IOCTL_VT_SETMODE;\nextern unsigned IOCTL_VT_WAITACTIVE;\n#endif  // SANITIZER_LINUX\n\n#if SANITIZER_LINUX && !SANITIZER_ANDROID\nextern unsigned IOCTL_EQL_EMANCIPATE;\nextern unsigned IOCTL_EQL_ENSLAVE;\nextern unsigned IOCTL_EQL_GETMASTRCFG;\nextern unsigned IOCTL_EQL_GETSLAVECFG;\nextern unsigned IOCTL_EQL_SETMASTRCFG;\nextern unsigned IOCTL_EQL_SETSLAVECFG;\nextern unsigned IOCTL_EVIOCGKEYCODE_V2;\nextern unsigned IOCTL_EVIOCGPROP;\nextern unsigned IOCTL_EVIOCSKEYCODE_V2;\nextern unsigned IOCTL_FS_IOC_GETFLAGS;\nextern unsigned IOCTL_FS_IOC_GETVERSION;\nextern unsigned IOCTL_FS_IOC_SETFLAGS;\nextern unsigned IOCTL_FS_IOC_SETVERSION;\nextern unsigned IOCTL_GIO_CMAP;\nextern unsigned 
IOCTL_GIO_FONT;\nextern unsigned IOCTL_GIO_UNIMAP;\nextern unsigned IOCTL_GIO_UNISCRNMAP;\nextern unsigned IOCTL_KDADDIO;\nextern unsigned IOCTL_KDDELIO;\nextern unsigned IOCTL_KDGETKEYCODE;\nextern unsigned IOCTL_KDGKBDIACR;\nextern unsigned IOCTL_KDGKBENT;\nextern unsigned IOCTL_KDGKBLED;\nextern unsigned IOCTL_KDGKBMETA;\nextern unsigned IOCTL_KDGKBSENT;\nextern unsigned IOCTL_KDMAPDISP;\nextern unsigned IOCTL_KDSETKEYCODE;\nextern unsigned IOCTL_KDSIGACCEPT;\nextern unsigned IOCTL_KDSKBDIACR;\nextern unsigned IOCTL_KDSKBENT;\nextern unsigned IOCTL_KDSKBLED;\nextern unsigned IOCTL_KDSKBMETA;\nextern unsigned IOCTL_KDSKBSENT;\nextern unsigned IOCTL_KDUNMAPDISP;\nextern unsigned IOCTL_LPABORT;\nextern unsigned IOCTL_LPABORTOPEN;\nextern unsigned IOCTL_LPCAREFUL;\nextern unsigned IOCTL_LPCHAR;\nextern unsigned IOCTL_LPGETIRQ;\nextern unsigned IOCTL_LPGETSTATUS;\nextern unsigned IOCTL_LPRESET;\nextern unsigned IOCTL_LPSETIRQ;\nextern unsigned IOCTL_LPTIME;\nextern unsigned IOCTL_LPWAIT;\nextern unsigned IOCTL_MTIOCGETCONFIG;\nextern unsigned IOCTL_MTIOCSETCONFIG;\nextern unsigned IOCTL_PIO_CMAP;\nextern unsigned IOCTL_PIO_FONT;\nextern unsigned IOCTL_PIO_UNIMAP;\nextern unsigned IOCTL_PIO_UNIMAPCLR;\nextern unsigned IOCTL_PIO_UNISCRNMAP;\nextern unsigned IOCTL_SCSI_IOCTL_GET_IDLUN;\nextern unsigned IOCTL_SCSI_IOCTL_PROBE_HOST;\nextern unsigned IOCTL_SCSI_IOCTL_TAGGED_DISABLE;\nextern unsigned IOCTL_SCSI_IOCTL_TAGGED_ENABLE;\nextern unsigned IOCTL_SIOCAIPXITFCRT;\nextern unsigned IOCTL_SIOCAIPXPRISLT;\nextern unsigned IOCTL_SIOCAX25ADDUID;\nextern unsigned IOCTL_SIOCAX25DELUID;\nextern unsigned IOCTL_SIOCAX25GETPARMS;\nextern unsigned IOCTL_SIOCAX25GETUID;\nextern unsigned IOCTL_SIOCAX25NOUID;\nextern unsigned IOCTL_SIOCAX25SETPARMS;\nextern unsigned IOCTL_SIOCDEVPLIP;\nextern unsigned IOCTL_SIOCIPXCFGDATA;\nextern unsigned IOCTL_SIOCNRDECOBS;\nextern unsigned IOCTL_SIOCNRGETPARMS;\nextern unsigned IOCTL_SIOCNRRTCTL;\nextern unsigned IOCTL_SIOCNRSETPARMS;\nextern 
unsigned IOCTL_SNDCTL_DSP_GETISPACE;\nextern unsigned IOCTL_SNDCTL_DSP_GETOSPACE;\nextern unsigned IOCTL_TIOCGSERIAL;\nextern unsigned IOCTL_TIOCSERGETMULTI;\nextern unsigned IOCTL_TIOCSERSETMULTI;\nextern unsigned IOCTL_TIOCSSERIAL;\nextern unsigned IOCTL_GIO_SCRNMAP;\nextern unsigned IOCTL_KDDISABIO;\nextern unsigned IOCTL_KDENABIO;\nextern unsigned IOCTL_KDGETLED;\nextern unsigned IOCTL_KDGETMODE;\nextern unsigned IOCTL_KDGKBMODE;\nextern unsigned IOCTL_KDGKBTYPE;\nextern unsigned IOCTL_KDMKTONE;\nextern unsigned IOCTL_KDSETLED;\nextern unsigned IOCTL_KDSETMODE;\nextern unsigned IOCTL_KDSKBMODE;\nextern unsigned IOCTL_KIOCSOUND;\nextern unsigned IOCTL_PIO_SCRNMAP;\n#endif\n\nextern const int si_SEGV_MAPERR;\nextern const int si_SEGV_ACCERR;\n}  // namespace __sanitizer\n\n#define CHECK_TYPE_SIZE(TYPE) \\\n  COMPILER_CHECK(sizeof(__sanitizer_##TYPE) == sizeof(TYPE))\n\n#define CHECK_SIZE_AND_OFFSET(CLASS, MEMBER)                      \\\n  COMPILER_CHECK(sizeof(((__sanitizer_##CLASS *)NULL)->MEMBER) == \\\n                 sizeof(((CLASS *)NULL)->MEMBER));                \\\n  COMPILER_CHECK(offsetof(__sanitizer_##CLASS, MEMBER) ==         \\\n                 offsetof(CLASS, MEMBER))\n\n// For sigaction, which is a function and struct at the same time,\n// and thus requires explicit \"struct\" in sizeof() expression.\n#define CHECK_STRUCT_SIZE_AND_OFFSET(CLASS, MEMBER)                      \\\n  COMPILER_CHECK(sizeof(((struct __sanitizer_##CLASS *)NULL)->MEMBER) == \\\n                 sizeof(((struct CLASS *)NULL)->MEMBER));                \\\n  COMPILER_CHECK(offsetof(struct __sanitizer_##CLASS, MEMBER) ==         \\\n                 offsetof(struct CLASS, MEMBER))\n\n#define SIGACTION_SYMNAME sigaction\n\n#endif  // SANITIZER_LINUX || SANITIZER_MAC\n\n#endif\n"
  },
  {
    "path": "runtime/sanitizer_common/sanitizer_platform_limits_solaris.cpp",
    "content": "//===-- sanitizer_platform_limits_solaris.cpp -----------------------------===//\n//\n// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n// See https://llvm.org/LICENSE.txt for license information.\n// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n//\n//===----------------------------------------------------------------------===//\n//\n// This file is a part of Sanitizer common code.\n//\n// Sizes and layouts of platform-specific Solaris data structures.\n//===----------------------------------------------------------------------===//\n\n#include \"sanitizer_platform.h\"\n\n#if SANITIZER_SOLARIS\n#include <arpa/inet.h>\n#include <dirent.h>\n#include <glob.h>\n#include <grp.h>\n#include <ifaddrs.h>\n#include <limits.h>\n#include <link.h>\n#include <net/if.h>\n#include <net/route.h>\n#include <netdb.h>\n#include <netinet/ip_mroute.h>\n#include <poll.h>\n#include <pthread.h>\n#include <pwd.h>\n#include <rpc/xdr.h>\n#include <semaphore.h>\n#include <signal.h>\n#include <stddef.h>\n#include <sys/ethernet.h>\n#include <sys/filio.h>\n#include <sys/ipc.h>\n#include <sys/mman.h>\n#include <sys/mount.h>\n#include <sys/mtio.h>\n#include <sys/ptyvar.h>\n#include <sys/resource.h>\n#include <sys/shm.h>\n#include <sys/socket.h>\n#include <sys/sockio.h>\n#include <sys/stat.h>\n#include <sys/statfs.h>\n#include <sys/statvfs.h>\n#include <sys/time.h>\n#include <sys/timeb.h>\n#include <sys/times.h>\n#include <sys/types.h>\n#include <sys/utsname.h>\n#include <termios.h>\n#include <time.h>\n#include <utmp.h>\n#include <utmpx.h>\n#include <wchar.h>\n#include <wordexp.h>\n\n// Include these after system headers to avoid name clashes and ambiguities.\n#include \"sanitizer_internal_defs.h\"\n#include \"sanitizer_platform_limits_solaris.h\"\n\nnamespace __sanitizer {\n  unsigned struct_utsname_sz = sizeof(struct utsname);\n  unsigned struct_stat_sz = sizeof(struct stat);\n  unsigned struct_stat64_sz = sizeof(struct stat64);\n  
unsigned struct_rusage_sz = sizeof(struct rusage);\n  unsigned struct_tm_sz = sizeof(struct tm);\n  unsigned struct_passwd_sz = sizeof(struct passwd);\n  unsigned struct_group_sz = sizeof(struct group);\n  unsigned siginfo_t_sz = sizeof(siginfo_t);\n  unsigned struct_sigaction_sz = sizeof(struct sigaction);\n  unsigned struct_stack_t_sz = sizeof(stack_t);\n  unsigned struct_itimerval_sz = sizeof(struct itimerval);\n  unsigned pthread_t_sz = sizeof(pthread_t);\n  unsigned pthread_mutex_t_sz = sizeof(pthread_mutex_t);\n  unsigned pthread_cond_t_sz = sizeof(pthread_cond_t);\n  unsigned pid_t_sz = sizeof(pid_t);\n  unsigned timeval_sz = sizeof(timeval);\n  unsigned uid_t_sz = sizeof(uid_t);\n  unsigned gid_t_sz = sizeof(gid_t);\n  unsigned mbstate_t_sz = sizeof(mbstate_t);\n  unsigned sigset_t_sz = sizeof(sigset_t);\n  unsigned struct_timezone_sz = sizeof(struct timezone);\n  unsigned struct_tms_sz = sizeof(struct tms);\n  unsigned struct_sigevent_sz = sizeof(struct sigevent);\n  unsigned struct_sched_param_sz = sizeof(struct sched_param);\n  unsigned struct_statfs_sz = sizeof(struct statfs);\n  unsigned struct_sockaddr_sz = sizeof(struct sockaddr);\n  unsigned ucontext_t_sz(void *ctx) { return sizeof(ucontext_t); }\n  unsigned struct_timespec_sz = sizeof(struct timespec);\n#if SANITIZER_SOLARIS32\n  unsigned struct_statvfs64_sz = sizeof(struct statvfs64);\n#endif\n  unsigned struct_statvfs_sz = sizeof(struct statvfs);\n\n  const uptr sig_ign = (uptr)SIG_IGN;\n  const uptr sig_dfl = (uptr)SIG_DFL;\n  const uptr sig_err = (uptr)SIG_ERR;\n  const uptr sa_siginfo = (uptr)SA_SIGINFO;\n\n  int shmctl_ipc_stat = (int)IPC_STAT;\n\n  unsigned struct_utmp_sz = sizeof(struct utmp);\n  unsigned struct_utmpx_sz = sizeof(struct utmpx);\n\n  int map_fixed = MAP_FIXED;\n\n  int af_inet = (int)AF_INET;\n  int af_inet6 = (int)AF_INET6;\n\n  uptr __sanitizer_in_addr_sz(int af) {\n    if (af == AF_INET)\n      return sizeof(struct in_addr);\n    else if (af == AF_INET6)\n      return 
sizeof(struct in6_addr);\n    else\n      return 0;\n  }\n\n  unsigned struct_ElfW_Phdr_sz = sizeof(ElfW(Phdr));\n\n  int glob_nomatch = GLOB_NOMATCH;\n  const int wordexp_wrde_dooffs = WRDE_DOOFFS;\n\n  unsigned path_max = PATH_MAX;\n\n  // ioctl arguments\n  unsigned struct_ifreq_sz = sizeof(struct ifreq);\n  unsigned struct_termios_sz = sizeof(struct termios);\n  unsigned struct_winsize_sz = sizeof(struct winsize);\n\n  unsigned struct_sioc_sg_req_sz = sizeof(struct sioc_sg_req);\n  unsigned struct_sioc_vif_req_sz = sizeof(struct sioc_vif_req);\n\n  const unsigned IOCTL_NOT_PRESENT = 0;\n\n  unsigned IOCTL_FIOASYNC = FIOASYNC;\n  unsigned IOCTL_FIOCLEX = FIOCLEX;\n  unsigned IOCTL_FIOGETOWN = FIOGETOWN;\n  unsigned IOCTL_FIONBIO = FIONBIO;\n  unsigned IOCTL_FIONCLEX = FIONCLEX;\n  unsigned IOCTL_FIOSETOWN = FIOSETOWN;\n  unsigned IOCTL_SIOCADDMULTI = SIOCADDMULTI;\n  unsigned IOCTL_SIOCATMARK = SIOCATMARK;\n  unsigned IOCTL_SIOCDELMULTI = SIOCDELMULTI;\n  unsigned IOCTL_SIOCGIFADDR = SIOCGIFADDR;\n  unsigned IOCTL_SIOCGIFBRDADDR = SIOCGIFBRDADDR;\n  unsigned IOCTL_SIOCGIFCONF = SIOCGIFCONF;\n  unsigned IOCTL_SIOCGIFDSTADDR = SIOCGIFDSTADDR;\n  unsigned IOCTL_SIOCGIFFLAGS = SIOCGIFFLAGS;\n  unsigned IOCTL_SIOCGIFMETRIC = SIOCGIFMETRIC;\n  unsigned IOCTL_SIOCGIFMTU = SIOCGIFMTU;\n  unsigned IOCTL_SIOCGIFNETMASK = SIOCGIFNETMASK;\n  unsigned IOCTL_SIOCGPGRP = SIOCGPGRP;\n  unsigned IOCTL_SIOCSIFADDR = SIOCSIFADDR;\n  unsigned IOCTL_SIOCSIFBRDADDR = SIOCSIFBRDADDR;\n  unsigned IOCTL_SIOCSIFDSTADDR = SIOCSIFDSTADDR;\n  unsigned IOCTL_SIOCSIFFLAGS = SIOCSIFFLAGS;\n  unsigned IOCTL_SIOCSIFMETRIC = SIOCSIFMETRIC;\n  unsigned IOCTL_SIOCSIFMTU = SIOCSIFMTU;\n  unsigned IOCTL_SIOCSIFNETMASK = SIOCSIFNETMASK;\n  unsigned IOCTL_SIOCSPGRP = SIOCSPGRP;\n  unsigned IOCTL_TIOCEXCL = TIOCEXCL;\n  unsigned IOCTL_TIOCGETD = TIOCGETD;\n  unsigned IOCTL_TIOCGPGRP = TIOCGPGRP;\n  unsigned IOCTL_TIOCGWINSZ = TIOCGWINSZ;\n  unsigned IOCTL_TIOCMBIC = TIOCMBIC;\n  unsigned IOCTL_TIOCMBIS 
= TIOCMBIS;\n  unsigned IOCTL_TIOCMGET = TIOCMGET;\n  unsigned IOCTL_TIOCMSET = TIOCMSET;\n  unsigned IOCTL_TIOCNOTTY = TIOCNOTTY;\n  unsigned IOCTL_TIOCNXCL = TIOCNXCL;\n  unsigned IOCTL_TIOCOUTQ = TIOCOUTQ;\n  unsigned IOCTL_TIOCPKT = TIOCPKT;\n  unsigned IOCTL_TIOCSCTTY = TIOCSCTTY;\n  unsigned IOCTL_TIOCSETD = TIOCSETD;\n  unsigned IOCTL_TIOCSPGRP = TIOCSPGRP;\n  unsigned IOCTL_TIOCSTI = TIOCSTI;\n  unsigned IOCTL_TIOCSWINSZ = TIOCSWINSZ;\n\n  unsigned IOCTL_MTIOCGET = MTIOCGET;\n  unsigned IOCTL_MTIOCTOP = MTIOCTOP;\n\n  const int si_SEGV_MAPERR = SEGV_MAPERR;\n  const int si_SEGV_ACCERR = SEGV_ACCERR;\n} // namespace __sanitizer\n\nusing namespace __sanitizer;\n\nCOMPILER_CHECK(sizeof(__sanitizer_pthread_attr_t) >= sizeof(pthread_attr_t));\n\nCOMPILER_CHECK(sizeof(socklen_t) == sizeof(unsigned));\nCHECK_TYPE_SIZE(pthread_key_t);\n\n// There are more undocumented fields in dl_phdr_info that we are not interested\n// in.\nCOMPILER_CHECK(sizeof(__sanitizer_dl_phdr_info) <= sizeof(dl_phdr_info));\nCHECK_SIZE_AND_OFFSET(dl_phdr_info, dlpi_addr);\nCHECK_SIZE_AND_OFFSET(dl_phdr_info, dlpi_name);\nCHECK_SIZE_AND_OFFSET(dl_phdr_info, dlpi_phdr);\nCHECK_SIZE_AND_OFFSET(dl_phdr_info, dlpi_phnum);\n\n// There are additional fields we are not interested in.\nCOMPILER_CHECK(sizeof(__sanitizer_glob_t) <= sizeof(glob_t));\nCHECK_SIZE_AND_OFFSET(glob_t, gl_pathc);\nCHECK_SIZE_AND_OFFSET(glob_t, gl_pathv);\nCHECK_SIZE_AND_OFFSET(glob_t, gl_offs);\n\nCHECK_TYPE_SIZE(addrinfo);\nCHECK_SIZE_AND_OFFSET(addrinfo, ai_flags);\nCHECK_SIZE_AND_OFFSET(addrinfo, ai_family);\nCHECK_SIZE_AND_OFFSET(addrinfo, ai_socktype);\nCHECK_SIZE_AND_OFFSET(addrinfo, ai_protocol);\nCHECK_SIZE_AND_OFFSET(addrinfo, ai_protocol);\nCHECK_SIZE_AND_OFFSET(addrinfo, ai_addrlen);\nCHECK_SIZE_AND_OFFSET(addrinfo, ai_canonname);\nCHECK_SIZE_AND_OFFSET(addrinfo, ai_addr);\n\nCHECK_TYPE_SIZE(hostent);\nCHECK_SIZE_AND_OFFSET(hostent, h_name);\nCHECK_SIZE_AND_OFFSET(hostent, 
h_aliases);\nCHECK_SIZE_AND_OFFSET(hostent, h_addrtype);\nCHECK_SIZE_AND_OFFSET(hostent, h_length);\nCHECK_SIZE_AND_OFFSET(hostent, h_addr_list);\n\nCHECK_TYPE_SIZE(iovec);\nCHECK_SIZE_AND_OFFSET(iovec, iov_base);\nCHECK_SIZE_AND_OFFSET(iovec, iov_len);\n\nCHECK_TYPE_SIZE(msghdr);\nCHECK_SIZE_AND_OFFSET(msghdr, msg_name);\nCHECK_SIZE_AND_OFFSET(msghdr, msg_namelen);\nCHECK_SIZE_AND_OFFSET(msghdr, msg_iov);\nCHECK_SIZE_AND_OFFSET(msghdr, msg_iovlen);\nCHECK_SIZE_AND_OFFSET(msghdr, msg_control);\nCHECK_SIZE_AND_OFFSET(msghdr, msg_controllen);\nCHECK_SIZE_AND_OFFSET(msghdr, msg_flags);\n\nCHECK_TYPE_SIZE(cmsghdr);\nCHECK_SIZE_AND_OFFSET(cmsghdr, cmsg_len);\nCHECK_SIZE_AND_OFFSET(cmsghdr, cmsg_level);\nCHECK_SIZE_AND_OFFSET(cmsghdr, cmsg_type);\n\nCOMPILER_CHECK(sizeof(__sanitizer_dirent) <= sizeof(dirent));\nCHECK_SIZE_AND_OFFSET(dirent, d_ino);\nCHECK_SIZE_AND_OFFSET(dirent, d_off);\nCHECK_SIZE_AND_OFFSET(dirent, d_reclen);\n\n#if SANITIZER_SOLARIS32\nCOMPILER_CHECK(sizeof(__sanitizer_dirent64) <= sizeof(dirent64));\nCHECK_SIZE_AND_OFFSET(dirent64, d_ino);\nCHECK_SIZE_AND_OFFSET(dirent64, d_off);\nCHECK_SIZE_AND_OFFSET(dirent64, d_reclen);\n#endif\n\nCHECK_TYPE_SIZE(ifconf);\nCHECK_SIZE_AND_OFFSET(ifconf, ifc_len);\nCHECK_SIZE_AND_OFFSET(ifconf, ifc_ifcu);\n\nCHECK_TYPE_SIZE(pollfd);\nCHECK_SIZE_AND_OFFSET(pollfd, fd);\nCHECK_SIZE_AND_OFFSET(pollfd, events);\nCHECK_SIZE_AND_OFFSET(pollfd, revents);\n\nCHECK_TYPE_SIZE(nfds_t);\n\nCHECK_TYPE_SIZE(sigset_t);\n\nCOMPILER_CHECK(sizeof(__sanitizer_sigaction) == sizeof(struct sigaction));\n// Can't write checks for sa_handler and sa_sigaction due to them being\n// preprocessor macros.\nCHECK_STRUCT_SIZE_AND_OFFSET(sigaction, sa_mask);\nCHECK_STRUCT_SIZE_AND_OFFSET(sigaction, sa_flags);\n\nCHECK_TYPE_SIZE(wordexp_t);\nCHECK_SIZE_AND_OFFSET(wordexp_t, we_wordc);\nCHECK_SIZE_AND_OFFSET(wordexp_t, we_wordv);\nCHECK_SIZE_AND_OFFSET(wordexp_t, we_offs);\n\nCHECK_TYPE_SIZE(tm);\nCHECK_SIZE_AND_OFFSET(tm, 
tm_sec);\nCHECK_SIZE_AND_OFFSET(tm, tm_min);\nCHECK_SIZE_AND_OFFSET(tm, tm_hour);\nCHECK_SIZE_AND_OFFSET(tm, tm_mday);\nCHECK_SIZE_AND_OFFSET(tm, tm_mon);\nCHECK_SIZE_AND_OFFSET(tm, tm_year);\nCHECK_SIZE_AND_OFFSET(tm, tm_wday);\nCHECK_SIZE_AND_OFFSET(tm, tm_yday);\nCHECK_SIZE_AND_OFFSET(tm, tm_isdst);\n\nCHECK_TYPE_SIZE(ether_addr);\n\nCHECK_TYPE_SIZE(ipc_perm);\nCHECK_SIZE_AND_OFFSET(ipc_perm, key);\nCHECK_SIZE_AND_OFFSET(ipc_perm, seq);\nCHECK_SIZE_AND_OFFSET(ipc_perm, uid);\nCHECK_SIZE_AND_OFFSET(ipc_perm, gid);\nCHECK_SIZE_AND_OFFSET(ipc_perm, cuid);\nCHECK_SIZE_AND_OFFSET(ipc_perm, cgid);\nCHECK_SIZE_AND_OFFSET(ipc_perm, mode);\n\nCHECK_TYPE_SIZE(shmid_ds);\nCHECK_SIZE_AND_OFFSET(shmid_ds, shm_perm);\nCHECK_SIZE_AND_OFFSET(shmid_ds, shm_segsz);\nCHECK_SIZE_AND_OFFSET(shmid_ds, shm_atime);\nCHECK_SIZE_AND_OFFSET(shmid_ds, shm_dtime);\nCHECK_SIZE_AND_OFFSET(shmid_ds, shm_ctime);\nCHECK_SIZE_AND_OFFSET(shmid_ds, shm_cpid);\nCHECK_SIZE_AND_OFFSET(shmid_ds, shm_lpid);\nCHECK_SIZE_AND_OFFSET(shmid_ds, shm_nattch);\n\nCHECK_TYPE_SIZE(clock_t);\n\nCHECK_TYPE_SIZE(ifaddrs);\nCHECK_SIZE_AND_OFFSET(ifaddrs, ifa_next);\nCHECK_SIZE_AND_OFFSET(ifaddrs, ifa_name);\nCHECK_SIZE_AND_OFFSET(ifaddrs, ifa_addr);\nCHECK_SIZE_AND_OFFSET(ifaddrs, ifa_netmask);\n// Compare against the union, because we can't reach into the union in a\n// compliant way.\n#ifdef ifa_dstaddr\n#undef ifa_dstaddr\n#endif\nCOMPILER_CHECK(sizeof(((__sanitizer_ifaddrs *)nullptr)->ifa_dstaddr) ==\n               sizeof(((ifaddrs *)nullptr)->ifa_ifu));\nCOMPILER_CHECK(offsetof(__sanitizer_ifaddrs, ifa_dstaddr) ==\n               offsetof(ifaddrs, ifa_ifu));\nCHECK_SIZE_AND_OFFSET(ifaddrs, ifa_data);\n\nCHECK_TYPE_SIZE(timeb);\nCHECK_SIZE_AND_OFFSET(timeb, time);\nCHECK_SIZE_AND_OFFSET(timeb, millitm);\nCHECK_SIZE_AND_OFFSET(timeb, timezone);\nCHECK_SIZE_AND_OFFSET(timeb, dstflag);\n\nCHECK_TYPE_SIZE(passwd);\nCHECK_SIZE_AND_OFFSET(passwd, pw_name);\nCHECK_SIZE_AND_OFFSET(passwd, 
pw_passwd);\nCHECK_SIZE_AND_OFFSET(passwd, pw_uid);\nCHECK_SIZE_AND_OFFSET(passwd, pw_gid);\nCHECK_SIZE_AND_OFFSET(passwd, pw_dir);\nCHECK_SIZE_AND_OFFSET(passwd, pw_shell);\n\nCHECK_SIZE_AND_OFFSET(passwd, pw_gecos);\n\nCHECK_TYPE_SIZE(group);\nCHECK_SIZE_AND_OFFSET(group, gr_name);\nCHECK_SIZE_AND_OFFSET(group, gr_passwd);\nCHECK_SIZE_AND_OFFSET(group, gr_gid);\nCHECK_SIZE_AND_OFFSET(group, gr_mem);\n\nCHECK_TYPE_SIZE(XDR);\nCHECK_SIZE_AND_OFFSET(XDR, x_op);\nCHECK_SIZE_AND_OFFSET(XDR, x_ops);\nCHECK_SIZE_AND_OFFSET(XDR, x_public);\nCHECK_SIZE_AND_OFFSET(XDR, x_private);\nCHECK_SIZE_AND_OFFSET(XDR, x_base);\nCHECK_SIZE_AND_OFFSET(XDR, x_handy);\nCOMPILER_CHECK(__sanitizer_XDR_ENCODE == XDR_ENCODE);\nCOMPILER_CHECK(__sanitizer_XDR_DECODE == XDR_DECODE);\nCOMPILER_CHECK(__sanitizer_XDR_FREE == XDR_FREE);\n\nCHECK_TYPE_SIZE(sem_t);\n\n#endif  // SANITIZER_SOLARIS\n"
  },
  {
    "path": "runtime/sanitizer_common/sanitizer_platform_limits_solaris.h",
    "content": "//===-- sanitizer_platform_limits_solaris.h -------------------------------===//\n//\n// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n// See https://llvm.org/LICENSE.txt for license information.\n// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n//\n//===----------------------------------------------------------------------===//\n//\n// This file is a part of Sanitizer common code.\n//\n// Sizes and layouts of platform-specific Solaris data structures.\n//===----------------------------------------------------------------------===//\n\n#ifndef SANITIZER_PLATFORM_LIMITS_SOLARIS_H\n#define SANITIZER_PLATFORM_LIMITS_SOLARIS_H\n\n#if SANITIZER_SOLARIS\n\n#include \"sanitizer_internal_defs.h\"\n#include \"sanitizer_platform.h\"\n\nnamespace __sanitizer {\nextern unsigned struct_utsname_sz;\nextern unsigned struct_stat_sz;\nextern unsigned struct_stat64_sz;\nextern unsigned struct_rusage_sz;\nextern unsigned siginfo_t_sz;\nextern unsigned struct_itimerval_sz;\nextern unsigned pthread_t_sz;\nextern unsigned pthread_mutex_t_sz;\nextern unsigned pthread_cond_t_sz;\nextern unsigned pid_t_sz;\nextern unsigned timeval_sz;\nextern unsigned uid_t_sz;\nextern unsigned gid_t_sz;\nextern unsigned mbstate_t_sz;\nextern unsigned struct_timezone_sz;\nextern unsigned struct_tms_sz;\nextern unsigned struct_itimerspec_sz;\nextern unsigned struct_sigevent_sz;\nextern unsigned struct_stack_t_sz;\nextern unsigned struct_sched_param_sz;\nextern unsigned struct_statfs64_sz;\nextern unsigned struct_statfs_sz;\nextern unsigned struct_sockaddr_sz;\nunsigned ucontext_t_sz(void *ctx);\n\nextern unsigned struct_timespec_sz;\nextern unsigned struct_rlimit_sz;\nextern unsigned struct_utimbuf_sz;\n\nstruct __sanitizer_sem_t {\n  //u64 data[6];\n  u32 sem_count;\n  u16 sem_type;\n  u16 sem_magic;\n  u64 sem_pad1[3];\n  u64 sem_pad2[2];\n};\n\nstruct __sanitizer_ipc_perm {\n  unsigned int uid;           // uid_t\n  unsigned int gid;           
// gid_t\n  unsigned int cuid;          // uid_t\n  unsigned int cgid;          // gid_t\n  unsigned int mode;          // mode_t\n  unsigned int seq;           // uint_t\n  int key;                    // key_t\n#if !defined(_LP64)\n  int pad[4];\n#endif\n};\n\nstruct __sanitizer_shmid_ds {\n  __sanitizer_ipc_perm shm_perm;\n  unsigned long shm_segsz;    // size_t\n  unsigned long shm_flags;    // uintptr_t\n  unsigned short shm_lkcnt;   // ushort_t\n  int shm_lpid;               // pid_t\n  int shm_cpid;               // pid_t\n  unsigned long shm_nattch;   // shmatt_t\n  unsigned long shm_cnattch;  // ulong_t\n#if defined(_LP64)\n  long shm_atime;             // time_t\n  long shm_dtime;\n  long shm_ctime;\n  void *shm_amp;\n  u64 shm_gransize;           // uint64_t\n  u64 shm_allocated;          // uint64_t\n  u64 shm_pad4[1];            // int64_t\n#else\n  long shm_atime;             // time_t\n  int shm_pad1;               // int32_t\n  long shm_dtime;             // time_t\n  int shm_pad2;               // int32_t\n  long shm_ctime;             // time_t\n  void *shm_amp;\n  u64 shm_gransize;           // uint64_t\n  u64 shm_allocated;          // uint64_t\n#endif\n};\n\nextern unsigned struct_statvfs_sz;\n#if SANITIZER_SOLARIS32\nextern unsigned struct_statvfs64_sz;\n#endif\n\nstruct __sanitizer_iovec {\n  void *iov_base;\n  uptr iov_len;\n};\n\nstruct __sanitizer_ifaddrs {\n  struct __sanitizer_ifaddrs *ifa_next;\n  char *ifa_name;\n  u64 ifa_flags;     // uint64_t\n  void *ifa_addr;    // (struct sockaddr *)\n  void *ifa_netmask; // (struct sockaddr *)\n  // This is a union on Linux.\n# ifdef ifa_dstaddr\n# undef ifa_dstaddr\n# endif\n  void *ifa_dstaddr; // (struct sockaddr *)\n  void *ifa_data;\n};\n\ntypedef unsigned __sanitizer_pthread_key_t;\n\nstruct __sanitizer_XDR {\n  int x_op;\n  void *x_ops;\n  uptr x_public;\n  uptr x_private;\n  uptr x_base;\n  unsigned x_handy;\n};\n\nconst int __sanitizer_XDR_ENCODE = 0;\nconst int __sanitizer_XDR_DECODE = 
1;\nconst int __sanitizer_XDR_FREE = 2;\n\nstruct __sanitizer_passwd {\n  char *pw_name;\n  char *pw_passwd;\n  unsigned int pw_uid;    // uid_t\n  unsigned int pw_gid;    // gid_t\n  char *pw_age;\n  char *pw_comment;\n  char *pw_gecos;\n  char *pw_dir;\n  char *pw_shell;\n};\n\nstruct __sanitizer_group {\n  char *gr_name;\n  char *gr_passwd;\n  int gr_gid;\n  char **gr_mem;\n};\n\ntypedef long __sanitizer_time_t;\n\ntypedef long __sanitizer_suseconds_t;\n\nstruct __sanitizer_timeval {\n  __sanitizer_time_t tv_sec;\n  __sanitizer_suseconds_t tv_usec;\n};\n\nstruct __sanitizer_itimerval {\n  struct __sanitizer_timeval it_interval;\n  struct __sanitizer_timeval it_value;\n};\n\nstruct __sanitizer_timeb {\n  __sanitizer_time_t time;\n  unsigned short millitm;\n  short timezone;\n  short dstflag;\n};\n\nstruct __sanitizer_ether_addr {\n  u8 octet[6];\n};\n\nstruct __sanitizer_tm {\n  int tm_sec;\n  int tm_min;\n  int tm_hour;\n  int tm_mday;\n  int tm_mon;\n  int tm_year;\n  int tm_wday;\n  int tm_yday;\n  int tm_isdst;\n};\n\nstruct __sanitizer_msghdr {\n  void *msg_name;\n  unsigned msg_namelen;\n  struct __sanitizer_iovec *msg_iov;\n  unsigned msg_iovlen;\n  void *msg_control;\n  unsigned msg_controllen;\n  int msg_flags;\n};\nstruct __sanitizer_cmsghdr {\n  unsigned cmsg_len;\n  int cmsg_level;\n  int cmsg_type;\n};\n\n#if SANITIZER_SOLARIS && (defined(_LP64) || _FILE_OFFSET_BITS == 64)\nstruct __sanitizer_dirent {\n  unsigned long long d_ino;\n  long long d_off;\n  unsigned short d_reclen;\n  // more fields that we don't care about\n};\n#else\nstruct __sanitizer_dirent {\n  unsigned long d_ino;\n  long d_off;\n  unsigned short d_reclen;\n  // more fields that we don't care about\n};\n#endif\n\nstruct __sanitizer_dirent64 {\n  unsigned long long d_ino;\n  unsigned long long d_off;\n  unsigned short d_reclen;\n  // more fields that we don't care about\n};\n\ntypedef long __sanitizer_clock_t;\ntypedef int __sanitizer_clockid_t;\n\n// This thing depends on the 
platform. We are only interested in the upper\n// limit. Verified with a compiler assert in .cpp.\nunion __sanitizer_pthread_attr_t {\n  char size[128];\n  void *align;\n};\n\nstruct __sanitizer_sigset_t {\n  // uint32_t * 4\n  unsigned int __bits[4];\n};\n\nstruct __sanitizer_siginfo {\n  // The size is determined by looking at sizeof of real siginfo_t on linux.\n  u64 opaque[128 / sizeof(u64)];\n};\n\nusing __sanitizer_sighandler_ptr = void (*)(int sig);\nusing __sanitizer_sigactionhandler_ptr =\n    void (*)(int sig, __sanitizer_siginfo *siginfo, void *uctx);\n\nstruct __sanitizer_sigaction {\n  int sa_flags;\n  union {\n    __sanitizer_sigactionhandler_ptr sigaction;\n    __sanitizer_sighandler_ptr handler;\n  };\n  __sanitizer_sigset_t sa_mask;\n#if !defined(_LP64)\n  int sa_resv[2];\n#endif\n};\n\nstruct __sanitizer_kernel_sigset_t {\n  u8 sig[8];\n};\n\nstruct __sanitizer_kernel_sigaction_t {\n  union {\n    void (*handler)(int signo);\n    void (*sigaction)(int signo, __sanitizer_siginfo *info, void *ctx);\n  };\n  unsigned long sa_flags;\n  void (*sa_restorer)(void);\n  __sanitizer_kernel_sigset_t sa_mask;\n};\n\nextern const uptr sig_ign;\nextern const uptr sig_dfl;\nextern const uptr sig_err;\nextern const uptr sa_siginfo;\n\nextern int af_inet;\nextern int af_inet6;\nuptr __sanitizer_in_addr_sz(int af);\n\nstruct __sanitizer_dl_phdr_info {\n  uptr dlpi_addr;\n  const char *dlpi_name;\n  const void *dlpi_phdr;\n  short dlpi_phnum;\n};\n\nextern unsigned struct_ElfW_Phdr_sz;\n\nstruct __sanitizer_addrinfo {\n  int ai_flags;\n  int ai_family;\n  int ai_socktype;\n  int ai_protocol;\n#if defined(__sparcv9)\n  int _ai_pad;\n#endif\n  unsigned ai_addrlen;\n  char *ai_canonname;\n  void *ai_addr;\n  struct __sanitizer_addrinfo *ai_next;\n};\n\nstruct __sanitizer_hostent {\n  char *h_name;\n  char **h_aliases;\n  int h_addrtype;\n  int h_length;\n  char **h_addr_list;\n};\n\nstruct __sanitizer_pollfd {\n  int fd;\n  short events;\n  short 
revents;\n};\n\ntypedef unsigned long __sanitizer_nfds_t;\n\nstruct __sanitizer_glob_t {\n  uptr gl_pathc;\n  char **gl_pathv;\n  uptr gl_offs;\n  char **gl_pathp;\n  int gl_pathn;\n};\n\nextern int glob_nomatch;\nextern int glob_altdirfunc;\nextern const int wordexp_wrde_dooffs;\n\nextern unsigned path_max;\n\nstruct __sanitizer_wordexp_t {\n  uptr we_wordc;\n  char **we_wordv;\n  uptr we_offs;\n  char **we_wordp;\n  int we_wordn;\n};\n\ntypedef void __sanitizer_FILE;\n#define SANITIZER_HAS_STRUCT_FILE 0\n\n// This simplifies generic code\n#define struct_shminfo_sz -1\n#define struct_shm_info_sz -1\n#define shmctl_shm_stat -1\n#define shmctl_ipc_info -1\n#define shmctl_shm_info -1\n\nextern int shmctl_ipc_stat;\n\nextern unsigned struct_utmp_sz;\nextern unsigned struct_utmpx_sz;\n\nextern int map_fixed;\n\n// ioctl arguments\nstruct __sanitizer_ifconf {\n  int ifc_len;\n  union {\n    void *ifcu_req;\n  } ifc_ifcu;\n};\n\n// <sys/ioccom.h>\n#define IOC_NRBITS 8\n#define IOC_TYPEBITS 8\n#define IOC_SIZEBITS 12\n#define IOC_DIRBITS 4\n#undef IOC_NONE\n#define IOC_NONE 2U     // IOC_VOID\n#define IOC_READ 4U     // IOC_OUT\n#define IOC_WRITE 8U    // IOC_IN\n\n#define IOC_NRMASK ((1 << IOC_NRBITS) - 1)\n#define IOC_TYPEMASK ((1 << IOC_TYPEBITS) - 1)\n#define IOC_SIZEMASK ((1 << IOC_SIZEBITS) - 1)\n#define IOC_DIRMASK ((1 << IOC_DIRBITS) - 1)\n#define IOC_NRSHIFT 0\n#define IOC_TYPESHIFT (IOC_NRSHIFT + IOC_NRBITS)\n#define IOC_SIZESHIFT (IOC_TYPESHIFT + IOC_TYPEBITS)\n#define IOC_DIRSHIFT (IOC_SIZESHIFT + IOC_SIZEBITS)\n\n#define IOC_DIR(nr) (((nr) >> IOC_DIRSHIFT) & IOC_DIRMASK)\n#define IOC_TYPE(nr) (((nr) >> IOC_TYPESHIFT) & IOC_TYPEMASK)\n#define IOC_NR(nr) (((nr) >> IOC_NRSHIFT) & IOC_NRMASK)\n\n#if defined(__sparc__)\n// In sparc the 14 bits SIZE field overlaps with the\n// least significant bit of DIR, so either IOC_READ or\n// IOC_WRITE shall be 1 in order to get a non-zero SIZE.\n#define IOC_SIZE(nr) \\\n  ((((((nr) >> 29) & 0x7) & (4U | 2U)) == 0) ? 
0 : (((nr) >> 16) & 0x3fff))\n#else\n#define IOC_SIZE(nr) (((nr) >> IOC_SIZESHIFT) & IOC_SIZEMASK)\n#endif\n\nextern unsigned struct_ifreq_sz;\nextern unsigned struct_termios_sz;\nextern unsigned struct_winsize_sz;\n\nextern unsigned struct_sioc_sg_req_sz;\nextern unsigned struct_sioc_vif_req_sz;\n\n// ioctl request identifiers\n\n// A special value to mark ioctls that are not present on the target platform,\n// when it can not be determined without including any system headers.\nextern const unsigned IOCTL_NOT_PRESENT;\n\nextern unsigned IOCTL_FIOASYNC;\nextern unsigned IOCTL_FIOCLEX;\nextern unsigned IOCTL_FIOGETOWN;\nextern unsigned IOCTL_FIONBIO;\nextern unsigned IOCTL_FIONCLEX;\nextern unsigned IOCTL_FIOSETOWN;\nextern unsigned IOCTL_SIOCADDMULTI;\nextern unsigned IOCTL_SIOCATMARK;\nextern unsigned IOCTL_SIOCDELMULTI;\nextern unsigned IOCTL_SIOCGIFADDR;\nextern unsigned IOCTL_SIOCGIFBRDADDR;\nextern unsigned IOCTL_SIOCGIFCONF;\nextern unsigned IOCTL_SIOCGIFDSTADDR;\nextern unsigned IOCTL_SIOCGIFFLAGS;\nextern unsigned IOCTL_SIOCGIFMETRIC;\nextern unsigned IOCTL_SIOCGIFMTU;\nextern unsigned IOCTL_SIOCGIFNETMASK;\nextern unsigned IOCTL_SIOCGPGRP;\nextern unsigned IOCTL_SIOCSIFADDR;\nextern unsigned IOCTL_SIOCSIFBRDADDR;\nextern unsigned IOCTL_SIOCSIFDSTADDR;\nextern unsigned IOCTL_SIOCSIFFLAGS;\nextern unsigned IOCTL_SIOCSIFMETRIC;\nextern unsigned IOCTL_SIOCSIFMTU;\nextern unsigned IOCTL_SIOCSIFNETMASK;\nextern unsigned IOCTL_SIOCSPGRP;\nextern unsigned IOCTL_TIOCEXCL;\nextern unsigned IOCTL_TIOCGETD;\nextern unsigned IOCTL_TIOCGPGRP;\nextern unsigned IOCTL_TIOCGWINSZ;\nextern unsigned IOCTL_TIOCMBIC;\nextern unsigned IOCTL_TIOCMBIS;\nextern unsigned IOCTL_TIOCMGET;\nextern unsigned IOCTL_TIOCMSET;\nextern unsigned IOCTL_TIOCNOTTY;\nextern unsigned IOCTL_TIOCNXCL;\nextern unsigned IOCTL_TIOCOUTQ;\nextern unsigned IOCTL_TIOCPKT;\nextern unsigned IOCTL_TIOCSCTTY;\nextern unsigned IOCTL_TIOCSETD;\nextern unsigned IOCTL_TIOCSPGRP;\nextern unsigned 
IOCTL_TIOCSTI;\nextern unsigned IOCTL_TIOCSWINSZ;\nextern unsigned IOCTL_MTIOCGET;\nextern unsigned IOCTL_MTIOCTOP;\n\nextern const int si_SEGV_MAPERR;\nextern const int si_SEGV_ACCERR;\n}  // namespace __sanitizer\n\n#define CHECK_TYPE_SIZE(TYPE) \\\n  COMPILER_CHECK(sizeof(__sanitizer_##TYPE) == sizeof(TYPE))\n\n#define CHECK_SIZE_AND_OFFSET(CLASS, MEMBER)                       \\\n  COMPILER_CHECK(sizeof(((__sanitizer_##CLASS *) NULL)->MEMBER) == \\\n                 sizeof(((CLASS *) NULL)->MEMBER));                \\\n  COMPILER_CHECK(offsetof(__sanitizer_##CLASS, MEMBER) ==          \\\n                 offsetof(CLASS, MEMBER))\n\n// For sigaction, which is a function and struct at the same time,\n// and thus requires explicit \"struct\" in sizeof() expression.\n#define CHECK_STRUCT_SIZE_AND_OFFSET(CLASS, MEMBER)                       \\\n  COMPILER_CHECK(sizeof(((struct __sanitizer_##CLASS *) NULL)->MEMBER) == \\\n                 sizeof(((struct CLASS *) NULL)->MEMBER));                \\\n  COMPILER_CHECK(offsetof(struct __sanitizer_##CLASS, MEMBER) ==          \\\n                 offsetof(struct CLASS, MEMBER))\n\n#endif  // SANITIZER_SOLARIS\n\n#endif\n"
  },
  {
    "path": "runtime/sanitizer_common/sanitizer_posix.cpp",
    "content": "//===-- sanitizer_posix.cpp -----------------------------------------------===//\n//\n// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n// See https://llvm.org/LICENSE.txt for license information.\n// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n//\n//===----------------------------------------------------------------------===//\n//\n// This file is shared between AddressSanitizer and ThreadSanitizer\n// run-time libraries and implements POSIX-specific functions from\n// sanitizer_posix.h.\n//===----------------------------------------------------------------------===//\n\n#include \"sanitizer_platform.h\"\n\n#if SANITIZER_POSIX\n\n#include \"sanitizer_common.h\"\n#include \"sanitizer_file.h\"\n#include \"sanitizer_flags.h\"\n#include \"sanitizer_libc.h\"\n#include \"sanitizer_posix.h\"\n#include \"sanitizer_procmaps.h\"\n\n#include <errno.h>\n#include <fcntl.h>\n#include <signal.h>\n#include <sys/mman.h>\n\n#if SANITIZER_FREEBSD\n// The MAP_NORESERVE define has been removed in FreeBSD 11.x, and even before\n// that, it was never implemented.  
So just define it to zero.\n#undef  MAP_NORESERVE\n#define MAP_NORESERVE 0\n#endif\n\nnamespace __sanitizer {\n\n// ------------- sanitizer_common.h\nuptr GetMmapGranularity() {\n  return GetPageSize();\n}\n\nvoid *MmapOrDie(uptr size, const char *mem_type, bool raw_report) {\n  size = RoundUpTo(size, GetPageSizeCached());\n  uptr res = MmapNamed(nullptr, size, PROT_READ | PROT_WRITE,\n                       MAP_PRIVATE | MAP_ANON, mem_type);\n  int reserrno;\n  if (UNLIKELY(internal_iserror(res, &reserrno)))\n    ReportMmapFailureAndDie(size, mem_type, \"allocate\", reserrno, raw_report);\n  IncreaseTotalMmap(size);\n  return (void *)res;\n}\n\nvoid UnmapOrDie(void *addr, uptr size) {\n  if (!addr || !size) return;\n  uptr res = internal_munmap(addr, size);\n  if (UNLIKELY(internal_iserror(res))) {\n    Report(\"ERROR: %s failed to deallocate 0x%zx (%zd) bytes at address %p\\n\",\n           SanitizerToolName, size, size, addr);\n    CHECK(\"unable to unmap\" && 0);\n  }\n  DecreaseTotalMmap(size);\n}\n\nvoid *MmapOrDieOnFatalError(uptr size, const char *mem_type) {\n  size = RoundUpTo(size, GetPageSizeCached());\n  uptr res = MmapNamed(nullptr, size, PROT_READ | PROT_WRITE,\n                       MAP_PRIVATE | MAP_ANON, mem_type);\n  int reserrno;\n  if (UNLIKELY(internal_iserror(res, &reserrno))) {\n    if (reserrno == ENOMEM)\n      return nullptr;\n    ReportMmapFailureAndDie(size, mem_type, \"allocate\", reserrno);\n  }\n  IncreaseTotalMmap(size);\n  return (void *)res;\n}\n\n// We want to map a chunk of address space aligned to 'alignment'.\n// We do it by mapping a bit more and then unmapping redundant pieces.\n// We probably can do it with fewer syscalls in some OS-dependent way.\nvoid *MmapAlignedOrDieOnFatalError(uptr size, uptr alignment,\n                                   const char *mem_type) {\n  CHECK(IsPowerOfTwo(size));\n  CHECK(IsPowerOfTwo(alignment));\n  uptr map_size = size + alignment;\n  uptr map_res = (uptr)MmapOrDieOnFatalError(map_size, 
mem_type);\n  if (UNLIKELY(!map_res))\n    return nullptr;\n  uptr map_end = map_res + map_size;\n  uptr res = map_res;\n  if (!IsAligned(res, alignment)) {\n    res = (map_res + alignment - 1) & ~(alignment - 1);\n    UnmapOrDie((void*)map_res, res - map_res);\n  }\n  uptr end = res + size;\n  if (end != map_end)\n    UnmapOrDie((void*)end, map_end - end);\n  return (void*)res;\n}\n\nvoid *MmapNoReserveOrDie(uptr size, const char *mem_type) {\n  size = RoundUpTo(size, GetPageSizeCached());\n  uptr p = MmapNamed(nullptr, size, PROT_READ | PROT_WRITE,\n                     MAP_PRIVATE | MAP_ANON | MAP_NORESERVE, mem_type);\n  int reserrno;\n  if (UNLIKELY(internal_iserror(p, &reserrno)))\n    ReportMmapFailureAndDie(size, mem_type, \"allocate noreserve\", reserrno);\n  IncreaseTotalMmap(size);\n  return (void *)p;\n}\n\nstatic void *MmapFixedImpl(uptr fixed_addr, uptr size, bool tolerate_enomem,\n                           const char *name) {\n  size = RoundUpTo(size, GetPageSizeCached());\n  fixed_addr = RoundDownTo(fixed_addr, GetPageSizeCached());\n  uptr p = MmapNamed((void *)fixed_addr, size, PROT_READ | PROT_WRITE,\n                     MAP_PRIVATE | MAP_ANON | MAP_FIXED, name);\n  int reserrno;\n  if (UNLIKELY(internal_iserror(p, &reserrno))) {\n    if (tolerate_enomem && reserrno == ENOMEM)\n      return nullptr;\n    char mem_type[40];\n    internal_snprintf(mem_type, sizeof(mem_type), \"memory at address 0x%zx\",\n                      fixed_addr);\n    ReportMmapFailureAndDie(size, mem_type, \"allocate\", reserrno);\n  }\n  IncreaseTotalMmap(size);\n  return (void *)p;\n}\n\nvoid *MmapFixedOrDie(uptr fixed_addr, uptr size, const char *name) {\n  return MmapFixedImpl(fixed_addr, size, false /*tolerate_enomem*/, name);\n}\n\nvoid *MmapFixedOrDieOnFatalError(uptr fixed_addr, uptr size, const char *name) {\n  return MmapFixedImpl(fixed_addr, size, true /*tolerate_enomem*/, name);\n}\n\nbool MprotectNoAccess(uptr addr, uptr size) {\n  return 0 == 
internal_mprotect((void*)addr, size, PROT_NONE);\n}\n\nbool MprotectReadOnly(uptr addr, uptr size) {\n  return 0 == internal_mprotect((void *)addr, size, PROT_READ);\n}\n\n#if !SANITIZER_MAC\nvoid MprotectMallocZones(void *addr, int prot) {}\n#endif\n\nfd_t OpenFile(const char *filename, FileAccessMode mode, error_t *errno_p) {\n  if (ShouldMockFailureToOpen(filename))\n    return kInvalidFd;\n  int flags;\n  switch (mode) {\n    case RdOnly: flags = O_RDONLY; break;\n    case WrOnly: flags = O_WRONLY | O_CREAT | O_TRUNC; break;\n    case RdWr: flags = O_RDWR | O_CREAT; break;\n  }\n  fd_t res = internal_open(filename, flags, 0660);\n  if (internal_iserror(res, errno_p))\n    return kInvalidFd;\n  return ReserveStandardFds(res);\n}\n\nvoid CloseFile(fd_t fd) {\n  internal_close(fd);\n}\n\nbool ReadFromFile(fd_t fd, void *buff, uptr buff_size, uptr *bytes_read,\n                  error_t *error_p) {\n  uptr res = internal_read(fd, buff, buff_size);\n  if (internal_iserror(res, error_p))\n    return false;\n  if (bytes_read)\n    *bytes_read = res;\n  return true;\n}\n\nbool WriteToFile(fd_t fd, const void *buff, uptr buff_size, uptr *bytes_written,\n                 error_t *error_p) {\n  uptr res = internal_write(fd, buff, buff_size);\n  if (internal_iserror(res, error_p))\n    return false;\n  if (bytes_written)\n    *bytes_written = res;\n  return true;\n}\n\nvoid *MapFileToMemory(const char *file_name, uptr *buff_size) {\n  fd_t fd = OpenFile(file_name, RdOnly);\n  CHECK(fd != kInvalidFd);\n  uptr fsize = internal_filesize(fd);\n  CHECK_NE(fsize, (uptr)-1);\n  CHECK_GT(fsize, 0);\n  *buff_size = RoundUpTo(fsize, GetPageSizeCached());\n  uptr map = internal_mmap(nullptr, *buff_size, PROT_READ, MAP_PRIVATE, fd, 0);\n  return internal_iserror(map) ? 
nullptr : (void *)map;\n}\n\nvoid *MapWritableFileToMemory(void *addr, uptr size, fd_t fd, OFF_T offset) {\n  uptr flags = MAP_SHARED;\n  if (addr) flags |= MAP_FIXED;\n  uptr p = internal_mmap(addr, size, PROT_READ | PROT_WRITE, flags, fd, offset);\n  int mmap_errno = 0;\n  if (internal_iserror(p, &mmap_errno)) {\n    Printf(\"could not map writable file (%d, %lld, %zu): %zd, errno: %d\\n\",\n           fd, (long long)offset, size, p, mmap_errno);\n    return nullptr;\n  }\n  return (void *)p;\n}\n\nstatic inline bool IntervalsAreSeparate(uptr start1, uptr end1,\n                                        uptr start2, uptr end2) {\n  CHECK(start1 <= end1);\n  CHECK(start2 <= end2);\n  return (end1 < start2) || (end2 < start1);\n}\n\n// FIXME: this is thread-unsafe, but should not cause problems most of the time.\n// When the shadow is mapped only a single thread usually exists (plus maybe\n// several worker threads on Mac, which aren't expected to map big chunks of\n// memory).\nbool MemoryRangeIsAvailable(uptr range_start, uptr range_end) {\n  MemoryMappingLayout proc_maps(/*cache_enabled*/true);\n  if (proc_maps.Error())\n    return true; // and hope for the best\n  MemoryMappedSegment segment;\n  while (proc_maps.Next(&segment)) {\n    if (segment.start == segment.end) continue;  // Empty range.\n    CHECK_NE(0, segment.end);\n    if (!IntervalsAreSeparate(segment.start, segment.end - 1, range_start,\n                              range_end))\n      return false;\n  }\n  return true;\n}\n\n#if !SANITIZER_MAC\nvoid DumpProcessMap() {\n  MemoryMappingLayout proc_maps(/*cache_enabled*/true);\n  const sptr kBufSize = 4095;\n  char *filename = (char*)MmapOrDie(kBufSize, __func__);\n  MemoryMappedSegment segment(filename, kBufSize);\n  Report(\"Process memory map follows:\\n\");\n  while (proc_maps.Next(&segment)) {\n    Printf(\"\\t%p-%p\\t%s\\n\", (void *)segment.start, (void *)segment.end,\n           segment.filename);\n  }\n  Report(\"End of process memory 
map.\\n\");\n  UnmapOrDie(filename, kBufSize);\n}\n#endif\n\nconst char *GetPwd() {\n  return GetEnv(\"PWD\");\n}\n\nbool IsPathSeparator(const char c) {\n  return c == '/';\n}\n\nbool IsAbsolutePath(const char *path) {\n  return path != nullptr && IsPathSeparator(path[0]);\n}\n\nvoid ReportFile::Write(const char *buffer, uptr length) {\n  SpinMutexLock l(mu);\n  ReopenIfNecessary();\n  internal_write(fd, buffer, length);\n}\n\nbool GetCodeRangeForFile(const char *module, uptr *start, uptr *end) {\n  MemoryMappingLayout proc_maps(/*cache_enabled*/false);\n  InternalMmapVector<char> buff(kMaxPathLength);\n  MemoryMappedSegment segment(buff.data(), buff.size());\n  while (proc_maps.Next(&segment)) {\n    if (segment.IsExecutable() &&\n        internal_strcmp(module, segment.filename) == 0) {\n      *start = segment.start;\n      *end = segment.end;\n      return true;\n    }\n  }\n  return false;\n}\n\nuptr SignalContext::GetAddress() const {\n  auto si = static_cast<const siginfo_t *>(siginfo);\n  return (uptr)si->si_addr;\n}\n\nbool SignalContext::IsMemoryAccess() const {\n  auto si = static_cast<const siginfo_t *>(siginfo);\n  return si->si_signo == SIGSEGV || si->si_signo == SIGBUS;\n}\n\nint SignalContext::GetType() const {\n  return static_cast<const siginfo_t *>(siginfo)->si_signo;\n}\n\nconst char *SignalContext::Describe() const {\n  switch (GetType()) {\n    case SIGFPE:\n      return \"FPE\";\n    case SIGILL:\n      return \"ILL\";\n    case SIGABRT:\n      return \"ABRT\";\n    case SIGSEGV:\n      return \"SEGV\";\n    case SIGBUS:\n      return \"BUS\";\n    case SIGTRAP:\n      return \"TRAP\";\n  }\n  return \"UNKNOWN SIGNAL\";\n}\n\nfd_t ReserveStandardFds(fd_t fd) {\n  CHECK_GE(fd, 0);\n  if (fd > 2)\n    return fd;\n  bool used[3];\n  internal_memset(used, 0, sizeof(used));\n  while (fd <= 2) {\n    used[fd] = true;\n    fd = internal_dup(fd);\n  }\n  for (int i = 0; i <= 2; ++i)\n    if (used[i])\n      internal_close(i);\n  return fd;\n}\n\nbool 
ShouldMockFailureToOpen(const char *path) {\n  return common_flags()->test_only_emulate_no_memorymap &&\n         internal_strncmp(path, \"/proc/\", 6) == 0;\n}\n\n#if SANITIZER_LINUX && !SANITIZER_ANDROID && !SANITIZER_GO\nint GetNamedMappingFd(const char *name, uptr size, int *flags) {\n  if (!common_flags()->decorate_proc_maps || !name)\n    return -1;\n  char shmname[200];\n  CHECK(internal_strlen(name) < sizeof(shmname) - 10);\n  internal_snprintf(shmname, sizeof(shmname), \"/dev/shm/%zu [%s]\",\n                    internal_getpid(), name);\n  int o_cloexec = 0;\n#if defined(O_CLOEXEC)\n  o_cloexec = O_CLOEXEC;\n#endif\n  int fd = ReserveStandardFds(\n      internal_open(shmname, O_RDWR | O_CREAT | O_TRUNC | o_cloexec, S_IRWXU));\n  CHECK_GE(fd, 0);\n  int res = internal_ftruncate(fd, size);\n#if !defined(O_CLOEXEC)\n  res = fcntl(fd, F_SETFD, FD_CLOEXEC);\n  CHECK_EQ(0, res);\n#endif\n  CHECK_EQ(0, res);\n  res = internal_unlink(shmname);\n  CHECK_EQ(0, res);\n  *flags &= ~(MAP_ANON | MAP_ANONYMOUS);\n  return fd;\n}\n#else\nint GetNamedMappingFd(const char *name, uptr size, int *flags) {\n  return -1;\n}\n#endif\n\n#if SANITIZER_ANDROID\n#define PR_SET_VMA 0x53564d41\n#define PR_SET_VMA_ANON_NAME 0\nvoid DecorateMapping(uptr addr, uptr size, const char *name) {\n  if (!common_flags()->decorate_proc_maps || !name)\n    return;\n  internal_prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME, addr, size, (uptr)name);\n}\n#else\nvoid DecorateMapping(uptr addr, uptr size, const char *name) {\n}\n#endif\n\nuptr MmapNamed(void *addr, uptr length, int prot, int flags, const char *name) {\n  int fd = GetNamedMappingFd(name, length, &flags);\n  uptr res = internal_mmap(addr, length, prot, flags, fd, 0);\n  if (!internal_iserror(res))\n    DecorateMapping(res, length, name);\n  return res;\n}\n\n\n} // namespace __sanitizer\n\n#endif // SANITIZER_POSIX\n"
  },
  {
    "path": "runtime/sanitizer_common/sanitizer_posix.h",
    "content": "//===-- sanitizer_posix.h -------------------------------------------------===//\n//\n// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n// See https://llvm.org/LICENSE.txt for license information.\n// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n//\n//===----------------------------------------------------------------------===//\n//\n// This file is shared between AddressSanitizer and ThreadSanitizer\n// run-time libraries and declares some useful POSIX-specific functions.\n//===----------------------------------------------------------------------===//\n#ifndef SANITIZER_POSIX_H\n#define SANITIZER_POSIX_H\n\n// ----------- ATTENTION -------------\n// This header should NOT include any other headers from sanitizer runtime.\n#include \"sanitizer_internal_defs.h\"\n#include \"sanitizer_platform_limits_freebsd.h\"\n#include \"sanitizer_platform_limits_netbsd.h\"\n#include \"sanitizer_platform_limits_posix.h\"\n#include \"sanitizer_platform_limits_solaris.h\"\n\n#if SANITIZER_POSIX\n\nnamespace __sanitizer {\n\n// I/O\n// Don't use directly, use __sanitizer::OpenFile() instead.\nuptr internal_open(const char *filename, int flags);\nuptr internal_open(const char *filename, int flags, u32 mode);\nuptr internal_close(fd_t fd);\n\nuptr internal_read(fd_t fd, void *buf, uptr count);\nuptr internal_write(fd_t fd, const void *buf, uptr count);\n\n// Memory\nuptr internal_mmap(void *addr, uptr length, int prot, int flags,\n                   int fd, u64 offset);\nuptr internal_munmap(void *addr, uptr length);\n#if SANITIZER_LINUX\nuptr internal_mremap(void *old_address, uptr old_size, uptr new_size, int flags,\n                     void *new_address);\n#endif\nint internal_mprotect(void *addr, uptr length, int prot);\nint internal_madvise(uptr addr, uptr length, int advice);\n\n// OS\nuptr internal_filesize(fd_t fd);  // -1 on error.\nuptr internal_stat(const char *path, void *buf);\nuptr internal_lstat(const char 
*path, void *buf);\nuptr internal_fstat(fd_t fd, void *buf);\nuptr internal_dup(int oldfd);\nuptr internal_dup2(int oldfd, int newfd);\nuptr internal_readlink(const char *path, char *buf, uptr bufsize);\nuptr internal_unlink(const char *path);\nuptr internal_rename(const char *oldpath, const char *newpath);\nuptr internal_lseek(fd_t fd, OFF_T offset, int whence);\n\n#if SANITIZER_NETBSD\nuptr internal_ptrace(int request, int pid, void *addr, int data);\n#else\nuptr internal_ptrace(int request, int pid, void *addr, void *data);\n#endif\nuptr internal_waitpid(int pid, int *status, int options);\n\nint internal_fork();\nfd_t internal_spawn(const char *argv[], const char *envp[], pid_t *pid);\n\nint internal_sysctl(const int *name, unsigned int namelen, void *oldp,\n                    uptr *oldlenp, const void *newp, uptr newlen);\nint internal_sysctlbyname(const char *sname, void *oldp, uptr *oldlenp,\n                          const void *newp, uptr newlen);\n\n// These functions call appropriate pthread_ functions directly, bypassing\n// the interceptor. 
They are weak and may not be present in some tools.\nSANITIZER_WEAK_ATTRIBUTE\nint real_pthread_create(void *th, void *attr, void *(*callback)(void *),\n                        void *param);\nSANITIZER_WEAK_ATTRIBUTE\nint real_pthread_join(void *th, void **ret);\n\n#define DEFINE_REAL_PTHREAD_FUNCTIONS                                          \\\n  namespace __sanitizer {                                                      \\\n  int real_pthread_create(void *th, void *attr, void *(*callback)(void *),     \\\n                          void *param) {                                       \\\n    return REAL(pthread_create)(th, attr, callback, param);                    \\\n  }                                                                            \\\n  int real_pthread_join(void *th, void **ret) {                                \\\n    return REAL(pthread_join(th, ret));                                        \\\n  }                                                                            \\\n  }  // namespace __sanitizer\n\nint my_pthread_attr_getstack(void *attr, void **addr, uptr *size);\n\n// A routine named real_sigaction() must be implemented by each sanitizer in\n// order for internal_sigaction() to bypass interceptors.\nint internal_sigaction(int signum, const void *act, void *oldact);\nvoid internal_sigfillset(__sanitizer_sigset_t *set);\nvoid internal_sigemptyset(__sanitizer_sigset_t *set);\nbool internal_sigismember(__sanitizer_sigset_t *set, int signum);\n\nuptr internal_execve(const char *filename, char *const argv[],\n                     char *const envp[]);\n\nbool IsStateDetached(int state);\n\n// Move the fd out of {0, 1, 2} range.\nfd_t ReserveStandardFds(fd_t fd);\n\nbool ShouldMockFailureToOpen(const char *path);\n\n// Create a non-file mapping with a given /proc/self/maps name.\nuptr MmapNamed(void *addr, uptr length, int prot, int flags, const char *name);\n\n// Platforms should implement at most one of these.\n// 1. 
Provide a pre-decorated file descriptor to use instead of an anonymous\n// mapping.\nint GetNamedMappingFd(const char *name, uptr size, int *flags);\n// 2. Add name to an existing anonymous mapping. The caller must keep *name\n// alive at least as long as the mapping exists.\nvoid DecorateMapping(uptr addr, uptr size, const char *name);\n\n\n}  // namespace __sanitizer\n\n#endif  // SANITIZER_POSIX\n\n#endif  // SANITIZER_POSIX_H\n"
  },
  {
    "path": "runtime/sanitizer_common/sanitizer_posix_libcdep.cpp",
    "content": "//===-- sanitizer_posix_libcdep.cpp ---------------------------------------===//\n//\n// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n// See https://llvm.org/LICENSE.txt for license information.\n// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n//\n//===----------------------------------------------------------------------===//\n//\n// This file is shared between AddressSanitizer and ThreadSanitizer\n// run-time libraries and implements libc-dependent POSIX-specific functions\n// from sanitizer_libc.h.\n//===----------------------------------------------------------------------===//\n\n#include \"sanitizer_platform.h\"\n\n#if SANITIZER_POSIX\n\n#include \"sanitizer_common.h\"\n#include \"sanitizer_flags.h\"\n#include \"sanitizer_platform_limits_netbsd.h\"\n#include \"sanitizer_platform_limits_posix.h\"\n#include \"sanitizer_platform_limits_solaris.h\"\n#include \"sanitizer_posix.h\"\n#include \"sanitizer_procmaps.h\"\n\n#include <errno.h>\n#include <fcntl.h>\n#include <pthread.h>\n#include <signal.h>\n#include <stdlib.h>\n#include <sys/mman.h>\n#include <sys/resource.h>\n#include <sys/stat.h>\n#include <sys/time.h>\n#include <sys/types.h>\n#include <sys/wait.h>\n#include <unistd.h>\n\n#if SANITIZER_FREEBSD\n// The MAP_NORESERVE define has been removed in FreeBSD 11.x, and even before\n// that, it was never implemented.  
So just define it to zero.\n#undef MAP_NORESERVE\n#define MAP_NORESERVE 0\n#endif\n\ntypedef void (*sa_sigaction_t)(int, siginfo_t *, void *);\n\nnamespace __sanitizer {\n\nu32 GetUid() {\n  return getuid();\n}\n\nuptr GetThreadSelf() {\n  return (uptr)pthread_self();\n}\n\nvoid ReleaseMemoryPagesToOS(uptr beg, uptr end) {\n  uptr page_size = GetPageSizeCached();\n  uptr beg_aligned = RoundUpTo(beg, page_size);\n  uptr end_aligned = RoundDownTo(end, page_size);\n  if (beg_aligned < end_aligned)\n    internal_madvise(beg_aligned, end_aligned - beg_aligned,\n                     SANITIZER_MADVISE_DONTNEED);\n}\n\nvoid SetShadowRegionHugePageMode(uptr addr, uptr size) {\n#ifdef MADV_NOHUGEPAGE  // May not be defined on old systems.\n  if (common_flags()->no_huge_pages_for_shadow)\n    internal_madvise(addr, size, MADV_NOHUGEPAGE);\n  else\n    internal_madvise(addr, size, MADV_HUGEPAGE);\n#endif  // MADV_NOHUGEPAGE\n}\n\nbool DontDumpShadowMemory(uptr addr, uptr length) {\n#if defined(MADV_DONTDUMP)\n  return internal_madvise(addr, length, MADV_DONTDUMP) == 0;\n#elif defined(MADV_NOCORE)\n  return internal_madvise(addr, length, MADV_NOCORE) == 0;\n#else\n  return true;\n#endif  // MADV_DONTDUMP\n}\n\nstatic rlim_t getlim(int res) {\n  rlimit rlim;\n  CHECK_EQ(0, getrlimit(res, &rlim));\n  return rlim.rlim_cur;\n}\n\nstatic void setlim(int res, rlim_t lim) {\n  struct rlimit rlim;\n  if (getrlimit(res, const_cast<struct rlimit *>(&rlim))) {\n    Report(\"ERROR: %s getrlimit() failed %d\\n\", SanitizerToolName, errno);\n    Die();\n  }\n  rlim.rlim_cur = lim;\n  if (setrlimit(res, const_cast<struct rlimit *>(&rlim))) {\n    Report(\"ERROR: %s setrlimit() failed %d\\n\", SanitizerToolName, errno);\n    Die();\n  }\n}\n\nvoid DisableCoreDumperIfNecessary() {\n  if (common_flags()->disable_coredump) {\n    setlim(RLIMIT_CORE, 0);\n  }\n}\n\nbool StackSizeIsUnlimited() {\n  rlim_t stack_size = getlim(RLIMIT_STACK);\n  return (stack_size == RLIM_INFINITY);\n}\n\nvoid 
more costly than you think. However GetAltStackSize is only called 2-3 times
It is not required by man 2 sigaltstack now (they're using\n  // malloc()).\n  altstack.ss_size = GetAltStackSize();\n  altstack.ss_sp = (char *)MmapOrDie(altstack.ss_size, __func__);\n  altstack.ss_flags = 0;\n  CHECK_EQ(0, sigaltstack(&altstack, nullptr));\n}\n\nvoid UnsetAlternateSignalStack() {\n  stack_t altstack, oldstack;\n  altstack.ss_sp = nullptr;\n  altstack.ss_flags = SS_DISABLE;\n  altstack.ss_size = GetAltStackSize();  // Some sane value required on Darwin.\n  CHECK_EQ(0, sigaltstack(&altstack, &oldstack));\n  UnmapOrDie(oldstack.ss_sp, oldstack.ss_size);\n}\n\nstatic void MaybeInstallSigaction(int signum,\n                                  SignalHandlerType handler) {\n  if (GetHandleSignalMode(signum) == kHandleSignalNo) return;\n\n  struct sigaction sigact;\n  internal_memset(&sigact, 0, sizeof(sigact));\n  sigact.sa_sigaction = (sa_sigaction_t)handler;\n  // Do not block the signal from being received in that signal's handler.\n  // Clients are responsible for handling this correctly.\n  sigact.sa_flags = SA_SIGINFO | SA_NODEFER;\n  if (common_flags()->use_sigaltstack) sigact.sa_flags |= SA_ONSTACK;\n  CHECK_EQ(0, internal_sigaction(signum, &sigact, nullptr));\n  VReport(1, \"Installed the sigaction for signal %d\\n\", signum);\n}\n\nvoid InstallDeadlySignalHandlers(SignalHandlerType handler) {\n  // Set the alternate signal stack for the main thread.\n  // This will cause SetAlternateSignalStack to be called twice, but the stack\n  // will be actually set only once.\n  if (common_flags()->use_sigaltstack) SetAlternateSignalStack();\n  MaybeInstallSigaction(SIGSEGV, handler);\n  MaybeInstallSigaction(SIGBUS, handler);\n  MaybeInstallSigaction(SIGABRT, handler);\n  MaybeInstallSigaction(SIGFPE, handler);\n  MaybeInstallSigaction(SIGILL, handler);\n  MaybeInstallSigaction(SIGTRAP, handler);\n}\n\nbool SignalContext::IsStackOverflow() const {\n  // Access at a reasonable offset above SP, or slightly below it (to account\n  // for x86_64 or PowerPC 
redzone, ARM push of multiple registers, etc) is\n  // probably a stack overflow.\n#ifdef __s390__\n  // On s390, the fault address in siginfo points to start of the page, not\n  // to the precise word that was accessed.  Mask off the low bits of sp to\n  // take it into account.\n  bool IsStackAccess = addr >= (sp & ~0xFFF) && addr < sp + 0xFFFF;\n#else\n  // Let's accept up to a page size away from top of stack. Things like stack\n  // probing can trigger accesses with such large offsets.\n  bool IsStackAccess = addr + GetPageSizeCached() > sp && addr < sp + 0xFFFF;\n#endif\n\n#if __powerpc__\n  // Large stack frames can be allocated with e.g.\n  //   lis r0,-10000\n  //   stdux r1,r1,r0 # store sp to [sp-10000] and update sp by -10000\n  // If the store faults then sp will not have been updated, so test above\n  // will not work, because the fault address will be more than just \"slightly\"\n  // below sp.\n  if (!IsStackAccess && IsAccessibleMemoryRange(pc, 4)) {\n    u32 inst = *(unsigned *)pc;\n    u32 ra = (inst >> 16) & 0x1F;\n    u32 opcd = inst >> 26;\n    u32 xo = (inst >> 1) & 0x3FF;\n    // Check for store-with-update to sp. 
// than hitting the guard page or unmapped memory, like, for example,
Luckily, neither the\n  // process will be able to load additional libraries, so it's fine to use the\n  // cached mappings.\n  MemoryMappingLayout::CacheMemoryMappings();\n}\n\nstatic bool MmapFixed(uptr fixed_addr, uptr size, int additional_flags,\n                      const char *name) {\n  size = RoundUpTo(size, GetPageSizeCached());\n  fixed_addr = RoundDownTo(fixed_addr, GetPageSizeCached());\n  uptr p =\n      MmapNamed((void *)fixed_addr, size, PROT_READ | PROT_WRITE,\n                MAP_PRIVATE | MAP_FIXED | additional_flags | MAP_ANON, name);\n  int reserrno;\n  if (internal_iserror(p, &reserrno)) {\n    Report(\"ERROR: %s failed to \"\n           \"allocate 0x%zx (%zd) bytes at address %zx (errno: %d)\\n\",\n           SanitizerToolName, size, size, fixed_addr, reserrno);\n    return false;\n  }\n  IncreaseTotalMmap(size);\n  return true;\n}\n\nbool MmapFixedNoReserve(uptr fixed_addr, uptr size, const char *name) {\n  return MmapFixed(fixed_addr, size, MAP_NORESERVE, name);\n}\n\nbool MmapFixedSuperNoReserve(uptr fixed_addr, uptr size, const char *name) {\n#if SANITIZER_FREEBSD\n  if (common_flags()->no_huge_pages_for_shadow)\n    return MmapFixedNoReserve(fixed_addr, size, name);\n  // MAP_NORESERVE is implicit with FreeBSD\n  return MmapFixed(fixed_addr, size, MAP_ALIGNED_SUPER, name);\n#else\n  bool r = MmapFixedNoReserve(fixed_addr, size, name);\n  if (r)\n    SetShadowRegionHugePageMode(fixed_addr, size);\n  return r;\n#endif\n}\n\nuptr ReservedAddressRange::Init(uptr size, const char *name, uptr fixed_addr) {\n  base_ = fixed_addr ? 
MmapFixedNoAccess(fixed_addr, size, name)\n                     : MmapNoAccess(size);\n  size_ = size;\n  name_ = name;\n  (void)os_handle_;  // unsupported\n  return reinterpret_cast<uptr>(base_);\n}\n\n// Uses fixed_addr for now.\n// Will use offset instead once we've implemented this function for real.\nuptr ReservedAddressRange::Map(uptr fixed_addr, uptr size, const char *name) {\n  return reinterpret_cast<uptr>(\n      MmapFixedOrDieOnFatalError(fixed_addr, size, name));\n}\n\nuptr ReservedAddressRange::MapOrDie(uptr fixed_addr, uptr size,\n                                    const char *name) {\n  return reinterpret_cast<uptr>(MmapFixedOrDie(fixed_addr, size, name));\n}\n\nvoid ReservedAddressRange::Unmap(uptr addr, uptr size) {\n  CHECK_LE(size, size_);\n  if (addr == reinterpret_cast<uptr>(base_))\n    // If we unmap the whole range, just null out the base.\n    base_ = (size == size_) ? nullptr : reinterpret_cast<void*>(addr + size);\n  else\n    CHECK_EQ(addr + size, reinterpret_cast<uptr>(base_) + size_);\n  size_ -= size;\n  UnmapOrDie(reinterpret_cast<void*>(addr), size);\n}\n\nvoid *MmapFixedNoAccess(uptr fixed_addr, uptr size, const char *name) {\n  return (void *)MmapNamed((void *)fixed_addr, size, PROT_NONE,\n                           MAP_PRIVATE | MAP_FIXED | MAP_NORESERVE | MAP_ANON,\n                           name);\n}\n\nvoid *MmapNoAccess(uptr size) {\n  unsigned flags = MAP_PRIVATE | MAP_ANON | MAP_NORESERVE;\n  return (void *)internal_mmap(nullptr, size, PROT_NONE, flags, -1, 0);\n}\n\n// This function is defined elsewhere if we intercepted pthread_attr_getstack.\nextern \"C\" {\nSANITIZER_WEAK_ATTRIBUTE int\nreal_pthread_attr_getstack(void *attr, void **addr, size_t *size);\n} // extern \"C\"\n\nint my_pthread_attr_getstack(void *attr, void **addr, uptr *size) {\n#if !SANITIZER_GO && !SANITIZER_MAC\n  if (&real_pthread_attr_getstack)\n    return real_pthread_attr_getstack((pthread_attr_t *)attr, addr,\n                                     
 (size_t *)size);\n#endif\n  return pthread_attr_getstack((pthread_attr_t *)attr, addr, (size_t *)size);\n}\n\n#if !SANITIZER_GO\nvoid AdjustStackSize(void *attr_) {\n  pthread_attr_t *attr = (pthread_attr_t *)attr_;\n  uptr stackaddr = 0;\n  uptr stacksize = 0;\n  my_pthread_attr_getstack(attr, (void**)&stackaddr, &stacksize);\n  // GLibC will return (0 - stacksize) as the stack address in the case when\n  // stacksize is set, but stackaddr is not.\n  bool stack_set = (stackaddr != 0) && (stackaddr + stacksize != 0);\n  // We place a lot of tool data into TLS, account for that.\n  const uptr minstacksize = GetTlsSize() + 128*1024;\n  if (stacksize < minstacksize) {\n    if (!stack_set) {\n      if (stacksize != 0) {\n        VPrintf(1, \"Sanitizer: increasing stacksize %zu->%zu\\n\", stacksize,\n                minstacksize);\n        pthread_attr_setstacksize(attr, minstacksize);\n      }\n    } else {\n      Printf(\"Sanitizer: pre-allocated stack size is insufficient: \"\n             \"%zu < %zu\\n\", stacksize, minstacksize);\n      Printf(\"Sanitizer: pthread_create is likely to fail.\\n\");\n    }\n  }\n}\n#endif // !SANITIZER_GO\n\npid_t StartSubprocess(const char *program, const char *const argv[],\n                      const char *const envp[], fd_t stdin_fd, fd_t stdout_fd,\n                      fd_t stderr_fd) {\n  auto file_closer = at_scope_exit([&] {\n    if (stdin_fd != kInvalidFd) {\n      internal_close(stdin_fd);\n    }\n    if (stdout_fd != kInvalidFd) {\n      internal_close(stdout_fd);\n    }\n    if (stderr_fd != kInvalidFd) {\n      internal_close(stderr_fd);\n    }\n  });\n\n  int pid = internal_fork();\n\n  if (pid < 0) {\n    int rverrno;\n    if (internal_iserror(pid, &rverrno)) {\n      Report(\"WARNING: failed to fork (errno %d)\\n\", rverrno);\n    }\n    return pid;\n  }\n\n  if (pid == 0) {\n    // Child subprocess\n    if (stdin_fd != kInvalidFd) {\n      internal_close(STDIN_FILENO);\n      internal_dup2(stdin_fd, 
STDIN_FILENO);\n      internal_close(stdin_fd);\n    }\n    if (stdout_fd != kInvalidFd) {\n      internal_close(STDOUT_FILENO);\n      internal_dup2(stdout_fd, STDOUT_FILENO);\n      internal_close(stdout_fd);\n    }\n    if (stderr_fd != kInvalidFd) {\n      internal_close(STDERR_FILENO);\n      internal_dup2(stderr_fd, STDERR_FILENO);\n      internal_close(stderr_fd);\n    }\n\n    for (int fd = sysconf(_SC_OPEN_MAX); fd > 2; fd--) internal_close(fd);\n\n    internal_execve(program, const_cast<char **>(&argv[0]),\n                    const_cast<char *const *>(envp));\n    internal__exit(1);\n  }\n\n  return pid;\n}\n\nbool IsProcessRunning(pid_t pid) {\n  int process_status;\n  uptr waitpid_status = internal_waitpid(pid, &process_status, WNOHANG);\n  int local_errno;\n  if (internal_iserror(waitpid_status, &local_errno)) {\n    VReport(1, \"Waiting on the process failed (errno %d).\\n\", local_errno);\n    return false;\n  }\n  return waitpid_status == 0;\n}\n\nint WaitForProcess(pid_t pid) {\n  int process_status;\n  uptr waitpid_status = internal_waitpid(pid, &process_status, 0);\n  int local_errno;\n  if (internal_iserror(waitpid_status, &local_errno)) {\n    VReport(1, \"Waiting on the process failed (errno %d).\\n\", local_errno);\n    return -1;\n  }\n  return process_status;\n}\n\nbool IsStateDetached(int state) {\n  return state == PTHREAD_CREATE_DETACHED;\n}\n\n} // namespace __sanitizer\n\n#endif // SANITIZER_POSIX\n"
  },
  {
    "path": "runtime/sanitizer_common/sanitizer_printf.cpp",
    "content": "//===-- sanitizer_printf.cpp ----------------------------------------------===//\n//\n// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n// See https://llvm.org/LICENSE.txt for license information.\n// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n//\n//===----------------------------------------------------------------------===//\n//\n// This file is shared between AddressSanitizer and ThreadSanitizer.\n//\n// Internal printf function, used inside run-time libraries.\n// We can't use libc printf because we intercept some of the functions used\n// inside it.\n//===----------------------------------------------------------------------===//\n\n#include \"sanitizer_common.h\"\n#include \"sanitizer_flags.h\"\n#include \"sanitizer_libc.h\"\n\n#include <stdio.h>\n#include <stdarg.h>\n\n#if SANITIZER_WINDOWS && defined(_MSC_VER) && _MSC_VER < 1800 &&               \\\n      !defined(va_copy)\n# define va_copy(dst, src) ((dst) = (src))\n#endif\n\nnamespace __sanitizer {\n\nstatic int AppendChar(char **buff, const char *buff_end, char c) {\n  if (*buff < buff_end) {\n    **buff = c;\n    (*buff)++;\n  }\n  return 1;\n}\n\n// Appends number in a given base to buffer. 
If its length is less than\n// |minimal_num_length|, it is padded with leading zeroes or spaces, depending\n// on the value of |pad_with_zero|.\nstatic int AppendNumber(char **buff, const char *buff_end, u64 absolute_value,\n                        u8 base, u8 minimal_num_length, bool pad_with_zero,\n                        bool negative, bool uppercase) {\n  uptr const kMaxLen = 30;\n  RAW_CHECK(base == 10 || base == 16);\n  RAW_CHECK(base == 10 || !negative);\n  RAW_CHECK(absolute_value || !negative);\n  RAW_CHECK(minimal_num_length < kMaxLen);\n  int result = 0;\n  if (negative && minimal_num_length)\n    --minimal_num_length;\n  if (negative && pad_with_zero)\n    result += AppendChar(buff, buff_end, '-');\n  uptr num_buffer[kMaxLen];\n  int pos = 0;\n  do {\n    RAW_CHECK_MSG((uptr)pos < kMaxLen, \"AppendNumber buffer overflow\");\n    num_buffer[pos++] = absolute_value % base;\n    absolute_value /= base;\n  } while (absolute_value > 0);\n  if (pos < minimal_num_length) {\n    // Make sure compiler doesn't insert call to memset here.\n    internal_memset(&num_buffer[pos], 0,\n                    sizeof(num_buffer[0]) * (minimal_num_length - pos));\n    pos = minimal_num_length;\n  }\n  RAW_CHECK(pos > 0);\n  pos--;\n  for (; pos >= 0 && num_buffer[pos] == 0; pos--) {\n    char c = (pad_with_zero || pos == 0) ? '0' : ' ';\n    result += AppendChar(buff, buff_end, c);\n  }\n  if (negative && !pad_with_zero) result += AppendChar(buff, buff_end, '-');\n  for (; pos >= 0; pos--) {\n    char digit = static_cast<char>(num_buffer[pos]);\n    digit = (digit < 10) ? '0' + digit : (uppercase ? 
'A' : 'a') + digit - 10;\n    result += AppendChar(buff, buff_end, digit);\n  }\n  return result;\n}\n\nstatic int AppendUnsigned(char **buff, const char *buff_end, u64 num, u8 base,\n                          u8 minimal_num_length, bool pad_with_zero,\n                          bool uppercase) {\n  return AppendNumber(buff, buff_end, num, base, minimal_num_length,\n                      pad_with_zero, false /* negative */, uppercase);\n}\n\nstatic int AppendSignedDecimal(char **buff, const char *buff_end, s64 num,\n                               u8 minimal_num_length, bool pad_with_zero) {\n  bool negative = (num < 0);\n  return AppendNumber(buff, buff_end, (u64)(negative ? -num : num), 10,\n                      minimal_num_length, pad_with_zero, negative,\n                      false /* uppercase */);\n}\n\n\n// Use the fact that explicitly requesting 0 width (%0s) results in UB and\n// interpret width == 0 as \"no width requested\":\n// width == 0 - no width requested\n// width  < 0 - left-justify s within and pad it to -width chars, if necessary\n// width  > 0 - right-justify s, not implemented yet\nstatic int AppendString(char **buff, const char *buff_end, int width,\n                        int max_chars, const char *s) {\n  if (!s)\n    s = \"<null>\";\n  int result = 0;\n  for (; *s; s++) {\n    if (max_chars >= 0 && result >= max_chars)\n      break;\n    result += AppendChar(buff, buff_end, *s);\n  }\n  // Only the left justified strings are supported.\n  while (width < -result)\n    result += AppendChar(buff, buff_end, ' ');\n  return result;\n}\n\nstatic int AppendPointer(char **buff, const char *buff_end, u64 ptr_value) {\n  int result = 0;\n  result += AppendString(buff, buff_end, 0, -1, \"0x\");\n  result += AppendUnsigned(buff, buff_end, ptr_value, 16,\n                           SANITIZER_POINTER_FORMAT_LENGTH,\n                           true /* pad_with_zero */, false /* uppercase */);\n  return result;\n}\n\nint VSNPrintf(char *buff, int 
buff_length,\n              const char *format, va_list args) {\n  static const char *kPrintfFormatsHelp =\n      \"Supported Printf formats: %([0-9]*)?(z|l|ll)?{d,u,x,X}; %p; \"\n      \"%[-]([0-9]*)?(\\\\.\\\\*)?s; %c\\nProvided format: \";\n  RAW_CHECK(format);\n  RAW_CHECK(buff_length > 0);\n  const char *buff_end = &buff[buff_length - 1];\n  const char *cur = format;\n  int result = 0;\n  for (; *cur; cur++) {\n    if (*cur != '%') {\n      result += AppendChar(&buff, buff_end, *cur);\n      continue;\n    }\n    cur++;\n    bool left_justified = *cur == '-';\n    if (left_justified)\n      cur++;\n    bool have_width = (*cur >= '0' && *cur <= '9');\n    bool pad_with_zero = (*cur == '0');\n    int width = 0;\n    if (have_width) {\n      while (*cur >= '0' && *cur <= '9') {\n        width = width * 10 + *cur++ - '0';\n      }\n    }\n    bool have_precision = (cur[0] == '.' && cur[1] == '*');\n    int precision = -1;\n    if (have_precision) {\n      cur += 2;\n      precision = va_arg(args, int);\n    }\n    bool have_z = (*cur == 'z');\n    cur += have_z;\n    bool have_l = cur[0] == 'l' && cur[1] != 'l';\n    cur += have_l;\n    bool have_ll = cur[0] == 'l' && cur[1] == 'l';\n    cur += have_ll * 2;\n    const bool have_length = have_z || have_l || have_ll;\n    const bool have_flags = have_width || have_length;\n    // At the moment only %s supports precision and left-justification.\n    CHECK(!((precision >= 0 || left_justified) && *cur != 's'));\n    switch (*cur) {\n      case 'd': {\n        s64 dval = have_ll  ? va_arg(args, s64)\n                   : have_z ? va_arg(args, sptr)\n                   : have_l ? va_arg(args, long)\n                            : va_arg(args, int);\n        result += AppendSignedDecimal(&buff, buff_end, dval, width,\n                                      pad_with_zero);\n        break;\n      }\n      case 'u':\n      case 'x':\n      case 'X': {\n        u64 uval = have_ll  ? 
// Can be overridden in frontend.
__sanitizer_on_print(str);\n  if (PrintfAndReportCallback)\n    PrintfAndReportCallback(str);\n}\n\nstatic void NOINLINE SharedPrintfCodeNoBuffer(bool append_pid,\n                                              char *local_buffer,\n                                              int buffer_size,\n                                              const char *format,\n                                              va_list args) {\n  va_list args2;\n  va_copy(args2, args);\n  InternalMmapVector<char> v;\n  int needed_length = 0;\n  char *buffer = local_buffer;\n  // First try to print a message using a local buffer, and then fall back to\n  // mmaped buffer.\n  for (int use_mmap = 0;; use_mmap++) {\n    if (use_mmap) {\n      va_end(args);\n      va_copy(args, args2);\n      v.resize(needed_length + 1);\n      buffer_size = v.capacity();\n      v.resize(buffer_size);\n      buffer = &v[0];\n    }\n    needed_length = 0;\n    // Fuchsia's logging infrastructure always keeps track of the logging\n    // process, thread, and timestamp, so never prepend such information.\n    if (!SANITIZER_FUCHSIA && append_pid) {\n      int pid = internal_getpid();\n      const char *exe_name = GetProcessName();\n      if (common_flags()->log_exe_name && exe_name) {\n        needed_length += internal_snprintf(buffer, buffer_size,\n                                           \"==%s\", exe_name);\n        if (needed_length >= buffer_size)\n          continue;\n      }\n      needed_length += internal_snprintf(\n          buffer + needed_length, buffer_size - needed_length, \"==%d==\", pid);\n      if (needed_length >= buffer_size)\n        continue;\n    }\n    needed_length += VSNPrintf(buffer + needed_length,\n                               buffer_size - needed_length, format, args);\n    if (needed_length >= buffer_size)\n      continue;\n    // If the message fit into the buffer, print it and exit.\n    break;\n  }\n  RawWrite(buffer);\n\n  // Remove color sequences from the message.\n  
RemoveANSIEscapeSequencesFromString(buffer);\n  CallPrintfAndReportCallback(buffer);\n  LogMessageOnPrintf(buffer);\n\n  va_end(args2);\n}\n\nstatic void NOINLINE SharedPrintfCode(bool append_pid, const char *format,\n                                      va_list args) {\n  // |local_buffer| is small enough not to overflow the stack and/or violate\n  // the stack limit enforced by TSan (-Wframe-larger-than=512). On the other\n  // hand, the bigger the buffer is, the more the chance the error report will\n  // fit into it.\n  char local_buffer[400];\n  SharedPrintfCodeNoBuffer(append_pid, local_buffer, ARRAY_SIZE(local_buffer),\n                           format, args);\n}\n\nvoid Printf(const char *format, ...) {\n  va_list args;\n  va_start(args, format);\n  SharedPrintfCode(false, format, args);\n  va_end(args);\n}\n\n// Like Printf, but prints the current PID before the output string.\nvoid Report(const char *format, ...) {\n  va_list args;\n  va_start(args, format);\n  SharedPrintfCode(true, format, args);\n  va_end(args);\n}\n\n// Writes at most \"length\" symbols to \"buffer\" (including trailing '\\0').\n// Returns the number of symbols that should have been written to buffer\n// (not including trailing '\\0'). Thus, the string is truncated\n// iff return value is not less than \"length\".\nint internal_snprintf(char *buffer, uptr length, const char *format, ...) {\n  va_list args;\n  va_start(args, format);\n  int needed_length = VSNPrintf(buffer, length, format, args);\n  va_end(args);\n  return needed_length;\n}\n\nvoid InternalScopedString::append(const char *format, ...) 
{\n  uptr prev_len = length();\n\n  while (true) {\n    buffer_.resize(buffer_.capacity());\n\n    va_list args;\n    va_start(args, format);\n    uptr sz = VSNPrintf(buffer_.data() + prev_len, buffer_.size() - prev_len,\n                        format, args);\n    va_end(args);\n    if (sz < buffer_.size() - prev_len) {\n      buffer_.resize(prev_len + sz + 1);\n      break;\n    }\n\n    buffer_.reserve(buffer_.capacity() * 2);\n  }\n  CHECK_EQ(buffer_[length()], '\\0');\n}\n\n} // namespace __sanitizer\n"
  },
  {
    "path": "runtime/sanitizer_common/sanitizer_procmaps.h",
    "content": "//===-- sanitizer_procmaps.h ------------------------------------*- C++ -*-===//\n//\n// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n// See https://llvm.org/LICENSE.txt for license information.\n// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n//\n//===----------------------------------------------------------------------===//\n//\n// This file is shared between AddressSanitizer and ThreadSanitizer.\n//\n// Information about the process mappings.\n//===----------------------------------------------------------------------===//\n#ifndef SANITIZER_PROCMAPS_H\n#define SANITIZER_PROCMAPS_H\n\n#include \"sanitizer_platform.h\"\n\n#if SANITIZER_LINUX || SANITIZER_FREEBSD || SANITIZER_NETBSD || \\\n    SANITIZER_MAC || SANITIZER_SOLARIS ||  \\\n    SANITIZER_FUCHSIA\n\n#include \"sanitizer_common.h\"\n#include \"sanitizer_internal_defs.h\"\n#include \"sanitizer_fuchsia.h\"\n#include \"sanitizer_linux.h\"\n#include \"sanitizer_mac.h\"\n#include \"sanitizer_mutex.h\"\n\nnamespace __sanitizer {\n\n// Memory protection masks.\nstatic const uptr kProtectionRead = 1;\nstatic const uptr kProtectionWrite = 2;\nstatic const uptr kProtectionExecute = 4;\nstatic const uptr kProtectionShared = 8;\n\nstruct MemoryMappedSegmentData;\n\nclass MemoryMappedSegment {\n public:\n  explicit MemoryMappedSegment(char *buff = nullptr, uptr size = 0)\n      : filename(buff), filename_size(size), data_(nullptr) {}\n  ~MemoryMappedSegment() {}\n\n  bool IsReadable() const { return protection & kProtectionRead; }\n  bool IsWritable() const { return protection & kProtectionWrite; }\n  bool IsExecutable() const { return protection & kProtectionExecute; }\n  bool IsShared() const { return protection & kProtectionShared; }\n\n  void AddAddressRanges(LoadedModule *module);\n\n  uptr start;\n  uptr end;\n  uptr offset;\n  char *filename;  // owned by caller\n  uptr filename_size;\n  uptr protection;\n  ModuleArch arch;\n  u8 
uuid[kModuleUUIDSize];\n\n private:\n  friend class MemoryMappingLayout;\n\n  // This field is assigned and owned by MemoryMappingLayout if needed\n  MemoryMappedSegmentData *data_;\n};\n\nclass MemoryMappingLayoutBase {\n public:\n  virtual bool Next(MemoryMappedSegment *segment) { UNIMPLEMENTED(); }\n  virtual bool Error() const { UNIMPLEMENTED(); };\n  virtual void Reset() { UNIMPLEMENTED(); }\n\n protected:\n  ~MemoryMappingLayoutBase() {}\n};\n\nclass MemoryMappingLayout final : public MemoryMappingLayoutBase {\n public:\n  explicit MemoryMappingLayout(bool cache_enabled);\n  ~MemoryMappingLayout();\n  virtual bool Next(MemoryMappedSegment *segment) override;\n  virtual bool Error() const override;\n  virtual void Reset() override;\n  // In some cases, e.g. when running under a sandbox on Linux, ASan is unable\n  // to obtain the memory mappings. It should fall back to pre-cached data\n  // instead of aborting.\n  static void CacheMemoryMappings();\n\n  // Adds all mapped objects into a vector.\n  void DumpListOfModules(InternalMmapVectorNoCtor<LoadedModule> *modules);\n\n private:\n  void LoadFromCache();\n\n  MemoryMappingLayoutData data_;\n};\n\n// Returns code range for the specified module.\nbool GetCodeRangeForFile(const char *module, uptr *start, uptr *end);\n\nbool IsDecimal(char c);\nuptr ParseDecimal(const char **p);\nbool IsHex(char c);\nuptr ParseHex(const char **p);\n\n}  // namespace __sanitizer\n\n#endif\n#endif  // SANITIZER_PROCMAPS_H\n"
  },
  {
    "path": "runtime/sanitizer_common/sanitizer_procmaps_bsd.cpp",
    "content": "//===-- sanitizer_procmaps_bsd.cpp ----------------------------------------===//\n//\n// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n// See https://llvm.org/LICENSE.txt for license information.\n// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n//\n//===----------------------------------------------------------------------===//\n//\n// Information about the process mappings\n// (FreeBSD and NetBSD-specific parts).\n//===----------------------------------------------------------------------===//\n\n#include \"sanitizer_platform.h\"\n#if SANITIZER_FREEBSD || SANITIZER_NETBSD\n#include \"sanitizer_common.h\"\n#if SANITIZER_FREEBSD\n#include \"sanitizer_freebsd.h\"\n#endif\n#include \"sanitizer_procmaps.h\"\n\n// clang-format off\n#include <sys/types.h>\n#include <sys/sysctl.h>\n// clang-format on\n#include <unistd.h>\n#if SANITIZER_FREEBSD\n#include <sys/user.h>\n#endif\n\n#include <limits.h>\n\n// Fix 'kinfo_vmentry' definition on FreeBSD prior v9.2 in 32-bit mode.\n#if SANITIZER_FREEBSD && (SANITIZER_WORDSIZE == 32)\n#include <osreldate.h>\n#if __FreeBSD_version <= 902001 // v9.2\n#define kinfo_vmentry xkinfo_vmentry\n#endif\n#endif\n\nnamespace __sanitizer {\n\nvoid ReadProcMaps(ProcSelfMapsBuff *proc_maps) {\n  const int Mib[] = {\n#if SANITIZER_FREEBSD\n    CTL_KERN,\n    KERN_PROC,\n    KERN_PROC_VMMAP,\n    getpid()\n#elif SANITIZER_NETBSD\n    CTL_VM,\n    VM_PROC,\n    VM_PROC_MAP,\n    getpid(),\n    sizeof(struct kinfo_vmentry)\n#else\n#error \"not supported\"\n#endif\n  };\n\n  uptr Size = 0;\n  int Err = internal_sysctl(Mib, ARRAY_SIZE(Mib), NULL, &Size, NULL, 0);\n  CHECK_EQ(Err, 0);\n  CHECK_GT(Size, 0);\n\n  size_t MmapedSize = Size * 4 / 3;\n  void *VmMap = MmapOrDie(MmapedSize, \"ReadProcMaps()\");\n  Size = MmapedSize;\n  Err = internal_sysctl(Mib, ARRAY_SIZE(Mib), VmMap, &Size, NULL, 0);\n  CHECK_EQ(Err, 0);\n  proc_maps->data = (char *)VmMap;\n  proc_maps->mmaped_size = MmapedSize;\n  
proc_maps->len = Size;\n}\n\nbool MemoryMappingLayout::Next(MemoryMappedSegment *segment) {\n  CHECK(!Error()); // can not fail\n  char *last = data_.proc_self_maps.data + data_.proc_self_maps.len;\n  if (data_.current >= last)\n    return false;\n  const struct kinfo_vmentry *VmEntry =\n      (const struct kinfo_vmentry *)data_.current;\n\n  segment->start = (uptr)VmEntry->kve_start;\n  segment->end = (uptr)VmEntry->kve_end;\n  segment->offset = (uptr)VmEntry->kve_offset;\n\n  segment->protection = 0;\n  if ((VmEntry->kve_protection & KVME_PROT_READ) != 0)\n    segment->protection |= kProtectionRead;\n  if ((VmEntry->kve_protection & KVME_PROT_WRITE) != 0)\n    segment->protection |= kProtectionWrite;\n  if ((VmEntry->kve_protection & KVME_PROT_EXEC) != 0)\n    segment->protection |= kProtectionExecute;\n\n  if (segment->filename != NULL && segment->filename_size > 0) {\n    internal_snprintf(segment->filename,\n                      Min(segment->filename_size, (uptr)PATH_MAX), \"%s\",\n                      VmEntry->kve_path);\n  }\n\n#if SANITIZER_FREEBSD\n  data_.current += VmEntry->kve_structsize;\n#else\n  data_.current += sizeof(*VmEntry);\n#endif\n\n  return true;\n}\n\n} // namespace __sanitizer\n\n#endif\n"
  },
  {
    "path": "runtime/sanitizer_common/sanitizer_procmaps_common.cpp",
    "content": "//===-- sanitizer_procmaps_common.cpp -------------------------------------===//\n//\n// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n// See https://llvm.org/LICENSE.txt for license information.\n// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n//\n//===----------------------------------------------------------------------===//\n//\n// Information about the process mappings (common parts).\n//===----------------------------------------------------------------------===//\n\n#include \"sanitizer_platform.h\"\n\n#if SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_NETBSD ||                \\\n    SANITIZER_SOLARIS\n\n#include \"sanitizer_common.h\"\n#include \"sanitizer_placement_new.h\"\n#include \"sanitizer_procmaps.h\"\n\nnamespace __sanitizer {\n\nstatic ProcSelfMapsBuff cached_proc_self_maps;\nstatic StaticSpinMutex cache_lock;\n\nstatic int TranslateDigit(char c) {\n  if (c >= '0' && c <= '9')\n    return c - '0';\n  if (c >= 'a' && c <= 'f')\n    return c - 'a' + 10;\n  if (c >= 'A' && c <= 'F')\n    return c - 'A' + 10;\n  return -1;\n}\n\n// Parse a number and promote 'p' up to the first non-digit character.\nstatic uptr ParseNumber(const char **p, int base) {\n  uptr n = 0;\n  int d;\n  CHECK(base >= 2 && base <= 16);\n  while ((d = TranslateDigit(**p)) >= 0 && d < base) {\n    n = n * base + d;\n    (*p)++;\n  }\n  return n;\n}\n\nbool IsDecimal(char c) {\n  int d = TranslateDigit(c);\n  return d >= 0 && d < 10;\n}\n\nuptr ParseDecimal(const char **p) {\n  return ParseNumber(p, 10);\n}\n\nbool IsHex(char c) {\n  int d = TranslateDigit(c);\n  return d >= 0 && d < 16;\n}\n\nuptr ParseHex(const char **p) {\n  return ParseNumber(p, 16);\n}\n\nvoid MemoryMappedSegment::AddAddressRanges(LoadedModule *module) {\n  // data_ should be unused on this platform\n  CHECK(!data_);\n  module->addAddressRange(start, end, IsExecutable(), IsWritable());\n}\n\nMemoryMappingLayout::MemoryMappingLayout(bool 
cache_enabled) {\n  // FIXME: in the future we may want to cache the mappings on demand only.\n  if (cache_enabled)\n    CacheMemoryMappings();\n\n  // Read maps after the cache update to capture the maps/unmaps happening in\n  // the process of updating.\n  ReadProcMaps(&data_.proc_self_maps);\n  if (cache_enabled && data_.proc_self_maps.mmaped_size == 0)\n    LoadFromCache();\n\n  Reset();\n}\n\nbool MemoryMappingLayout::Error() const {\n  return data_.current == nullptr;\n}\n\nMemoryMappingLayout::~MemoryMappingLayout() {\n  // Only unmap the buffer if it is different from the cached one. Otherwise\n  // it will be unmapped when the cache is refreshed.\n  if (data_.proc_self_maps.data != cached_proc_self_maps.data)\n    UnmapOrDie(data_.proc_self_maps.data, data_.proc_self_maps.mmaped_size);\n}\n\nvoid MemoryMappingLayout::Reset() {\n  data_.current = data_.proc_self_maps.data;\n}\n\n// static\nvoid MemoryMappingLayout::CacheMemoryMappings() {\n  ProcSelfMapsBuff new_proc_self_maps;\n  ReadProcMaps(&new_proc_self_maps);\n  // Don't invalidate the cache if the mappings are unavailable.\n  if (new_proc_self_maps.mmaped_size == 0)\n    return;\n  SpinMutexLock l(&cache_lock);\n  if (cached_proc_self_maps.mmaped_size)\n    UnmapOrDie(cached_proc_self_maps.data, cached_proc_self_maps.mmaped_size);\n  cached_proc_self_maps = new_proc_self_maps;\n}\n\nvoid MemoryMappingLayout::LoadFromCache() {\n  SpinMutexLock l(&cache_lock);\n  if (cached_proc_self_maps.data)\n    data_.proc_self_maps = cached_proc_self_maps;\n}\n\nvoid MemoryMappingLayout::DumpListOfModules(\n    InternalMmapVectorNoCtor<LoadedModule> *modules) {\n  Reset();\n  InternalMmapVector<char> module_name(kMaxPathLength);\n  MemoryMappedSegment segment(module_name.data(), module_name.size());\n  for (uptr i = 0; Next(&segment); i++) {\n    const char *cur_name = segment.filename;\n    if (cur_name[0] == '\\0')\n      continue;\n    // Don't subtract 'cur_beg' from the first entry:\n    // * If a binary is 
compiled w/o -pie, then the first entry in\n    //   process maps is likely the binary itself (all dynamic libs\n    //   are mapped higher in address space). For such a binary,\n    //   instruction offset in binary coincides with the actual\n    //   instruction address in virtual memory (as code section\n    //   is mapped to a fixed memory range).\n    // * If a binary is compiled with -pie, all the modules are\n    //   mapped high at address space (in particular, higher than\n    //   shadow memory of the tool), so the module can't be the\n    //   first entry.\n    uptr base_address = (i ? segment.start : 0) - segment.offset;\n    LoadedModule cur_module;\n    cur_module.set(cur_name, base_address);\n    segment.AddAddressRanges(&cur_module);\n    modules->push_back(cur_module);\n  }\n}\n\nvoid GetMemoryProfile(fill_profile_f cb, uptr *stats) {\n  char *smaps = nullptr;\n  uptr smaps_cap = 0;\n  uptr smaps_len = 0;\n  if (!ReadFileToBuffer(\"/proc/self/smaps\", &smaps, &smaps_cap, &smaps_len))\n    return;\n  ParseUnixMemoryProfile(cb, stats, smaps, smaps_len);\n  UnmapOrDie(smaps, smaps_cap);\n}\n\nvoid ParseUnixMemoryProfile(fill_profile_f cb, uptr *stats, char *smaps,\n                            uptr smaps_len) {\n  uptr start = 0;\n  bool file = false;\n  const char *pos = smaps;\n  char *end = smaps + smaps_len;\n  if (smaps_len < 2)\n    return;\n  // The following parsing can crash on almost every line\n  // in the case of malformed/truncated input.\n  // Fixing that is hard b/c e.g. 
ParseDecimal does not\n  // even accept end of the buffer and assumes well-formed input.\n  // So instead we patch end of the input a bit,\n  // it does not affect well-formed complete inputs.\n  *--end = 0;\n  *--end = '\\n';\n  while (pos < end) {\n    if (IsHex(pos[0])) {\n      start = ParseHex(&pos);\n      for (; *pos != '/' && *pos > '\\n'; pos++) {}\n      file = *pos == '/';\n    } else if (internal_strncmp(pos, \"Rss:\", 4) == 0) {\n      while (pos < end && !IsDecimal(*pos)) pos++;\n      uptr rss = ParseDecimal(&pos) * 1024;\n      cb(start, rss, file, stats);\n    }\n    while (*pos++ != '\\n') {}\n  }\n}\n\n} // namespace __sanitizer\n\n#endif\n"
  },
  {
    "path": "runtime/sanitizer_common/sanitizer_procmaps_fuchsia.cpp",
    "content": "//===-- sanitizer_procmaps_fuchsia.cpp\n//----------------------------------------===//\n//\n// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n// See https://llvm.org/LICENSE.txt for license information.\n// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n//\n//===----------------------------------------------------------------------===//\n//\n// Information about the process mappings (Fuchsia-specific parts).\n//===----------------------------------------------------------------------===//\n\n#include \"sanitizer_platform.h\"\n#if SANITIZER_FUCHSIA\n#include <zircon/process.h>\n#include <zircon/syscalls.h>\n\n#include \"sanitizer_common.h\"\n#include \"sanitizer_procmaps.h\"\n\nnamespace __sanitizer {\n\n// The cache flag is ignored on Fuchsia because a process can always get this\n// information via its process-self handle.\nMemoryMappingLayout::MemoryMappingLayout(bool) { Reset(); }\n\nvoid MemoryMappingLayout::Reset() {\n  data_.data.clear();\n  data_.current = 0;\n\n  size_t count;\n  zx_status_t status = _zx_object_get_info(\n      _zx_process_self(), ZX_INFO_PROCESS_MAPS, nullptr, 0, nullptr, &count);\n  if (status != ZX_OK) {\n    return;\n  }\n\n  size_t filled;\n  do {\n    data_.data.resize(count);\n    status = _zx_object_get_info(\n        _zx_process_self(), ZX_INFO_PROCESS_MAPS, data_.data.data(),\n        count * sizeof(zx_info_maps_t), &filled, &count);\n    if (status != ZX_OK) {\n      data_.data.clear();\n      return;\n    }\n  } while (filled < count);\n}\n\nMemoryMappingLayout::~MemoryMappingLayout() {}\n\nbool MemoryMappingLayout::Error() const { return data_.data.empty(); }\n\nbool MemoryMappingLayout::Next(MemoryMappedSegment *segment) {\n  while (data_.current < data_.data.size()) {\n    const auto &entry = data_.data[data_.current++];\n    if (entry.type == ZX_INFO_MAPS_TYPE_MAPPING) {\n      segment->start = entry.base;\n      segment->end = entry.base + entry.size;\n      
segment->offset = entry.u.mapping.vmo_offset;\n      const auto flags = entry.u.mapping.mmu_flags;\n      segment->protection =\n          ((flags & ZX_VM_PERM_READ) ? kProtectionRead : 0) |\n          ((flags & ZX_VM_PERM_WRITE) ? kProtectionWrite : 0) |\n          ((flags & ZX_VM_PERM_EXECUTE) ? kProtectionExecute : 0);\n      if (segment->filename && segment->filename_size > 0) {\n        uptr len = Min(sizeof(entry.name), segment->filename_size) - 1;\n        internal_strncpy(segment->filename, entry.name, len);\n        segment->filename[len] = 0;\n      }\n      return true;\n    }\n  }\n  return false;\n}\n\n}  // namespace __sanitizer\n\n#endif  // SANITIZER_FUCHSIA\n"
  },
  {
    "path": "runtime/sanitizer_common/sanitizer_procmaps_linux.cpp",
    "content": "//===-- sanitizer_procmaps_linux.cpp --------------------------------------===//\n//\n// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n// See https://llvm.org/LICENSE.txt for license information.\n// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n//\n//===----------------------------------------------------------------------===//\n//\n// Information about the process mappings (Linux-specific parts).\n//===----------------------------------------------------------------------===//\n\n#include \"sanitizer_platform.h\"\n#if SANITIZER_LINUX\n#include \"sanitizer_common.h\"\n#include \"sanitizer_procmaps.h\"\n\nnamespace __sanitizer {\n\nvoid ReadProcMaps(ProcSelfMapsBuff *proc_maps) {\n  if (!ReadFileToBuffer(\"/proc/self/maps\", &proc_maps->data,\n                        &proc_maps->mmaped_size, &proc_maps->len)) {\n    proc_maps->data = nullptr;\n    proc_maps->mmaped_size = 0;\n    proc_maps->len = 0;\n  }\n}\n\nstatic bool IsOneOf(char c, char c1, char c2) {\n  return c == c1 || c == c2;\n}\n\nbool MemoryMappingLayout::Next(MemoryMappedSegment *segment) {\n  if (Error()) return false; // simulate empty maps\n  char *last = data_.proc_self_maps.data + data_.proc_self_maps.len;\n  if (data_.current >= last) return false;\n  char *next_line =\n      (char *)internal_memchr(data_.current, '\\n', last - data_.current);\n  if (next_line == 0)\n    next_line = last;\n  // Example: 08048000-08056000 r-xp 00000000 03:0c 64593   /foo/bar\n  segment->start = ParseHex(&data_.current);\n  CHECK_EQ(*data_.current++, '-');\n  segment->end = ParseHex(&data_.current);\n  CHECK_EQ(*data_.current++, ' ');\n  CHECK(IsOneOf(*data_.current, '-', 'r'));\n  segment->protection = 0;\n  if (*data_.current++ == 'r') segment->protection |= kProtectionRead;\n  CHECK(IsOneOf(*data_.current, '-', 'w'));\n  if (*data_.current++ == 'w') segment->protection |= kProtectionWrite;\n  CHECK(IsOneOf(*data_.current, '-', 'x'));\n  if 
(*data_.current++ == 'x') segment->protection |= kProtectionExecute;\n  CHECK(IsOneOf(*data_.current, 's', 'p'));\n  if (*data_.current++ == 's') segment->protection |= kProtectionShared;\n  CHECK_EQ(*data_.current++, ' ');\n  segment->offset = ParseHex(&data_.current);\n  CHECK_EQ(*data_.current++, ' ');\n  ParseHex(&data_.current);\n  CHECK_EQ(*data_.current++, ':');\n  ParseHex(&data_.current);\n  CHECK_EQ(*data_.current++, ' ');\n  while (IsDecimal(*data_.current)) data_.current++;\n  // Qemu may lack the trailing space.\n  // https://github.com/google/sanitizers/issues/160\n  // CHECK_EQ(*data_.current++, ' ');\n  // Skip spaces.\n  while (data_.current < next_line && *data_.current == ' ') data_.current++;\n  // Fill in the filename.\n  if (segment->filename) {\n    uptr len =\n        Min((uptr)(next_line - data_.current), segment->filename_size - 1);\n    internal_strncpy(segment->filename, data_.current, len);\n    segment->filename[len] = 0;\n  }\n\n  data_.current = next_line + 1;\n  return true;\n}\n\n}  // namespace __sanitizer\n\n#endif  // SANITIZER_LINUX\n"
  },
  {
    "path": "runtime/sanitizer_common/sanitizer_procmaps_mac.cpp",
    "content": "//===-- sanitizer_procmaps_mac.cpp ----------------------------------------===//\n//\n// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n// See https://llvm.org/LICENSE.txt for license information.\n// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n//\n//===----------------------------------------------------------------------===//\n//\n// Information about the process mappings (Mac-specific parts).\n//===----------------------------------------------------------------------===//\n\n#include \"sanitizer_platform.h\"\n#if SANITIZER_MAC\n#include \"sanitizer_common.h\"\n#include \"sanitizer_placement_new.h\"\n#include \"sanitizer_procmaps.h\"\n\n#include <mach-o/dyld.h>\n#include <mach-o/loader.h>\n#include <mach/mach.h>\n\n// These are not available in older macOS SDKs.\n#ifndef CPU_SUBTYPE_X86_64_H\n#define CPU_SUBTYPE_X86_64_H  ((cpu_subtype_t)8)   /* Haswell */\n#endif\n#ifndef CPU_SUBTYPE_ARM_V7S\n#define CPU_SUBTYPE_ARM_V7S   ((cpu_subtype_t)11)  /* Swift */\n#endif\n#ifndef CPU_SUBTYPE_ARM_V7K\n#define CPU_SUBTYPE_ARM_V7K   ((cpu_subtype_t)12)\n#endif\n#ifndef CPU_TYPE_ARM64\n#define CPU_TYPE_ARM64        (CPU_TYPE_ARM | CPU_ARCH_ABI64)\n#endif\n\nnamespace __sanitizer {\n\n// Contains information used to iterate through sections.\nstruct MemoryMappedSegmentData {\n  char name[kMaxSegName];\n  uptr nsects;\n  const char *current_load_cmd_addr;\n  u32 lc_type;\n  uptr base_virt_addr;\n  uptr addr_mask;\n};\n\ntemplate <typename Section>\nstatic void NextSectionLoad(LoadedModule *module, MemoryMappedSegmentData *data,\n                            bool isWritable) {\n  const Section *sc = (const Section *)data->current_load_cmd_addr;\n  data->current_load_cmd_addr += sizeof(Section);\n\n  uptr sec_start = (sc->addr & data->addr_mask) + data->base_virt_addr;\n  uptr sec_end = sec_start + sc->size;\n  module->addAddressRange(sec_start, sec_end, /*executable=*/false, isWritable,\n                          
sc->sectname);\n}\n\nvoid MemoryMappedSegment::AddAddressRanges(LoadedModule *module) {\n  // Don't iterate over sections when the caller hasn't set up the\n  // data pointer, when there are no sections, or when the segment\n  // is executable. Avoid iterating over executable sections because\n  // it will confuse libignore, and because the extra granularity\n  // of information is not needed by any sanitizers.\n  if (!data_ || !data_->nsects || IsExecutable()) {\n    module->addAddressRange(start, end, IsExecutable(), IsWritable(),\n                            data_ ? data_->name : nullptr);\n    return;\n  }\n\n  do {\n    if (data_->lc_type == LC_SEGMENT) {\n      NextSectionLoad<struct section>(module, data_, IsWritable());\n#ifdef MH_MAGIC_64\n    } else if (data_->lc_type == LC_SEGMENT_64) {\n      NextSectionLoad<struct section_64>(module, data_, IsWritable());\n#endif\n    }\n  } while (--data_->nsects);\n}\n\nMemoryMappingLayout::MemoryMappingLayout(bool cache_enabled) {\n  Reset();\n}\n\nMemoryMappingLayout::~MemoryMappingLayout() {\n}\n\nbool MemoryMappingLayout::Error() const {\n  return false;\n}\n\n// More information about Mach-O headers can be found in mach-o/loader.h\n// Each Mach-O image has a header (mach_header or mach_header_64) starting with\n// a magic number, and a list of linker load commands directly following the\n// header.\n// A load command is at least two 32-bit words: the command type and the\n// command size in bytes. 
We're interested only in segment load commands\n// (LC_SEGMENT and LC_SEGMENT_64), which tell that a part of the file is mapped\n// into the task's address space.\n// The |vmaddr|, |vmsize| and |fileoff| fields of segment_command or\n// segment_command_64 correspond to the memory address, memory size and the\n// file offset of the current memory segment.\n// Because these fields are taken from the images as is, one needs to add\n// _dyld_get_image_vmaddr_slide() to get the actual addresses at runtime.\n\nvoid MemoryMappingLayout::Reset() {\n  // Count down from the top.\n  // TODO(glider): as per man 3 dyld, iterating over the headers with\n  // _dyld_image_count is thread-unsafe. We need to register callbacks for\n  // adding and removing images which will invalidate the MemoryMappingLayout\n  // state.\n  data_.current_image = _dyld_image_count();\n  data_.current_load_cmd_count = -1;\n  data_.current_load_cmd_addr = 0;\n  data_.current_magic = 0;\n  data_.current_filetype = 0;\n  data_.current_arch = kModuleArchUnknown;\n  internal_memset(data_.current_uuid, 0, kModuleUUIDSize);\n}\n\n// The dyld load address should be unchanged throughout process execution,\n// and it is expensive to compute once many libraries have been loaded,\n// so cache it here and do not reset.\nstatic mach_header *dyld_hdr = 0;\nstatic const char kDyldPath[] = \"/usr/lib/dyld\";\nstatic const int kDyldImageIdx = -1;\n\n// static\nvoid MemoryMappingLayout::CacheMemoryMappings() {\n  // No-op on Mac for now.\n}\n\nvoid MemoryMappingLayout::LoadFromCache() {\n  // No-op on Mac for now.\n}\n\n// _dyld_get_image_header() and related APIs don't report dyld itself.\n// We work around this by manually recursing through the memory map\n// until we hit a Mach header matching dyld instead. 
These recurse\n// calls are expensive, but the first memory map generation occurs\n// early in the process, when dyld is one of the only images loaded,\n// so it will be hit after only a few iterations.\nstatic mach_header *get_dyld_image_header() {\n  vm_address_t address = 0;\n\n  while (true) {\n    vm_size_t size = 0;\n    unsigned depth = 1;\n    struct vm_region_submap_info_64 info;\n    mach_msg_type_number_t count = VM_REGION_SUBMAP_INFO_COUNT_64;\n    kern_return_t err =\n        vm_region_recurse_64(mach_task_self(), &address, &size, &depth,\n                             (vm_region_info_t)&info, &count);\n    if (err != KERN_SUCCESS) return nullptr;\n\n    if (size >= sizeof(mach_header) && info.protection & kProtectionRead) {\n      mach_header *hdr = (mach_header *)address;\n      if ((hdr->magic == MH_MAGIC || hdr->magic == MH_MAGIC_64) &&\n          hdr->filetype == MH_DYLINKER) {\n        return hdr;\n      }\n    }\n    address += size;\n  }\n}\n\nconst mach_header *get_dyld_hdr() {\n  if (!dyld_hdr) dyld_hdr = get_dyld_image_header();\n\n  return dyld_hdr;\n}\n\n// Next and NextSegmentLoad were inspired by base/sysinfo.cc in\n// Google Perftools, https://github.com/gperftools/gperftools.\n\n// NextSegmentLoad scans the current image for the next segment load command\n// and returns the start and end addresses and file offset of the corresponding\n// segment.\n// Note that the segment addresses are not necessarily sorted.\ntemplate <u32 kLCSegment, typename SegmentCommand>\nstatic bool NextSegmentLoad(MemoryMappedSegment *segment,\n                            MemoryMappedSegmentData *seg_data,\n                            MemoryMappingLayoutData *layout_data) {\n  const char *lc = layout_data->current_load_cmd_addr;\n  layout_data->current_load_cmd_addr += ((const load_command *)lc)->cmdsize;\n  if (((const load_command *)lc)->cmd == kLCSegment) {\n    const SegmentCommand* sc = (const SegmentCommand *)lc;\n    uptr base_virt_addr, addr_mask;\n    
if (layout_data->current_image == kDyldImageIdx) {\n      base_virt_addr = (uptr)get_dyld_hdr();\n      // vmaddr is masked with 0xfffff because on macOS versions < 10.12,\n      // it contains an absolute address rather than an offset for dyld.\n      // To make matters even more complicated, this absolute address\n      // isn't actually the absolute segment address, but the offset portion\n      // of the address is accurate when combined with the dyld base address,\n      // and the mask will give just this offset.\n      addr_mask = 0xfffff;\n    } else {\n      base_virt_addr =\n          (uptr)_dyld_get_image_vmaddr_slide(layout_data->current_image);\n      addr_mask = ~0;\n    }\n\n    segment->start = (sc->vmaddr & addr_mask) + base_virt_addr;\n    segment->end = segment->start + sc->vmsize;\n    // Most callers don't need section information, so only fill this struct\n    // when required.\n    if (seg_data) {\n      seg_data->nsects = sc->nsects;\n      seg_data->current_load_cmd_addr =\n          (const char *)lc + sizeof(SegmentCommand);\n      seg_data->lc_type = kLCSegment;\n      seg_data->base_virt_addr = base_virt_addr;\n      seg_data->addr_mask = addr_mask;\n      internal_strncpy(seg_data->name, sc->segname,\n                       ARRAY_SIZE(seg_data->name));\n    }\n\n    // Return the initial protection.\n    segment->protection = sc->initprot;\n    segment->offset = (layout_data->current_filetype ==\n                       /*MH_EXECUTE*/ 0x2)\n                          ? sc->vmaddr\n                          : sc->fileoff;\n    if (segment->filename) {\n      const char *src = (layout_data->current_image == kDyldImageIdx)\n                            ? 
kDyldPath\n                            : _dyld_get_image_name(layout_data->current_image);\n      internal_strncpy(segment->filename, src, segment->filename_size);\n    }\n    segment->arch = layout_data->current_arch;\n    internal_memcpy(segment->uuid, layout_data->current_uuid, kModuleUUIDSize);\n    return true;\n  }\n  return false;\n}\n\nModuleArch ModuleArchFromCpuType(cpu_type_t cputype, cpu_subtype_t cpusubtype) {\n  cpusubtype = cpusubtype & ~CPU_SUBTYPE_MASK;\n  switch (cputype) {\n    case CPU_TYPE_I386:\n      return kModuleArchI386;\n    case CPU_TYPE_X86_64:\n      if (cpusubtype == CPU_SUBTYPE_X86_64_ALL) return kModuleArchX86_64;\n      if (cpusubtype == CPU_SUBTYPE_X86_64_H) return kModuleArchX86_64H;\n      CHECK(0 && \"Invalid subtype of x86_64\");\n      return kModuleArchUnknown;\n    case CPU_TYPE_ARM:\n      if (cpusubtype == CPU_SUBTYPE_ARM_V6) return kModuleArchARMV6;\n      if (cpusubtype == CPU_SUBTYPE_ARM_V7) return kModuleArchARMV7;\n      if (cpusubtype == CPU_SUBTYPE_ARM_V7S) return kModuleArchARMV7S;\n      if (cpusubtype == CPU_SUBTYPE_ARM_V7K) return kModuleArchARMV7K;\n      CHECK(0 && \"Invalid subtype of ARM\");\n      return kModuleArchUnknown;\n    case CPU_TYPE_ARM64:\n      return kModuleArchARM64;\n    default:\n      CHECK(0 && \"Invalid CPU type\");\n      return kModuleArchUnknown;\n  }\n}\n\nstatic const load_command *NextCommand(const load_command *lc) {\n  return (const load_command *)((const char *)lc + lc->cmdsize);\n}\n\nstatic void FindUUID(const load_command *first_lc, u8 *uuid_output) {\n  for (const load_command *lc = first_lc; lc->cmd != 0; lc = NextCommand(lc)) {\n    if (lc->cmd != LC_UUID) continue;\n\n    const uuid_command *uuid_lc = (const uuid_command *)lc;\n    const uint8_t *uuid = &uuid_lc->uuid[0];\n    internal_memcpy(uuid_output, uuid, kModuleUUIDSize);\n    return;\n  }\n}\n\nstatic bool IsModuleInstrumented(const load_command *first_lc) {\n  for (const load_command *lc = first_lc; lc->cmd != 0; 
lc = NextCommand(lc)) {\n    if (lc->cmd != LC_LOAD_DYLIB) continue;\n\n    const dylib_command *dylib_lc = (const dylib_command *)lc;\n    uint32_t dylib_name_offset = dylib_lc->dylib.name.offset;\n    const char *dylib_name = ((const char *)dylib_lc) + dylib_name_offset;\n    dylib_name = StripModuleName(dylib_name);\n    if (dylib_name != 0 && (internal_strstr(dylib_name, \"libclang_rt.\"))) {\n      return true;\n    }\n  }\n  return false;\n}\n\nbool MemoryMappingLayout::Next(MemoryMappedSegment *segment) {\n  for (; data_.current_image >= kDyldImageIdx; data_.current_image--) {\n    const mach_header *hdr = (data_.current_image == kDyldImageIdx)\n                                 ? get_dyld_hdr()\n                                 : _dyld_get_image_header(data_.current_image);\n    if (!hdr) continue;\n    if (data_.current_load_cmd_count < 0) {\n      // Set up for this image;\n      data_.current_load_cmd_count = hdr->ncmds;\n      data_.current_magic = hdr->magic;\n      data_.current_filetype = hdr->filetype;\n      data_.current_arch = ModuleArchFromCpuType(hdr->cputype, hdr->cpusubtype);\n      switch (data_.current_magic) {\n#ifdef MH_MAGIC_64\n        case MH_MAGIC_64: {\n          data_.current_load_cmd_addr =\n              (const char *)hdr + sizeof(mach_header_64);\n          break;\n        }\n#endif\n        case MH_MAGIC: {\n          data_.current_load_cmd_addr = (const char *)hdr + sizeof(mach_header);\n          break;\n        }\n        default: {\n          continue;\n        }\n      }\n      FindUUID((const load_command *)data_.current_load_cmd_addr,\n               data_.current_uuid);\n      data_.current_instrumented = IsModuleInstrumented(\n          (const load_command *)data_.current_load_cmd_addr);\n    }\n\n    for (; data_.current_load_cmd_count >= 0; data_.current_load_cmd_count--) {\n      switch (data_.current_magic) {\n        // data_.current_magic may be only one of MH_MAGIC, MH_MAGIC_64.\n#ifdef MH_MAGIC_64\n        case 
MH_MAGIC_64: {\n          if (NextSegmentLoad<LC_SEGMENT_64, struct segment_command_64>(\n                  segment, segment->data_, &data_))\n            return true;\n          break;\n        }\n#endif\n        case MH_MAGIC: {\n          if (NextSegmentLoad<LC_SEGMENT, struct segment_command>(\n                  segment, segment->data_, &data_))\n            return true;\n          break;\n        }\n      }\n    }\n    // If we get here, no more load_cmd's in this image talk about\n    // segments.  Go on to the next image.\n  }\n  return false;\n}\n\nvoid MemoryMappingLayout::DumpListOfModules(\n    InternalMmapVectorNoCtor<LoadedModule> *modules) {\n  Reset();\n  InternalMmapVector<char> module_name(kMaxPathLength);\n  MemoryMappedSegment segment(module_name.data(), module_name.size());\n  MemoryMappedSegmentData data;\n  segment.data_ = &data;\n  while (Next(&segment)) {\n    if (segment.filename[0] == '\\0') continue;\n    LoadedModule *cur_module = nullptr;\n    if (!modules->empty() &&\n        0 == internal_strcmp(segment.filename, modules->back().full_name())) {\n      cur_module = &modules->back();\n    } else {\n      modules->push_back(LoadedModule());\n      cur_module = &modules->back();\n      cur_module->set(segment.filename, segment.start, segment.arch,\n                      segment.uuid, data_.current_instrumented);\n    }\n    segment.AddAddressRanges(cur_module);\n  }\n}\n\n}  // namespace __sanitizer\n\n#endif  // SANITIZER_MAC\n"
  },
  {
    "path": "runtime/sanitizer_common/sanitizer_procmaps_solaris.cpp",
    "content": "//===-- sanitizer_procmaps_solaris.cpp ------------------------------------===//\n//\n// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n// See https://llvm.org/LICENSE.txt for license information.\n// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n//\n//===----------------------------------------------------------------------===//\n//\n// Information about the process mappings (Solaris-specific parts).\n//===----------------------------------------------------------------------===//\n\n// Before Solaris 11.4, <procfs.h> doesn't work in a largefile environment.\n#undef _FILE_OFFSET_BITS\n#include \"sanitizer_platform.h\"\n#if SANITIZER_SOLARIS\n#include \"sanitizer_common.h\"\n#include \"sanitizer_procmaps.h\"\n\n#include <procfs.h>\n#include <limits.h>\n\nnamespace __sanitizer {\n\nvoid ReadProcMaps(ProcSelfMapsBuff *proc_maps) {\n  if (!ReadFileToBuffer(\"/proc/self/xmap\", &proc_maps->data,\n                        &proc_maps->mmaped_size, &proc_maps->len)) {\n    proc_maps->data = nullptr;\n    proc_maps->mmaped_size = 0;\n    proc_maps->len = 0;\n  }\n}\n\nbool MemoryMappingLayout::Next(MemoryMappedSegment *segment) {\n  if (Error()) return false; // simulate empty maps\n  char *last = data_.proc_self_maps.data + data_.proc_self_maps.len;\n  if (data_.current >= last) return false;\n\n  prxmap_t *xmapentry =\n      const_cast<prxmap_t *>(reinterpret_cast<const prxmap_t *>(data_.current));\n\n  segment->start = (uptr)xmapentry->pr_vaddr;\n  segment->end = (uptr)(xmapentry->pr_vaddr + xmapentry->pr_size);\n  segment->offset = (uptr)xmapentry->pr_offset;\n\n  segment->protection = 0;\n  if ((xmapentry->pr_mflags & MA_READ) != 0)\n    segment->protection |= kProtectionRead;\n  if ((xmapentry->pr_mflags & MA_WRITE) != 0)\n    segment->protection |= kProtectionWrite;\n  if ((xmapentry->pr_mflags & MA_EXEC) != 0)\n    segment->protection |= kProtectionExecute;\n\n  if (segment->filename != NULL && 
segment->filename_size > 0) {\n    char proc_path[PATH_MAX + 1];\n\n    internal_snprintf(proc_path, sizeof(proc_path), \"/proc/self/path/%s\",\n                      xmapentry->pr_mapname);\n    ssize_t sz = internal_readlink(proc_path, segment->filename,\n                                   segment->filename_size - 1);\n\n    // If readlink failed, the map is anonymous.\n    if (sz == -1) {\n      segment->filename[0] = '\\0';\n    } else if ((size_t)sz < segment->filename_size)\n      // readlink doesn't NUL-terminate.\n      segment->filename[sz] = '\\0';\n  }\n\n  data_.current += sizeof(prxmap_t);\n\n  return true;\n}\n\n}  // namespace __sanitizer\n\n#endif  // SANITIZER_SOLARIS\n"
  },
  {
    "path": "runtime/sanitizer_common/sanitizer_ptrauth.h",
    "content": "//===-- sanitizer_ptrauth.h -------------------------------------*- C++ -*-===//\n//\n// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n// See https://llvm.org/LICENSE.txt for license information.\n// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n//\n//===----------------------------------------------------------------------===//\n\n#ifndef SANITIZER_PTRAUTH_H\n#define SANITIZER_PTRAUTH_H\n\n#if __has_feature(ptrauth_calls)\n#include <ptrauth.h>\n#elif defined(__ARM_FEATURE_PAC_DEFAULT) && !defined(__APPLE__)\ninline unsigned long ptrauth_strip(void* __value, unsigned int __key) {\n  // On the stack the link register is protected with Pointer\n  // Authentication Code when compiled with -mbranch-protection.\n  // Let's stripping the PAC unconditionally because xpaclri is in\n  // the NOP space so will do nothing when it is not enabled or not available.\n  unsigned long ret;\n  asm volatile(\n      \"mov x30, %1\\n\\t\"\n      \"hint #7\\n\\t\"  // xpaclri\n      \"mov %0, x30\\n\\t\"\n      : \"=r\"(ret)\n      : \"r\"(__value)\n      : \"x30\");\n  return ret;\n}\n#define ptrauth_auth_data(__value, __old_key, __old_data) __value\n#define ptrauth_string_discriminator(__string) ((int)0)\n#else\n// Copied from <ptrauth.h>\n#define ptrauth_strip(__value, __key) __value\n#define ptrauth_auth_data(__value, __old_key, __old_data) __value\n#define ptrauth_string_discriminator(__string) ((int)0)\n#endif\n\n#define STRIP_PAC_PC(pc) ((uptr)ptrauth_strip(pc, 0))\n\n#endif // SANITIZER_PTRAUTH_H\n"
  },
  {
    "path": "runtime/sanitizer_common/sanitizer_quarantine.h",
    "content": "//===-- sanitizer_quarantine.h ----------------------------------*- C++ -*-===//\n//\n// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n// See https://llvm.org/LICENSE.txt for license information.\n// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n//\n//===----------------------------------------------------------------------===//\n//\n// Memory quarantine for AddressSanitizer and potentially other tools.\n// Quarantine caches some specified amount of memory in per-thread caches,\n// then evicts to global FIFO queue. When the queue reaches specified threshold,\n// oldest memory is recycled.\n//\n//===----------------------------------------------------------------------===//\n\n#ifndef SANITIZER_QUARANTINE_H\n#define SANITIZER_QUARANTINE_H\n\n#include \"sanitizer_internal_defs.h\"\n#include \"sanitizer_mutex.h\"\n#include \"sanitizer_list.h\"\n\nnamespace __sanitizer {\n\ntemplate<typename Node> class QuarantineCache;\n\nstruct QuarantineBatch {\n  static const uptr kSize = 1021;\n  QuarantineBatch *next;\n  uptr size;\n  uptr count;\n  void *batch[kSize];\n\n  void init(void *ptr, uptr size) {\n    count = 1;\n    batch[0] = ptr;\n    this->size = size + sizeof(QuarantineBatch);  // Account for the batch size.\n  }\n\n  // The total size of quarantined nodes recorded in this batch.\n  uptr quarantined_size() const {\n    return size - sizeof(QuarantineBatch);\n  }\n\n  void push_back(void *ptr, uptr size) {\n    CHECK_LT(count, kSize);\n    batch[count++] = ptr;\n    this->size += size;\n  }\n\n  bool can_merge(const QuarantineBatch* const from) const {\n    return count + from->count <= kSize;\n  }\n\n  void merge(QuarantineBatch* const from) {\n    CHECK_LE(count + from->count, kSize);\n    CHECK_GE(size, sizeof(QuarantineBatch));\n\n    for (uptr i = 0; i < from->count; ++i)\n      batch[count + i] = from->batch[i];\n    count += from->count;\n    size += from->quarantined_size();\n\n    from->count = 
0;\n    from->size = sizeof(QuarantineBatch);\n  }\n};\n\nCOMPILER_CHECK(sizeof(QuarantineBatch) <= (1 << 13));  // 8Kb.\n\n// The callback interface is:\n// void Callback::Recycle(Node *ptr);\n// void *cb.Allocate(uptr size);\n// void cb.Deallocate(void *ptr);\ntemplate<typename Callback, typename Node>\nclass Quarantine {\n public:\n  typedef QuarantineCache<Callback> Cache;\n\n  explicit Quarantine(LinkerInitialized)\n      : cache_(LINKER_INITIALIZED) {\n  }\n\n  void Init(uptr size, uptr cache_size) {\n    // Thread local quarantine size can be zero only when global quarantine size\n    // is zero (it allows us to perform just one atomic read per Put() call).\n    CHECK((size == 0 && cache_size == 0) || cache_size != 0);\n\n    atomic_store_relaxed(&max_size_, size);\n    atomic_store_relaxed(&min_size_, size / 10 * 9);  // 90% of max size.\n    atomic_store_relaxed(&max_cache_size_, cache_size);\n\n    cache_mutex_.Init();\n    recycle_mutex_.Init();\n  }\n\n  uptr GetSize() const { return atomic_load_relaxed(&max_size_); }\n  uptr GetCacheSize() const {\n    return atomic_load_relaxed(&max_cache_size_);\n  }\n\n  void Put(Cache *c, Callback cb, Node *ptr, uptr size) {\n    uptr cache_size = GetCacheSize();\n    if (cache_size) {\n      c->Enqueue(cb, ptr, size);\n    } else {\n      // GetCacheSize() == 0 only when GetSize() == 0 (see Init).\n      cb.Recycle(ptr);\n    }\n    // Check cache size anyway to accommodate for runtime cache_size change.\n    if (c->Size() > cache_size)\n      Drain(c, cb);\n  }\n\n  void NOINLINE Drain(Cache *c, Callback cb) {\n    {\n      SpinMutexLock l(&cache_mutex_);\n      cache_.Transfer(c);\n    }\n    if (cache_.Size() > GetSize() && recycle_mutex_.TryLock())\n      Recycle(atomic_load_relaxed(&min_size_), cb);\n  }\n\n  void NOINLINE DrainAndRecycle(Cache *c, Callback cb) {\n    {\n      SpinMutexLock l(&cache_mutex_);\n      cache_.Transfer(c);\n    }\n    recycle_mutex_.Lock();\n    Recycle(0, cb);\n  }\n\n  void 
PrintStats() const {\n    // It assumes that the world is stopped, just as the allocator's PrintStats.\n    Printf(\"Quarantine limits: global: %zdMb; thread local: %zdKb\\n\",\n           GetSize() >> 20, GetCacheSize() >> 10);\n    cache_.PrintStats();\n  }\n\n private:\n  // Read-only data.\n  char pad0_[kCacheLineSize];\n  atomic_uintptr_t max_size_;\n  atomic_uintptr_t min_size_;\n  atomic_uintptr_t max_cache_size_;\n  char pad1_[kCacheLineSize];\n  StaticSpinMutex cache_mutex_;\n  StaticSpinMutex recycle_mutex_;\n  Cache cache_;\n  char pad2_[kCacheLineSize];\n\n  void NOINLINE Recycle(uptr min_size, Callback cb)\n      SANITIZER_REQUIRES(recycle_mutex_) SANITIZER_RELEASE(recycle_mutex_) {\n    Cache tmp;\n    {\n      SpinMutexLock l(&cache_mutex_);\n      // Go over the batches and merge partially filled ones to\n      // save some memory, otherwise batches themselves (since the memory used\n      // by them is counted against quarantine limit) can overcome the actual\n      // user's quarantined chunks, which diminishes the purpose of the\n      // quarantine.\n      uptr cache_size = cache_.Size();\n      uptr overhead_size = cache_.OverheadSize();\n      CHECK_GE(cache_size, overhead_size);\n      // Do the merge only when overhead exceeds this predefined limit (might\n      // require some tuning). 
It saves us a merge attempt when the batch list\n      // quarantine is unlikely to contain batches suitable for merge.\n      const uptr kOverheadThresholdPercents = 100;\n      if (cache_size > overhead_size &&\n          overhead_size * (100 + kOverheadThresholdPercents) >\n              cache_size * kOverheadThresholdPercents) {\n        cache_.MergeBatches(&tmp);\n      }\n      // Extract enough chunks from the quarantine to get below the max\n      // quarantine size and leave some leeway for the newly quarantined chunks.\n      while (cache_.Size() > min_size) {\n        tmp.EnqueueBatch(cache_.DequeueBatch());\n      }\n    }\n    recycle_mutex_.Unlock();\n    DoRecycle(&tmp, cb);\n  }\n\n  void NOINLINE DoRecycle(Cache *c, Callback cb) {\n    while (QuarantineBatch *b = c->DequeueBatch()) {\n      const uptr kPrefetch = 16;\n      CHECK(kPrefetch <= ARRAY_SIZE(b->batch));\n      for (uptr i = 0; i < kPrefetch; i++)\n        PREFETCH(b->batch[i]);\n      for (uptr i = 0, count = b->count; i < count; i++) {\n        if (i + kPrefetch < count)\n          PREFETCH(b->batch[i + kPrefetch]);\n        cb.Recycle((Node*)b->batch[i]);\n      }\n      cb.Deallocate(b);\n    }\n  }\n};\n\n// Per-thread cache of memory blocks.\ntemplate<typename Callback>\nclass QuarantineCache {\n public:\n  explicit QuarantineCache(LinkerInitialized) {\n  }\n\n  QuarantineCache()\n      : size_() {\n    list_.clear();\n  }\n\n  // Total memory used, including internal accounting.\n  uptr Size() const {\n    return atomic_load_relaxed(&size_);\n  }\n\n  // Memory used for internal accounting.\n  uptr OverheadSize() const {\n    return list_.size() * sizeof(QuarantineBatch);\n  }\n\n  void Enqueue(Callback cb, void *ptr, uptr size) {\n    if (list_.empty() || list_.back()->count == QuarantineBatch::kSize) {\n      QuarantineBatch *b = (QuarantineBatch *)cb.Allocate(sizeof(*b));\n      CHECK(b);\n      b->init(ptr, size);\n      EnqueueBatch(b);\n    } else {\n      
list_.back()->push_back(ptr, size);\n      SizeAdd(size);\n    }\n  }\n\n  void Transfer(QuarantineCache *from_cache) {\n    list_.append_back(&from_cache->list_);\n    SizeAdd(from_cache->Size());\n\n    atomic_store_relaxed(&from_cache->size_, 0);\n  }\n\n  void EnqueueBatch(QuarantineBatch *b) {\n    list_.push_back(b);\n    SizeAdd(b->size);\n  }\n\n  QuarantineBatch *DequeueBatch() {\n    if (list_.empty())\n      return nullptr;\n    QuarantineBatch *b = list_.front();\n    list_.pop_front();\n    SizeSub(b->size);\n    return b;\n  }\n\n  void MergeBatches(QuarantineCache *to_deallocate) {\n    uptr extracted_size = 0;\n    QuarantineBatch *current = list_.front();\n    while (current && current->next) {\n      if (current->can_merge(current->next)) {\n        QuarantineBatch *extracted = current->next;\n        // Move all the chunks into the current batch.\n        current->merge(extracted);\n        CHECK_EQ(extracted->count, 0);\n        CHECK_EQ(extracted->size, sizeof(QuarantineBatch));\n        // Remove the next batch from the list and account for its size.\n        list_.extract(current, extracted);\n        extracted_size += extracted->size;\n        // Add it to deallocation list.\n        to_deallocate->EnqueueBatch(extracted);\n      } else {\n        current = current->next;\n      }\n    }\n    SizeSub(extracted_size);\n  }\n\n  void PrintStats() const {\n    uptr batch_count = 0;\n    uptr total_overhead_bytes = 0;\n    uptr total_bytes = 0;\n    uptr total_quarantine_chunks = 0;\n    for (List::ConstIterator it = list_.begin(); it != list_.end(); ++it) {\n      batch_count++;\n      total_bytes += (*it).size;\n      total_overhead_bytes += (*it).size - (*it).quarantined_size();\n      total_quarantine_chunks += (*it).count;\n    }\n    uptr quarantine_chunks_capacity = batch_count * QuarantineBatch::kSize;\n    int chunks_usage_percent = quarantine_chunks_capacity == 0 ?\n        0 : total_quarantine_chunks * 100 / 
quarantine_chunks_capacity;\n    uptr total_quarantined_bytes = total_bytes - total_overhead_bytes;\n    int memory_overhead_percent = total_quarantined_bytes == 0 ?\n        0 : total_overhead_bytes * 100 / total_quarantined_bytes;\n    Printf(\"Global quarantine stats: batches: %zd; bytes: %zd (user: %zd); \"\n           \"chunks: %zd (capacity: %zd); %d%% chunks used; %d%% memory overhead\"\n           \"\\n\",\n           batch_count, total_bytes, total_quarantined_bytes,\n           total_quarantine_chunks, quarantine_chunks_capacity,\n           chunks_usage_percent, memory_overhead_percent);\n  }\n\n private:\n  typedef IntrusiveList<QuarantineBatch> List;\n\n  List list_;\n  atomic_uintptr_t size_;\n\n  void SizeAdd(uptr add) {\n    atomic_store_relaxed(&size_, Size() + add);\n  }\n  void SizeSub(uptr sub) {\n    atomic_store_relaxed(&size_, Size() - sub);\n  }\n};\n\n} // namespace __sanitizer\n\n#endif // SANITIZER_QUARANTINE_H\n"
  },
  {
    "path": "runtime/sanitizer_common/sanitizer_report_decorator.h",
    "content": "//===-- sanitizer_report_decorator.h ----------------------------*- C++ -*-===//\n//\n// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n// See https://llvm.org/LICENSE.txt for license information.\n// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n//\n//===----------------------------------------------------------------------===//\n//\n// Tags to decorate the sanitizer reports.\n// Currently supported tags:\n//   * None.\n//   * ANSI color sequences.\n//\n//===----------------------------------------------------------------------===//\n\n#ifndef SANITIZER_REPORT_DECORATOR_H\n#define SANITIZER_REPORT_DECORATOR_H\n\n#include \"sanitizer_common.h\"\n\nnamespace __sanitizer {\nclass SanitizerCommonDecorator {\n  // FIXME: This is not portable. It assumes the special strings are printed to\n  // stdout, which is not the case on Windows (see SetConsoleTextAttribute()).\n public:\n  SanitizerCommonDecorator() : ansi_(ColorizeReports()) {}\n  const char *Bold() const { return ansi_ ? \"\\033[1m\" : \"\"; }\n  const char *Default() const { return ansi_ ? \"\\033[1m\\033[0m\"  : \"\"; }\n  const char *Warning() const { return Red(); }\n  const char *Error() const { return Red(); }\n  const char *MemoryByte() const { return Magenta(); }\n\n protected:\n  const char *Black()   const { return ansi_ ? \"\\033[1m\\033[30m\" : \"\"; }\n  const char *Red()     const { return ansi_ ? \"\\033[1m\\033[31m\" : \"\"; }\n  const char *Green()   const { return ansi_ ? \"\\033[1m\\033[32m\" : \"\"; }\n  const char *Yellow()  const { return ansi_ ? \"\\033[1m\\033[33m\" : \"\"; }\n  const char *Blue()    const { return ansi_ ? \"\\033[1m\\033[34m\" : \"\"; }\n  const char *Magenta() const { return ansi_ ? \"\\033[1m\\033[35m\" : \"\"; }\n  const char *Cyan()    const { return ansi_ ? \"\\033[1m\\033[36m\" : \"\"; }\n  const char *White()   const { return ansi_ ? 
\"\\033[1m\\033[37m\" : \"\"; }\n private:\n  bool ansi_;\n};\n\n}  // namespace __sanitizer\n\n#endif  // SANITIZER_REPORT_DECORATOR_H\n"
  },
  {
    "path": "runtime/sanitizer_common/sanitizer_ring_buffer.h",
    "content": "//===-- sanitizer_ring_buffer.h ---------------------------------*- C++ -*-===//\n//\n// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n// See https://llvm.org/LICENSE.txt for license information.\n// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n//\n//===----------------------------------------------------------------------===//\n//\n// Simple ring buffer.\n//\n//===----------------------------------------------------------------------===//\n#ifndef SANITIZER_RING_BUFFER_H\n#define SANITIZER_RING_BUFFER_H\n\n#include \"sanitizer_common.h\"\n\nnamespace __sanitizer {\n// RingBuffer<T>: fixed-size ring buffer optimized for speed of push().\n// T should be a POD type and sizeof(T) should be divisible by sizeof(void*).\n// At creation, all elements are zero.\ntemplate<class T>\nclass RingBuffer {\n public:\n  COMPILER_CHECK(sizeof(T) % sizeof(void *) == 0);\n  static RingBuffer *New(uptr Size) {\n    void *Ptr = MmapOrDie(SizeInBytes(Size), \"RingBuffer\");\n    RingBuffer *RB = reinterpret_cast<RingBuffer*>(Ptr);\n    uptr End = reinterpret_cast<uptr>(Ptr) + SizeInBytes(Size);\n    RB->last_ = RB->next_ = reinterpret_cast<T*>(End - sizeof(T));\n    return RB;\n  }\n  void Delete() {\n    UnmapOrDie(this, SizeInBytes(size()));\n  }\n  uptr size() const {\n    return last_ + 1 -\n           reinterpret_cast<T *>(reinterpret_cast<uptr>(this) +\n                                 2 * sizeof(T *));\n  }\n\n  static uptr SizeInBytes(uptr Size) {\n    return Size * sizeof(T) + 2 * sizeof(T*);\n  }\n\n  uptr SizeInBytes() { return SizeInBytes(size()); }\n\n  void push(T t) {\n    *next_ = t;\n    next_--;\n    // The condition below works only if sizeof(T) is divisible by sizeof(T*).\n    if (next_ <= reinterpret_cast<T*>(&next_))\n      next_ = last_;\n  }\n\n  T operator[](uptr Idx) const {\n    CHECK_LT(Idx, size());\n    sptr IdxNext = Idx + 1;\n    if (IdxNext > last_ - next_)\n      IdxNext -= size();\n    return 
next_[IdxNext];\n  }\n\n private:\n  RingBuffer() {}\n  ~RingBuffer() {}\n  RingBuffer(const RingBuffer&) = delete;\n\n  // Data layout:\n  // LNDDDDDDDD\n  // D: data elements.\n  // L: last_, always points to the last data element.\n  // N: next_, initially equals to last_, is decremented on every push,\n  //    wraps around if it's less or equal than its own address.\n  T *last_;\n  T *next_;\n  T data_[1];  // flexible array.\n};\n\n// A ring buffer with externally provided storage that encodes its state in 8\n// bytes. Has significant constraints on size and alignment of storage.\n// See a comment in hwasan/hwasan_thread_list.h for the motivation behind this.\n#if SANITIZER_WORDSIZE == 64\ntemplate <class T>\nclass CompactRingBuffer {\n  // Top byte of long_ stores the buffer size in pages.\n  // Lower bytes store the address of the next buffer element.\n  static constexpr int kPageSizeBits = 12;\n  static constexpr int kSizeShift = 56;\n  static constexpr uptr kNextMask = (1ULL << kSizeShift) - 1;\n\n  uptr GetStorageSize() const { return (long_ >> kSizeShift) << kPageSizeBits; }\n\n  void Init(void *storage, uptr size) {\n    CHECK_EQ(sizeof(CompactRingBuffer<T>), sizeof(void *));\n    CHECK(IsPowerOfTwo(size));\n    CHECK_GE(size, 1 << kPageSizeBits);\n    CHECK_LE(size, 128 << kPageSizeBits);\n    CHECK_EQ(size % 4096, 0);\n    CHECK_EQ(size % sizeof(T), 0);\n    CHECK_EQ((uptr)storage % (size * 2), 0);\n    long_ = (uptr)storage | ((size >> kPageSizeBits) << kSizeShift);\n  }\n\n  void SetNext(const T *next) {\n    long_ = (long_ & ~kNextMask) | (uptr)next;\n  }\n\n public:\n  CompactRingBuffer(void *storage, uptr size) {\n    Init(storage, size);\n  }\n\n  // A copy constructor of sorts.\n  CompactRingBuffer(const CompactRingBuffer &other, void *storage) {\n    uptr size = other.GetStorageSize();\n    internal_memcpy(storage, other.StartOfStorage(), size);\n    Init(storage, size);\n    uptr Idx = other.Next() - (const T *)other.StartOfStorage();\n    
SetNext((const T *)storage + Idx);\n  }\n\n  T *Next() const { return (T *)(long_ & kNextMask); }\n\n  void *StartOfStorage() const {\n    return (void *)((uptr)Next() & ~(GetStorageSize() - 1));\n  }\n\n  void *EndOfStorage() const {\n    return (void *)((uptr)StartOfStorage() + GetStorageSize());\n  }\n\n  uptr size() const { return GetStorageSize() / sizeof(T); }\n\n  void push(T t) {\n    T *next = Next();\n    *next = t;\n    next++;\n    next = (T *)((uptr)next & ~GetStorageSize());\n    SetNext(next);\n  }\n\n  const T &operator[](uptr Idx) const {\n    CHECK_LT(Idx, size());\n    const T *Begin = (const T *)StartOfStorage();\n    sptr StorageIdx = Next() - Begin;\n    StorageIdx -= (sptr)(Idx + 1);\n    if (StorageIdx < 0)\n      StorageIdx += size();\n    return Begin[StorageIdx];\n  }\n\n public:\n  ~CompactRingBuffer() {}\n  CompactRingBuffer(const CompactRingBuffer &) = delete;\n\n  uptr long_;\n};\n#endif\n}  // namespace __sanitizer\n\n#endif  // SANITIZER_RING_BUFFER_H\n"
  },
  {
    "path": "runtime/sanitizer_common/sanitizer_rtems.cpp",
    "content": "//===-- sanitizer_rtems.cpp -----------------------------------------------===//\n//\n// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n// See https://llvm.org/LICENSE.txt for license information.\n// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n//\n//===----------------------------------------------------------------------===//\n//\n// This file is shared between various sanitizers' runtime libraries and\n// implements RTEMS-specific functions.\n//===----------------------------------------------------------------------===//\n\n#include \"sanitizer_rtems.h\"\n#if SANITIZER_RTEMS\n\n#define posix_memalign __real_posix_memalign\n#define free __real_free\n#define memset __real_memset\n\n#include \"sanitizer_file.h\"\n#include \"sanitizer_symbolizer.h\"\n#include <errno.h>\n#include <fcntl.h>\n#include <pthread.h>\n#include <sched.h>\n#include <stdio.h>\n#include <stdlib.h>\n#include <string.h>\n#include <unistd.h>\n\n// There is no mmap on RTEMS.  
Use memalign, etc.\n#define __mmap_alloc_aligned posix_memalign\n#define __mmap_free free\n#define __mmap_memset memset\n\nnamespace __sanitizer {\n\n#include \"sanitizer_syscall_generic.inc\"\n\nvoid NORETURN internal__exit(int exitcode) {\n  _exit(exitcode);\n}\n\nuptr internal_sched_yield() {\n  return sched_yield();\n}\n\nuptr internal_getpid() {\n  return getpid();\n}\n\nint internal_dlinfo(void *handle, int request, void *p) {\n  UNIMPLEMENTED();\n}\n\nbool FileExists(const char *filename) {\n  struct stat st;\n  if (stat(filename, &st))\n    return false;\n  // Sanity check: filename is a regular file.\n  return S_ISREG(st.st_mode);\n}\n\nuptr GetThreadSelf() { return static_cast<uptr>(pthread_self()); }\n\ntid_t GetTid() { return GetThreadSelf(); }\n\nvoid Abort() { abort(); }\n\nint Atexit(void (*function)(void)) { return atexit(function); }\n\nvoid SleepForSeconds(int seconds) { sleep(seconds); }\n\nvoid SleepForMillis(int millis) { usleep(millis * 1000); }\n\nbool SupportsColoredOutput(fd_t fd) { return false; }\n\nvoid GetThreadStackTopAndBottom(bool at_initialization,\n                                uptr *stack_top, uptr *stack_bottom) {\n  pthread_attr_t attr;\n  pthread_attr_init(&attr);\n  CHECK_EQ(pthread_getattr_np(pthread_self(), &attr), 0);\n  void *base = nullptr;\n  size_t size = 0;\n  CHECK_EQ(pthread_attr_getstack(&attr, &base, &size), 0);\n  CHECK_EQ(pthread_attr_destroy(&attr), 0);\n\n  *stack_bottom = reinterpret_cast<uptr>(base);\n  *stack_top = *stack_bottom + size;\n}\n\nvoid GetThreadStackAndTls(bool main, uptr *stk_addr, uptr *stk_size,\n                          uptr *tls_addr, uptr *tls_size) {\n  uptr stack_top, stack_bottom;\n  GetThreadStackTopAndBottom(main, &stack_top, &stack_bottom);\n  *stk_addr = stack_bottom;\n  *stk_size = stack_top - stack_bottom;\n  *tls_addr = *tls_size = 0;\n}\n\nvoid InitializePlatformEarly() {}\nvoid MaybeReexec() {}\nvoid CheckASLR() {}\nvoid CheckMPROTECT() {}\nvoid DisableCoreDumperIfNecessary() 
{}\nvoid InstallDeadlySignalHandlers(SignalHandlerType handler) {}\nvoid SetAlternateSignalStack() {}\nvoid UnsetAlternateSignalStack() {}\nvoid InitTlsSize() {}\n\nvoid SignalContext::DumpAllRegisters(void *context) {}\nconst char *DescribeSignalOrException(int signo) { UNIMPLEMENTED(); }\n\nenum MutexState { MtxUnlocked = 0, MtxLocked = 1, MtxSleeping = 2 };\n\nBlockingMutex::BlockingMutex() {\n  internal_memset(this, 0, sizeof(*this));\n}\n\nvoid BlockingMutex::Lock() {\n  CHECK_EQ(owner_, 0);\n  atomic_uint32_t *m = reinterpret_cast<atomic_uint32_t *>(&opaque_storage_);\n  if (atomic_exchange(m, MtxLocked, memory_order_acquire) == MtxUnlocked)\n    return;\n  while (atomic_exchange(m, MtxSleeping, memory_order_acquire) != MtxUnlocked) {\n    internal_sched_yield();\n  }\n}\n\nvoid BlockingMutex::Unlock() {\n  atomic_uint32_t *m = reinterpret_cast<atomic_uint32_t *>(&opaque_storage_);\n  u32 v = atomic_exchange(m, MtxUnlocked, memory_order_release);\n  CHECK_NE(v, MtxUnlocked);\n}\n\nvoid BlockingMutex::CheckLocked() {\n  atomic_uint32_t *m = reinterpret_cast<atomic_uint32_t *>(&opaque_storage_);\n  CHECK_NE(MtxUnlocked, atomic_load(m, memory_order_relaxed));\n}\n\nuptr GetPageSize() { return getpagesize(); }\n\nuptr GetMmapGranularity() { return GetPageSize(); }\n\nuptr GetMaxVirtualAddress() {\n  return (1ULL << 32) - 1;  // 0xffffffff\n}\n\nvoid *MmapOrDie(uptr size, const char *mem_type, bool raw_report) {\n  void* ptr = 0;\n  int res = __mmap_alloc_aligned(&ptr, GetPageSize(), size);\n  if (UNLIKELY(res))\n    ReportMmapFailureAndDie(size, mem_type, \"allocate\", res, raw_report);\n  __mmap_memset(ptr, 0, size);\n  IncreaseTotalMmap(size);\n  return ptr;\n}\n\nvoid *MmapOrDieOnFatalError(uptr size, const char *mem_type) {\n  void* ptr = 0;\n  int res = __mmap_alloc_aligned(&ptr, GetPageSize(), size);\n  if (UNLIKELY(res)) {\n    if (res == ENOMEM)\n      return nullptr;\n    ReportMmapFailureAndDie(size, mem_type, \"allocate\", false);\n  }\n  
__mmap_memset(ptr, 0, size);\n  IncreaseTotalMmap(size);\n  return ptr;\n}\n\nvoid *MmapAlignedOrDieOnFatalError(uptr size, uptr alignment,\n                                   const char *mem_type) {\n  CHECK(IsPowerOfTwo(size));\n  CHECK(IsPowerOfTwo(alignment));\n  void* ptr = 0;\n  int res = __mmap_alloc_aligned(&ptr, alignment, size);\n  if (res)\n    ReportMmapFailureAndDie(size, mem_type, \"align allocate\", res, false);\n  __mmap_memset(ptr, 0, size);\n  IncreaseTotalMmap(size);\n  return ptr;\n}\n\nvoid *MmapNoReserveOrDie(uptr size, const char *mem_type) {\n  return MmapOrDie(size, mem_type, false);\n}\n\nvoid UnmapOrDie(void *addr, uptr size) {\n  if (!addr || !size) return;\n  __mmap_free(addr);\n  DecreaseTotalMmap(size);\n}\n\nfd_t OpenFile(const char *filename, FileAccessMode mode, error_t *errno_p) {\n  int flags;\n  switch (mode) {\n    case RdOnly: flags = O_RDONLY; break;\n    case WrOnly: flags = O_WRONLY | O_CREAT | O_TRUNC; break;\n    case RdWr: flags = O_RDWR | O_CREAT; break;\n  }\n  fd_t res = open(filename, flags, 0660);\n  if (internal_iserror(res, errno_p))\n    return kInvalidFd;\n  return res;\n}\n\nvoid CloseFile(fd_t fd) {\n  close(fd);\n}\n\nbool ReadFromFile(fd_t fd, void *buff, uptr buff_size, uptr *bytes_read,\n                  error_t *error_p) {\n  uptr res = read(fd, buff, buff_size);\n  if (internal_iserror(res, error_p))\n    return false;\n  if (bytes_read)\n    *bytes_read = res;\n  return true;\n}\n\nbool WriteToFile(fd_t fd, const void *buff, uptr buff_size, uptr *bytes_written,\n                 error_t *error_p) {\n  uptr res = write(fd, buff, buff_size);\n  if (internal_iserror(res, error_p))\n    return false;\n  if (bytes_written)\n    *bytes_written = res;\n  return true;\n}\n\nvoid ReleaseMemoryPagesToOS(uptr beg, uptr end) {}\nvoid DumpProcessMap() {}\n\n// There is no page protection so everything is \"accessible.\"\nbool IsAccessibleMemoryRange(uptr beg, uptr size) {\n  return true;\n}\n\nchar **GetArgv() { 
return nullptr; }\nchar **GetEnviron() { return nullptr; }\n\nconst char *GetEnv(const char *name) {\n  return getenv(name);\n}\n\nuptr ReadBinaryName(/*out*/char *buf, uptr buf_len) {\n  internal_strncpy(buf, \"StubBinaryName\", buf_len);\n  return internal_strlen(buf);\n}\n\nuptr ReadLongProcessName(/*out*/ char *buf, uptr buf_len) {\n  internal_strncpy(buf, \"StubProcessName\", buf_len);\n  return internal_strlen(buf);\n}\n\nbool IsPathSeparator(const char c) {\n  return c == '/';\n}\n\nbool IsAbsolutePath(const char *path) {\n  return path != nullptr && IsPathSeparator(path[0]);\n}\n\nvoid ReportFile::Write(const char *buffer, uptr length) {\n  SpinMutexLock l(mu);\n  static const char *kWriteError =\n      \"ReportFile::Write() can't output requested buffer!\\n\";\n  ReopenIfNecessary();\n  if (length != write(fd, buffer, length)) {\n    write(fd, kWriteError, internal_strlen(kWriteError));\n    Die();\n  }\n}\n\nuptr MainThreadStackBase, MainThreadStackSize;\nuptr MainThreadTlsBase, MainThreadTlsSize;\n\n} // namespace __sanitizer\n\n#endif  // SANITIZER_RTEMS\n"
  },
  {
    "path": "runtime/sanitizer_common/sanitizer_rtems.h",
    "content": "//===-- sanitizer_rtems.h ---------------------------------------*- C++ -*-===//\n//\n// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n// See https://llvm.org/LICENSE.txt for license information.\n// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n//\n//===----------------------------------------------------------------------===//\n//\n// This file is shared between various sanitizers' runtime libraries and\n// provides definitions for RTEMS-specific functions.\n//===----------------------------------------------------------------------===//\n#ifndef SANITIZER_RTEMS_H\n#define SANITIZER_RTEMS_H\n\n#include \"sanitizer_platform.h\"\n#if SANITIZER_RTEMS\n#include \"sanitizer_common.h\"\n\n#endif  // SANITIZER_RTEMS\n#endif  // SANITIZER_RTEMS_H\n"
  },
  {
    "path": "runtime/sanitizer_common/sanitizer_signal_interceptors.inc",
    "content": "//===-- sanitizer_signal_interceptors.inc -----------------------*- C++ -*-===//\n//\n// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n// See https://llvm.org/LICENSE.txt for license information.\n// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n//\n//===----------------------------------------------------------------------===//\n//\n// Signal interceptors for sanitizers.\n//\n//===----------------------------------------------------------------------===//\n\n#include \"interception/interception.h\"\n#include \"sanitizer_common.h\"\n#include \"sanitizer_internal_defs.h\"\n#include \"sanitizer_platform_interceptors.h\"\n\nusing namespace __sanitizer;\n\n#if SANITIZER_NETBSD\n#define sigaction_symname __sigaction14\n#else\n#define sigaction_symname sigaction\n#endif\n\n#ifndef SIGNAL_INTERCEPTOR_SIGNAL_IMPL\n#define SIGNAL_INTERCEPTOR_SIGNAL_IMPL(func, signum, handler) \\\n  { return REAL(func)(signum, handler); }\n#endif\n\n#ifndef SIGNAL_INTERCEPTOR_SIGACTION_IMPL\n#  define SIGNAL_INTERCEPTOR_SIGACTION_IMPL(signum, act, oldact)              \\\n    {                                                                         \\\n      if (!REAL(sigaction_symname)) {                                         \\\n        Printf(                                                               \\\n            \"Warning: REAL(sigaction_symname) == nullptr. This may happen \"   \\\n            \"if you link with ubsan statically. 
Sigaction will not work.\\n\"); \\\n        return -1;                                                            \\\n      }                                                                       \\\n      return REAL(sigaction_symname)(signum, act, oldact);                    \\\n    }\n#endif\n\n#if SANITIZER_INTERCEPT_BSD_SIGNAL\nINTERCEPTOR(uptr, bsd_signal, int signum, uptr handler) {\n  if (GetHandleSignalMode(signum) == kHandleSignalExclusive) return 0;\n  SIGNAL_INTERCEPTOR_SIGNAL_IMPL(bsd_signal, signum, handler);\n}\n#define INIT_BSD_SIGNAL COMMON_INTERCEPT_FUNCTION(bsd_signal)\n#else  // SANITIZER_INTERCEPT_BSD_SIGNAL\n#define INIT_BSD_SIGNAL\n#endif  // SANITIZER_INTERCEPT_BSD_SIGNAL\n\n#if SANITIZER_INTERCEPT_SIGNAL_AND_SIGACTION\nINTERCEPTOR(uptr, signal, int signum, uptr handler) {\n  if (GetHandleSignalMode(signum) == kHandleSignalExclusive)\n    return (uptr) nullptr;\n  SIGNAL_INTERCEPTOR_SIGNAL_IMPL(signal, signum, handler);\n}\n#define INIT_SIGNAL COMMON_INTERCEPT_FUNCTION(signal)\n\nINTERCEPTOR(int, sigaction_symname, int signum,\n            const __sanitizer_sigaction *act, __sanitizer_sigaction *oldact) {\n  if (GetHandleSignalMode(signum) == kHandleSignalExclusive) {\n    if (!oldact) return 0;\n    act = nullptr;\n  }\n  SIGNAL_INTERCEPTOR_SIGACTION_IMPL(signum, act, oldact);\n}\n#define INIT_SIGACTION COMMON_INTERCEPT_FUNCTION(sigaction_symname)\n\nnamespace __sanitizer {\nint real_sigaction(int signum, const void *act, void *oldact) {\n  return REAL(sigaction_symname)(signum, (const __sanitizer_sigaction *)act,\n                         (__sanitizer_sigaction *)oldact);\n}\n}  // namespace __sanitizer\n#else  // SANITIZER_INTERCEPT_SIGNAL_AND_SIGACTION\n#define INIT_SIGNAL\n#define INIT_SIGACTION\n// We need to have defined REAL(sigaction) on other systems.\nnamespace __sanitizer {\nstruct __sanitizer_sigaction;\n}\nDEFINE_REAL(int, sigaction, int signum, const __sanitizer_sigaction *act,\n            __sanitizer_sigaction 
*oldact)\n#endif  // SANITIZER_INTERCEPT_SIGNAL_AND_SIGACTION\n\nstatic void InitializeSignalInterceptors() {\n  static bool was_called_once;\n  CHECK(!was_called_once);\n  was_called_once = true;\n\n  INIT_BSD_SIGNAL;\n  INIT_SIGNAL;\n  INIT_SIGACTION;\n}\n"
  },
  {
    "path": "runtime/sanitizer_common/sanitizer_solaris.cpp",
    "content": "//===-- sanitizer_solaris.cpp ---------------------------------------------===//\n//\n// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n// See https://llvm.org/LICENSE.txt for license information.\n// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n//\n//===----------------------------------------------------------------------===//\n//\n// This file is shared between various sanitizers' runtime libraries and\n// implements Solaris-specific functions.\n//===----------------------------------------------------------------------===//\n\n#include \"sanitizer_platform.h\"\n#if SANITIZER_SOLARIS\n\n#include <stdio.h>\n\n#include \"sanitizer_common.h\"\n#include \"sanitizer_flags.h\"\n#include \"sanitizer_internal_defs.h\"\n#include \"sanitizer_libc.h\"\n#include \"sanitizer_placement_new.h\"\n#include \"sanitizer_platform_limits_posix.h\"\n#include \"sanitizer_procmaps.h\"\n\n#include <fcntl.h>\n#include <pthread.h>\n#include <sched.h>\n#include <thread.h>\n#include <synch.h>\n#include <signal.h>\n#include <sys/mman.h>\n#include <sys/resource.h>\n#include <sys/stat.h>\n#include <sys/types.h>\n#include <dirent.h>\n#include <unistd.h>\n#include <errno.h>\n#include <stdlib.h>\n\nnamespace __sanitizer {\n\n//#include \"sanitizer_syscall_generic.inc\"\n\n#define _REAL(func) _ ## func\n#define DECLARE__REAL(ret_type, func, ...) \\\n  extern \"C\" ret_type _REAL(func)(__VA_ARGS__)\n#define DECLARE__REAL_AND_INTERNAL(ret_type, func, ...) \\\n  DECLARE__REAL(ret_type, func, __VA_ARGS__); \\\n  ret_type internal_ ## func(__VA_ARGS__)\n\n#if !defined(_LP64) && _FILE_OFFSET_BITS == 64\n#define _REAL64(func) _ ## func ## 64\n#else\n#define _REAL64(func) _REAL(func)\n#endif\n#define DECLARE__REAL64(ret_type, func, ...) \\\n  extern \"C\" ret_type _REAL64(func)(__VA_ARGS__)\n#define DECLARE__REAL_AND_INTERNAL64(ret_type, func, ...) 
\\\n  DECLARE__REAL64(ret_type, func, __VA_ARGS__); \\\n  ret_type internal_ ## func(__VA_ARGS__)\n\n// ---------------------- sanitizer_libc.h\nDECLARE__REAL_AND_INTERNAL64(uptr, mmap, void *addr, uptr /*size_t*/ length,\n                             int prot, int flags, int fd, OFF_T offset) {\n  return (uptr)_REAL64(mmap)(addr, length, prot, flags, fd, offset);\n}\n\nDECLARE__REAL_AND_INTERNAL(uptr, munmap, void *addr, uptr length) {\n  return _REAL(munmap)(addr, length);\n}\n\nDECLARE__REAL_AND_INTERNAL(int, mprotect, void *addr, uptr length, int prot) {\n  return _REAL(mprotect)(addr, length, prot);\n}\n\n// Illumos' declaration of madvise cannot be made visible if _XOPEN_SOURCE\n// is defined as g++ does on Solaris.\n//\n// This declaration is consistent with Solaris 11.4. Both Illumos and Solaris\n// versions older than 11.4 declared madvise with a caddr_t as the first\n// argument, but we don't currently support Solaris versions older than 11.4,\n// and as mentioned above the declaration is not visible on Illumos so we can\n// use any declaration we like on Illumos.\nextern \"C\" int madvise(void *, size_t, int);\n\nint internal_madvise(uptr addr, uptr length, int advice) {\n  return madvise((void *)addr, length, advice);\n}\n\nDECLARE__REAL_AND_INTERNAL(uptr, close, fd_t fd) {\n  return _REAL(close)(fd);\n}\n\nextern \"C\" int _REAL64(open)(const char *, int, ...);\n\nuptr internal_open(const char *filename, int flags) {\n  return _REAL64(open)(filename, flags);\n}\n\nuptr internal_open(const char *filename, int flags, u32 mode) {\n  return _REAL64(open)(filename, flags, mode);\n}\n\nDECLARE__REAL_AND_INTERNAL(uptr, read, fd_t fd, void *buf, uptr count) {\n  return _REAL(read)(fd, buf, count);\n}\n\nDECLARE__REAL_AND_INTERNAL(uptr, write, fd_t fd, const void *buf, uptr count) {\n  return _REAL(write)(fd, buf, count);\n}\n\n// FIXME: There's only _ftruncate64 beginning with Solaris 11.\nDECLARE__REAL_AND_INTERNAL(uptr, ftruncate, fd_t fd, uptr size) {\n  
return ftruncate(fd, size);\n}\n\nDECLARE__REAL_AND_INTERNAL64(uptr, stat, const char *path, void *buf) {\n  return _REAL64(stat)(path, (struct stat *)buf);\n}\n\nDECLARE__REAL_AND_INTERNAL64(uptr, lstat, const char *path, void *buf) {\n  return _REAL64(lstat)(path, (struct stat *)buf);\n}\n\nDECLARE__REAL_AND_INTERNAL64(uptr, fstat, fd_t fd, void *buf) {\n  return _REAL64(fstat)(fd, (struct stat *)buf);\n}\n\nuptr internal_filesize(fd_t fd) {\n  struct stat st;\n  if (internal_fstat(fd, &st))\n    return -1;\n  return (uptr)st.st_size;\n}\n\nDECLARE__REAL_AND_INTERNAL(uptr, dup, int oldfd) {\n  return _REAL(dup)(oldfd);\n}\n\nDECLARE__REAL_AND_INTERNAL(uptr, dup2, int oldfd, int newfd) {\n  return _REAL(dup2)(oldfd, newfd);\n}\n\nDECLARE__REAL_AND_INTERNAL(uptr, readlink, const char *path, char *buf,\n                           uptr bufsize) {\n  return _REAL(readlink)(path, buf, bufsize);\n}\n\nDECLARE__REAL_AND_INTERNAL(uptr, unlink, const char *path) {\n  return _REAL(unlink)(path);\n}\n\nDECLARE__REAL_AND_INTERNAL(uptr, rename, const char *oldpath,\n                           const char *newpath) {\n  return _REAL(rename)(oldpath, newpath);\n}\n\nDECLARE__REAL_AND_INTERNAL(uptr, sched_yield, void) {\n  return sched_yield();\n}\n\nDECLARE__REAL_AND_INTERNAL(void, usleep, u64 useconds) {\n  struct timespec ts;\n  ts.tv_sec = useconds / 1000000;\n  ts.tv_nsec = (useconds % 1000000) * 1000;\n  nanosleep(&ts, nullptr);\n}\n\nDECLARE__REAL_AND_INTERNAL(uptr, execve, const char *filename,\n                           char *const argv[], char *const envp[]) {\n  return _REAL(execve)(filename, argv, envp);\n}\n\nDECLARE__REAL_AND_INTERNAL(uptr, waitpid, int pid, int *status, int options) {\n  return _REAL(waitpid)(pid, status, options);\n}\n\nDECLARE__REAL_AND_INTERNAL(uptr, getpid, void) {\n  return _REAL(getpid)();\n}\n\n// FIXME: This might be wrong: _getdents doesn't take a struct linux_dirent *.\nDECLARE__REAL_AND_INTERNAL64(uptr, getdents, fd_t fd, struct 
linux_dirent *dirp,\n                             unsigned int count) {\n  return _REAL64(getdents)(fd, dirp, count);\n}\n\nDECLARE__REAL_AND_INTERNAL64(uptr, lseek, fd_t fd, OFF_T offset, int whence) {\n  return _REAL64(lseek)(fd, offset, whence);\n}\n\n// FIXME: This might be wrong: _sigfillset doesn't take a\n// __sanitizer_sigset_t *.\nDECLARE__REAL_AND_INTERNAL(void, sigfillset, __sanitizer_sigset_t *set) {\n  _REAL(sigfillset)(set);\n}\n\n// FIXME: This might be wrong: _sigprocmask doesn't take __sanitizer_sigset_t *.\nDECLARE__REAL_AND_INTERNAL(uptr, sigprocmask, int how,\n                           __sanitizer_sigset_t *set,\n                           __sanitizer_sigset_t *oldset) {\n  return _REAL(sigprocmask)(how, set, oldset);\n}\n\nDECLARE__REAL_AND_INTERNAL(int, fork, void) {\n  // TODO(glider): this may call user's pthread_atfork() handlers which is bad.\n  return _REAL(fork)();\n}\n\nu64 NanoTime() {\n  return gethrtime();\n}\n\nuptr internal_clock_gettime(__sanitizer_clockid_t clk_id, void *tp) {\n  // FIXME: No internal variant.\n  return clock_gettime(clk_id, (timespec *)tp);\n}\n\n// ----------------- sanitizer_common.h\nvoid FutexWait(atomic_uint32_t *p, u32 cmp) {\n  // FIXME: implement actual blocking.\n  sched_yield();\n}\n\nvoid FutexWake(atomic_uint32_t *p, u32 count) {}\n\n}  // namespace __sanitizer\n\n#endif  // SANITIZER_SOLARIS\n"
  },
  {
    "path": "runtime/sanitizer_common/sanitizer_stack_store.cpp",
    "content": "//===-- sanitizer_stack_store.cpp -------------------------------*- C++ -*-===//\n//\n// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n// See https://llvm.org/LICENSE.txt for license information.\n// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n//\n//===----------------------------------------------------------------------===//\n\n#include \"sanitizer_stack_store.h\"\n\n#include \"sanitizer_atomic.h\"\n#include \"sanitizer_common.h\"\n#include \"sanitizer_internal_defs.h\"\n#include \"sanitizer_leb128.h\"\n#include \"sanitizer_lzw.h\"\n#include \"sanitizer_placement_new.h\"\n#include \"sanitizer_stacktrace.h\"\n\nnamespace __sanitizer {\n\nnamespace {\nstruct StackTraceHeader {\n  static constexpr u32 kStackSizeBits = 8;\n\n  u8 size;\n  u8 tag;\n  explicit StackTraceHeader(const StackTrace &trace)\n      : size(Min<uptr>(trace.size, (1u << 8) - 1)), tag(trace.tag) {\n    CHECK_EQ(trace.tag, static_cast<uptr>(tag));\n  }\n  explicit StackTraceHeader(uptr h)\n      : size(h & ((1 << kStackSizeBits) - 1)), tag(h >> kStackSizeBits) {}\n\n  uptr ToUptr() const {\n    return static_cast<uptr>(size) | (static_cast<uptr>(tag) << kStackSizeBits);\n  }\n};\n}  // namespace\n\nStackStore::Id StackStore::Store(const StackTrace &trace, uptr *pack) {\n  if (!trace.size && !trace.tag)\n    return 0;\n  StackTraceHeader h(trace);\n  uptr idx = 0;\n  *pack = 0;\n  uptr *stack_trace = Alloc(h.size + 1, &idx, pack);\n  *stack_trace = h.ToUptr();\n  internal_memcpy(stack_trace + 1, trace.trace, h.size * sizeof(uptr));\n  *pack += blocks_[GetBlockIdx(idx)].Stored(h.size + 1);\n  return OffsetToId(idx);\n}\n\nStackTrace StackStore::Load(Id id) {\n  if (!id)\n    return {};\n  uptr idx = IdToOffset(id);\n  uptr block_idx = GetBlockIdx(idx);\n  CHECK_LT(block_idx, ARRAY_SIZE(blocks_));\n  const uptr *stack_trace = blocks_[block_idx].GetOrUnpack(this);\n  if (!stack_trace)\n    return {};\n  stack_trace += GetInBlockIdx(idx);\n  
StackTraceHeader h(*stack_trace);\n  return StackTrace(stack_trace + 1, h.size, h.tag);\n}\n\nuptr StackStore::Allocated() const {\n  return atomic_load_relaxed(&allocated_) + sizeof(*this);\n}\n\nuptr *StackStore::Alloc(uptr count, uptr *idx, uptr *pack) {\n  for (;;) {\n    // Optimisic lock-free allocation, essentially try to bump the\n    // total_frames_.\n    uptr start = atomic_fetch_add(&total_frames_, count, memory_order_relaxed);\n    uptr block_idx = GetBlockIdx(start);\n    uptr last_idx = GetBlockIdx(start + count - 1);\n    if (LIKELY(block_idx == last_idx)) {\n      // Fits into the a single block.\n      CHECK_LT(block_idx, ARRAY_SIZE(blocks_));\n      *idx = start;\n      return blocks_[block_idx].GetOrCreate(this) + GetInBlockIdx(start);\n    }\n\n    // Retry. We can't use range allocated in two different blocks.\n    CHECK_LE(count, kBlockSizeFrames);\n    uptr in_first = kBlockSizeFrames - GetInBlockIdx(start);\n    // Mark tail/head of these blocks as \"stored\".to avoid waiting before we can\n    // Pack().\n    *pack += blocks_[block_idx].Stored(in_first);\n    *pack += blocks_[last_idx].Stored(count - in_first);\n  }\n}\n\nvoid *StackStore::Map(uptr size, const char *mem_type) {\n  atomic_fetch_add(&allocated_, size, memory_order_relaxed);\n  return MmapNoReserveOrDie(size, mem_type);\n}\n\nvoid StackStore::Unmap(void *addr, uptr size) {\n  atomic_fetch_sub(&allocated_, size, memory_order_relaxed);\n  UnmapOrDie(addr, size);\n}\n\nuptr StackStore::Pack(Compression type) {\n  uptr res = 0;\n  for (BlockInfo &b : blocks_) res += b.Pack(type, this);\n  return res;\n}\n\nvoid StackStore::LockAll() {\n  for (BlockInfo &b : blocks_) b.Lock();\n}\n\nvoid StackStore::UnlockAll() {\n  for (BlockInfo &b : blocks_) b.Unlock();\n}\n\nvoid StackStore::TestOnlyUnmap() {\n  for (BlockInfo &b : blocks_) b.TestOnlyUnmap(this);\n  internal_memset(this, 0, sizeof(*this));\n}\n\nuptr *StackStore::BlockInfo::Get() const {\n  // Idiomatic double-checked locking 
uses memory_order_acquire here. But\n  // relaxed is fine for us, justification is similar to\n  // TwoLevelMap::GetOrCreate.\n  return reinterpret_cast<uptr *>(atomic_load_relaxed(&data_));\n}\n\nuptr *StackStore::BlockInfo::Create(StackStore *store) {\n  SpinMutexLock l(&mtx_);\n  uptr *ptr = Get();\n  if (!ptr) {\n    ptr = reinterpret_cast<uptr *>(store->Map(kBlockSizeBytes, \"StackStore\"));\n    atomic_store(&data_, reinterpret_cast<uptr>(ptr), memory_order_release);\n  }\n  return ptr;\n}\n\nuptr *StackStore::BlockInfo::GetOrCreate(StackStore *store) {\n  uptr *ptr = Get();\n  if (LIKELY(ptr))\n    return ptr;\n  return Create(store);\n}\n\nclass SLeb128Encoder {\n public:\n  SLeb128Encoder(u8 *begin, u8 *end) : begin(begin), end(end) {}\n\n  bool operator==(const SLeb128Encoder &other) const {\n    return begin == other.begin;\n  }\n\n  bool operator!=(const SLeb128Encoder &other) const {\n    return begin != other.begin;\n  }\n\n  SLeb128Encoder &operator=(uptr v) {\n    sptr diff = v - previous;\n    begin = EncodeSLEB128(diff, begin, end);\n    previous = v;\n    return *this;\n  }\n  SLeb128Encoder &operator*() { return *this; }\n  SLeb128Encoder &operator++() { return *this; }\n\n  u8 *base() const { return begin; }\n\n private:\n  u8 *begin;\n  u8 *end;\n  uptr previous = 0;\n};\n\nclass SLeb128Decoder {\n public:\n  SLeb128Decoder(const u8 *begin, const u8 *end) : begin(begin), end(end) {}\n\n  bool operator==(const SLeb128Decoder &other) const {\n    return begin == other.begin;\n  }\n\n  bool operator!=(const SLeb128Decoder &other) const {\n    return begin != other.begin;\n  }\n\n  uptr operator*() {\n    sptr diff;\n    begin = DecodeSLEB128(begin, end, &diff);\n    previous += diff;\n    return previous;\n  }\n  SLeb128Decoder &operator++() { return *this; }\n\n  SLeb128Decoder operator++(int) { return *this; }\n\n private:\n  const u8 *begin;\n  const u8 *end;\n  uptr previous = 0;\n};\n\nstatic u8 *CompressDelta(const uptr *from, const uptr 
*from_end, u8 *to,\n                         u8 *to_end) {\n  SLeb128Encoder encoder(to, to_end);\n  for (; from != from_end; ++from, ++encoder) *encoder = *from;\n  return encoder.base();\n}\n\nstatic uptr *UncompressDelta(const u8 *from, const u8 *from_end, uptr *to,\n                             uptr *to_end) {\n  SLeb128Decoder decoder(from, from_end);\n  SLeb128Decoder end(from_end, from_end);\n  for (; decoder != end; ++to, ++decoder) *to = *decoder;\n  CHECK_EQ(to, to_end);\n  return to;\n}\n\nstatic u8 *CompressLzw(const uptr *from, const uptr *from_end, u8 *to,\n                       u8 *to_end) {\n  SLeb128Encoder encoder(to, to_end);\n  encoder = LzwEncode<uptr>(from, from_end, encoder);\n  return encoder.base();\n}\n\nstatic uptr *UncompressLzw(const u8 *from, const u8 *from_end, uptr *to,\n                           uptr *to_end) {\n  SLeb128Decoder decoder(from, from_end);\n  SLeb128Decoder end(from_end, from_end);\n  to = LzwDecode<uptr>(decoder, end, to);\n  CHECK_EQ(to, to_end);\n  return to;\n}\n\n#if defined(_MSC_VER) && !defined(__clang__)\n#  pragma warning(push)\n// Disable 'nonstandard extension used: zero-sized array in struct/union'.\n#  pragma warning(disable : 4200)\n#endif\nnamespace {\nstruct PackedHeader {\n  uptr size;\n  StackStore::Compression type;\n  u8 data[];\n};\n}  // namespace\n#if defined(_MSC_VER) && !defined(__clang__)\n#  pragma warning(pop)\n#endif\n\nuptr *StackStore::BlockInfo::GetOrUnpack(StackStore *store) {\n  SpinMutexLock l(&mtx_);\n  switch (state) {\n    case State::Storing:\n      state = State::Unpacked;\n      FALLTHROUGH;\n    case State::Unpacked:\n      return Get();\n    case State::Packed:\n      break;\n  }\n\n  u8 *ptr = reinterpret_cast<u8 *>(Get());\n  CHECK_NE(nullptr, ptr);\n  const PackedHeader *header = reinterpret_cast<const PackedHeader *>(ptr);\n  CHECK_LE(header->size, kBlockSizeBytes);\n  CHECK_GE(header->size, sizeof(PackedHeader));\n\n  uptr packed_size_aligned = RoundUpTo(header->size, 
GetPageSizeCached());\n\n  uptr *unpacked =\n      reinterpret_cast<uptr *>(store->Map(kBlockSizeBytes, \"StackStoreUnpack\"));\n\n  uptr *unpacked_end;\n  switch (header->type) {\n    case Compression::Delta:\n      unpacked_end = UncompressDelta(header->data, ptr + header->size, unpacked,\n                                     unpacked + kBlockSizeFrames);\n      break;\n    case Compression::LZW:\n      unpacked_end = UncompressLzw(header->data, ptr + header->size, unpacked,\n                                   unpacked + kBlockSizeFrames);\n      break;\n    default:\n      UNREACHABLE(\"Unexpected type\");\n      break;\n  }\n\n  CHECK_EQ(kBlockSizeFrames, unpacked_end - unpacked);\n\n  MprotectReadOnly(reinterpret_cast<uptr>(unpacked), kBlockSizeBytes);\n  atomic_store(&data_, reinterpret_cast<uptr>(unpacked), memory_order_release);\n  store->Unmap(ptr, packed_size_aligned);\n\n  state = State::Unpacked;\n  return Get();\n}\n\nuptr StackStore::BlockInfo::Pack(Compression type, StackStore *store) {\n  if (type == Compression::None)\n    return 0;\n\n  SpinMutexLock l(&mtx_);\n  switch (state) {\n    case State::Unpacked:\n    case State::Packed:\n      return 0;\n    case State::Storing:\n      break;\n  }\n\n  uptr *ptr = Get();\n  if (!ptr || !Stored(0))\n    return 0;\n\n  u8 *packed =\n      reinterpret_cast<u8 *>(store->Map(kBlockSizeBytes, \"StackStorePack\"));\n  PackedHeader *header = reinterpret_cast<PackedHeader *>(packed);\n  u8 *alloc_end = packed + kBlockSizeBytes;\n\n  u8 *packed_end = nullptr;\n  switch (type) {\n    case Compression::Delta:\n      packed_end =\n          CompressDelta(ptr, ptr + kBlockSizeFrames, header->data, alloc_end);\n      break;\n    case Compression::LZW:\n      packed_end =\n          CompressLzw(ptr, ptr + kBlockSizeFrames, header->data, alloc_end);\n      break;\n    default:\n      UNREACHABLE(\"Unexpected type\");\n      break;\n  }\n\n  header->type = type;\n  header->size = packed_end - packed;\n\n  VPrintf(1, 
\"Packed block of %zu KiB to %zu KiB\\n\", kBlockSizeBytes >> 10,\n          header->size >> 10);\n\n  if (kBlockSizeBytes - header->size < kBlockSizeBytes / 8) {\n    VPrintf(1, \"Undo and keep block unpacked\\n\");\n    MprotectReadOnly(reinterpret_cast<uptr>(ptr), kBlockSizeBytes);\n    store->Unmap(packed, kBlockSizeBytes);\n    state = State::Unpacked;\n    return 0;\n  }\n\n  uptr packed_size_aligned = RoundUpTo(header->size, GetPageSizeCached());\n  store->Unmap(packed + packed_size_aligned,\n               kBlockSizeBytes - packed_size_aligned);\n  MprotectReadOnly(reinterpret_cast<uptr>(packed), packed_size_aligned);\n\n  atomic_store(&data_, reinterpret_cast<uptr>(packed), memory_order_release);\n  store->Unmap(ptr, kBlockSizeBytes);\n\n  state = State::Packed;\n  return kBlockSizeBytes - packed_size_aligned;\n}\n\nvoid StackStore::BlockInfo::TestOnlyUnmap(StackStore *store) {\n  if (uptr *ptr = Get())\n    store->Unmap(ptr, kBlockSizeBytes);\n}\n\nbool StackStore::BlockInfo::Stored(uptr n) {\n  return n + atomic_fetch_add(&stored_, n, memory_order_release) ==\n         kBlockSizeFrames;\n}\n\nbool StackStore::BlockInfo::IsPacked() const {\n  SpinMutexLock l(&mtx_);\n  return state == State::Packed;\n}\n\n}  // namespace __sanitizer\n"
  },
  {
    "path": "runtime/sanitizer_common/sanitizer_stack_store.h",
    "content": "//===-- sanitizer_stack_store.h ---------------------------------*- C++ -*-===//\n//\n// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n// See https://llvm.org/LICENSE.txt for license information.\n// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n//\n//===----------------------------------------------------------------------===//\n\n#ifndef SANITIZER_STACK_STORE_H\n#define SANITIZER_STACK_STORE_H\n\n#include \"sanitizer_atomic.h\"\n#include \"sanitizer_common.h\"\n#include \"sanitizer_internal_defs.h\"\n#include \"sanitizer_mutex.h\"\n#include \"sanitizer_stacktrace.h\"\n\nnamespace __sanitizer {\n\nclass StackStore {\n  static constexpr uptr kBlockSizeFrames = 0x100000;\n  static constexpr uptr kBlockCount = 0x1000;\n  static constexpr uptr kBlockSizeBytes = kBlockSizeFrames * sizeof(uptr);\n\n public:\n  enum class Compression : u8 {\n    None = 0,\n    Delta,\n    LZW,\n  };\n\n  constexpr StackStore() = default;\n\n  using Id = u32;  // Enough for 2^32 * sizeof(uptr) bytes of traces.\n  static_assert(u64(kBlockCount) * kBlockSizeFrames == 1ull << (sizeof(Id) * 8),\n                \"\");\n\n  Id Store(const StackTrace &trace,\n           uptr *pack /* number of blocks completed by this call */);\n  StackTrace Load(Id id);\n  uptr Allocated() const;\n\n  // Packs all blocks which don't expect any more writes. A block is going to be\n  // packed once. 
As soon trace from that block was requested, it will unpack\n  // and stay unpacked after that.\n  // Returns the number of released bytes.\n  uptr Pack(Compression type);\n\n  void LockAll();\n  void UnlockAll();\n\n  void TestOnlyUnmap();\n\n private:\n  friend class StackStoreTest;\n  static constexpr uptr GetBlockIdx(uptr frame_idx) {\n    return frame_idx / kBlockSizeFrames;\n  }\n\n  static constexpr uptr GetInBlockIdx(uptr frame_idx) {\n    return frame_idx % kBlockSizeFrames;\n  }\n\n  static constexpr uptr IdToOffset(Id id) {\n    CHECK_NE(id, 0);\n    return id - 1;  // Avoid zero as id.\n  }\n\n  static constexpr uptr OffsetToId(Id id) {\n    // This makes UINT32_MAX to 0 and it will be retrived as and empty stack.\n    // But this is not a problem as we will not be able to store anything after\n    // that anyway.\n    return id + 1;  // Avoid zero as id.\n  }\n\n  uptr *Alloc(uptr count, uptr *idx, uptr *pack);\n\n  void *Map(uptr size, const char *mem_type);\n  void Unmap(void *addr, uptr size);\n\n  // Total number of allocated frames.\n  atomic_uintptr_t total_frames_ = {};\n\n  // Tracks total allocated memory in bytes.\n  atomic_uintptr_t allocated_ = {};\n\n  // Each block will hold pointer to exactly kBlockSizeFrames.\n  class BlockInfo {\n    atomic_uintptr_t data_;\n    // Counter to track store progress to know when we can Pack() the block.\n    atomic_uint32_t stored_;\n    // Protects alloc of new blocks.\n    mutable StaticSpinMutex mtx_;\n\n    enum class State : u8 {\n      Storing = 0,\n      Packed,\n      Unpacked,\n    };\n    State state SANITIZER_GUARDED_BY(mtx_);\n\n    uptr *Create(StackStore *store);\n\n   public:\n    uptr *Get() const;\n    uptr *GetOrCreate(StackStore *store);\n    uptr *GetOrUnpack(StackStore *store);\n    uptr Pack(Compression type, StackStore *store);\n    void TestOnlyUnmap(StackStore *store);\n    bool Stored(uptr n);\n    bool IsPacked() const;\n    void Lock() SANITIZER_NO_THREAD_SAFETY_ANALYSIS { 
mtx_.Lock(); }\n    void Unlock() SANITIZER_NO_THREAD_SAFETY_ANALYSIS { mtx_.Unlock(); }\n  };\n\n  BlockInfo blocks_[kBlockCount] = {};\n};\n\n}  // namespace __sanitizer\n\n#endif  // SANITIZER_STACK_STORE_H\n"
  },
  {
    "path": "runtime/sanitizer_common/sanitizer_stackdepot.cpp",
    "content": "//===-- sanitizer_stackdepot.cpp ------------------------------------------===//\n//\n// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n// See https://llvm.org/LICENSE.txt for license information.\n// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n//\n//===----------------------------------------------------------------------===//\n//\n// This file is shared between AddressSanitizer and ThreadSanitizer\n// run-time libraries.\n//===----------------------------------------------------------------------===//\n\n#include \"sanitizer_stackdepot.h\"\n\n#include \"sanitizer_atomic.h\"\n#include \"sanitizer_common.h\"\n#include \"sanitizer_hash.h\"\n#include \"sanitizer_mutex.h\"\n#include \"sanitizer_stack_store.h\"\n#include \"sanitizer_stackdepotbase.h\"\n\nnamespace __sanitizer {\n\nstruct StackDepotNode {\n  using hash_type = u64;\n  hash_type stack_hash;\n  u32 link;\n  StackStore::Id store_id;\n\n  static const u32 kTabSizeLog = SANITIZER_ANDROID ? 
16 : 20;\n\n  typedef StackTrace args_type;\n  bool eq(hash_type hash, const args_type &args) const {\n    return hash == stack_hash;\n  }\n  static uptr allocated();\n  static hash_type hash(const args_type &args) {\n    MurMur2Hash64Builder H(args.size * sizeof(uptr));\n    for (uptr i = 0; i < args.size; i++) H.add(args.trace[i]);\n    H.add(args.tag);\n    return H.get();\n  }\n  static bool is_valid(const args_type &args) {\n    return args.size > 0 && args.trace;\n  }\n  void store(u32 id, const args_type &args, hash_type hash);\n  args_type load(u32 id) const;\n  static StackDepotHandle get_handle(u32 id);\n\n  typedef StackDepotHandle handle_type;\n};\n\nstatic StackStore stackStore;\n\n// FIXME(dvyukov): this single reserved bit is used in TSan.\ntypedef StackDepotBase<StackDepotNode, 1, StackDepotNode::kTabSizeLog>\n    StackDepot;\nstatic StackDepot theDepot;\n// Keep mutable data out of frequently access nodes to improve caching\n// efficiency.\nstatic TwoLevelMap<atomic_uint32_t, StackDepot::kNodesSize1,\n                   StackDepot::kNodesSize2>\n    useCounts;\n\nint StackDepotHandle::use_count() const {\n  return atomic_load_relaxed(&useCounts[id_]);\n}\n\nvoid StackDepotHandle::inc_use_count_unsafe() {\n  atomic_fetch_add(&useCounts[id_], 1, memory_order_relaxed);\n}\n\nuptr StackDepotNode::allocated() {\n  return stackStore.Allocated() + useCounts.MemoryUsage();\n}\n\nstatic void CompressStackStore() {\n  u64 start = MonotonicNanoTime();\n  uptr diff = stackStore.Pack(static_cast<StackStore::Compression>(\n      Abs(common_flags()->compress_stack_depot)));\n  if (!diff)\n    return;\n  u64 finish = MonotonicNanoTime();\n  uptr total_before = theDepot.GetStats().allocated + diff;\n  VPrintf(1, \"%s: StackDepot released %zu KiB out of %zu KiB in %llu ms\\n\",\n          SanitizerToolName, diff >> 10, total_before >> 10,\n          (finish - start) / 1000000);\n}\n\nnamespace {\n\nclass CompressThread {\n public:\n  constexpr CompressThread() = 
default;\n  void NewWorkNotify();\n  void Stop();\n  void LockAndStop() SANITIZER_NO_THREAD_SAFETY_ANALYSIS;\n  void Unlock() SANITIZER_NO_THREAD_SAFETY_ANALYSIS;\n\n private:\n  enum class State {\n    NotStarted = 0,\n    Started,\n    Failed,\n    Stopped,\n  };\n\n  void Run();\n\n  bool WaitForWork() {\n    semaphore_.Wait();\n    return atomic_load(&run_, memory_order_acquire);\n  }\n\n  Semaphore semaphore_ = {};\n  StaticSpinMutex mutex_ = {};\n  State state_ SANITIZER_GUARDED_BY(mutex_) = State::NotStarted;\n  void *thread_ SANITIZER_GUARDED_BY(mutex_) = nullptr;\n  atomic_uint8_t run_ = {};\n};\n\nstatic CompressThread compress_thread;\n\nvoid CompressThread::NewWorkNotify() {\n  int compress = common_flags()->compress_stack_depot;\n  if (!compress)\n    return;\n  if (compress > 0 /* for testing or debugging */) {\n    SpinMutexLock l(&mutex_);\n    if (state_ == State::NotStarted) {\n      atomic_store(&run_, 1, memory_order_release);\n      CHECK_EQ(nullptr, thread_);\n      thread_ = internal_start_thread(\n          [](void *arg) -> void * {\n            reinterpret_cast<CompressThread *>(arg)->Run();\n            return nullptr;\n          },\n          this);\n      state_ = thread_ ? 
State::Started : State::Failed;\n    }\n    if (state_ == State::Started) {\n      semaphore_.Post();\n      return;\n    }\n  }\n  CompressStackStore();\n}\n\nvoid CompressThread::Run() {\n  VPrintf(1, \"%s: StackDepot compression thread started\\n\", SanitizerToolName);\n  while (WaitForWork()) CompressStackStore();\n  VPrintf(1, \"%s: StackDepot compression thread stopped\\n\", SanitizerToolName);\n}\n\nvoid CompressThread::Stop() {\n  void *t = nullptr;\n  {\n    SpinMutexLock l(&mutex_);\n    if (state_ != State::Started)\n      return;\n    state_ = State::Stopped;\n    CHECK_NE(nullptr, thread_);\n    t = thread_;\n    thread_ = nullptr;\n  }\n  atomic_store(&run_, 0, memory_order_release);\n  semaphore_.Post();\n  internal_join_thread(t);\n}\n\nvoid CompressThread::LockAndStop() {\n  mutex_.Lock();\n  if (state_ != State::Started)\n    return;\n  CHECK_NE(nullptr, thread_);\n\n  atomic_store(&run_, 0, memory_order_release);\n  semaphore_.Post();\n  internal_join_thread(thread_);\n  // Allow to restart after Unlock() if needed.\n  state_ = State::NotStarted;\n  thread_ = nullptr;\n}\n\nvoid CompressThread::Unlock() { mutex_.Unlock(); }\n\n}  // namespace\n\nvoid StackDepotNode::store(u32 id, const args_type &args, hash_type hash) {\n  stack_hash = hash;\n  uptr pack = 0;\n  store_id = stackStore.Store(args, &pack);\n  if (LIKELY(!pack))\n    return;\n  compress_thread.NewWorkNotify();\n}\n\nStackDepotNode::args_type StackDepotNode::load(u32 id) const {\n  if (!store_id)\n    return {};\n  return stackStore.Load(store_id);\n}\n\nStackDepotStats StackDepotGetStats() { return theDepot.GetStats(); }\n\nu32 StackDepotPut(StackTrace stack) { return theDepot.Put(stack); }\n\nStackDepotHandle StackDepotPut_WithHandle(StackTrace stack) {\n  return StackDepotNode::get_handle(theDepot.Put(stack));\n}\n\nStackTrace StackDepotGet(u32 id) {\n  return theDepot.Get(id);\n}\n\nvoid StackDepotLockAll() {\n  theDepot.LockAll();\n  compress_thread.LockAndStop();\n  
stackStore.LockAll();\n}\n\nvoid StackDepotUnlockAll() {\n  stackStore.UnlockAll();\n  compress_thread.Unlock();\n  theDepot.UnlockAll();\n}\n\nvoid StackDepotPrintAll() {\n#if !SANITIZER_GO\n  theDepot.PrintAll();\n#endif\n}\n\nvoid StackDepotStopBackgroundThread() { compress_thread.Stop(); }\n\nStackDepotHandle StackDepotNode::get_handle(u32 id) {\n  return StackDepotHandle(&theDepot.nodes[id], id);\n}\n\nvoid StackDepotTestOnlyUnmap() {\n  theDepot.TestOnlyUnmap();\n  stackStore.TestOnlyUnmap();\n}\n\n} // namespace __sanitizer\n"
  },
  {
    "path": "runtime/sanitizer_common/sanitizer_stackdepot.h",
    "content": "//===-- sanitizer_stackdepot.h ----------------------------------*- C++ -*-===//\n//\n// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n// See https://llvm.org/LICENSE.txt for license information.\n// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n//\n//===----------------------------------------------------------------------===//\n//\n// This file is shared between AddressSanitizer and ThreadSanitizer\n// run-time libraries.\n//===----------------------------------------------------------------------===//\n\n#ifndef SANITIZER_STACKDEPOT_H\n#define SANITIZER_STACKDEPOT_H\n\n#include \"sanitizer_common.h\"\n#include \"sanitizer_internal_defs.h\"\n#include \"sanitizer_stacktrace.h\"\n\nnamespace __sanitizer {\n\n// StackDepot efficiently stores huge amounts of stack traces.\nstruct StackDepotNode;\nstruct StackDepotHandle {\n  StackDepotNode *node_ = nullptr;\n  u32 id_ = 0;\n  StackDepotHandle(StackDepotNode *node, u32 id) : node_(node), id_(id) {}\n  bool valid() const { return node_; }\n  u32 id() const { return id_; }\n  int use_count() const;\n  void inc_use_count_unsafe();\n};\n\nconst int kStackDepotMaxUseCount = 1U << (SANITIZER_ANDROID ? 16 : 20);\n\nStackDepotStats StackDepotGetStats();\nu32 StackDepotPut(StackTrace stack);\nStackDepotHandle StackDepotPut_WithHandle(StackTrace stack);\n// Retrieves a stored stack trace by the id.\nStackTrace StackDepotGet(u32 id);\n\nvoid StackDepotLockAll();\nvoid StackDepotUnlockAll();\nvoid StackDepotPrintAll();\nvoid StackDepotStopBackgroundThread();\n\nvoid StackDepotTestOnlyUnmap();\n\n} // namespace __sanitizer\n\n#endif // SANITIZER_STACKDEPOT_H\n"
  },
  {
    "path": "runtime/sanitizer_common/sanitizer_stackdepotbase.h",
    "content": "//===-- sanitizer_stackdepotbase.h ------------------------------*- C++ -*-===//\n//\n// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n// See https://llvm.org/LICENSE.txt for license information.\n// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n//\n//===----------------------------------------------------------------------===//\n//\n// Implementation of a mapping from arbitrary values to unique 32-bit\n// identifiers.\n//===----------------------------------------------------------------------===//\n\n#ifndef SANITIZER_STACKDEPOTBASE_H\n#define SANITIZER_STACKDEPOTBASE_H\n\n#include <stdio.h>\n\n#include \"sanitizer_atomic.h\"\n#include \"sanitizer_flat_map.h\"\n#include \"sanitizer_internal_defs.h\"\n#include \"sanitizer_mutex.h\"\n\nnamespace __sanitizer {\n\ntemplate <class Node, int kReservedBits, int kTabSizeLog>\nclass StackDepotBase {\n  static constexpr u32 kIdSizeLog =\n      sizeof(u32) * 8 - Max(kReservedBits, 1 /* At least 1 bit for locking. 
*/);\n  static constexpr u32 kNodesSize1Log = kIdSizeLog / 2;\n  static constexpr u32 kNodesSize2Log = kIdSizeLog - kNodesSize1Log;\n  static constexpr int kTabSize = 1 << kTabSizeLog;  // Hash table size.\n  static constexpr u32 kUnlockMask = (1ull << kIdSizeLog) - 1;\n  static constexpr u32 kLockMask = ~kUnlockMask;\n\n public:\n  typedef typename Node::args_type args_type;\n  typedef typename Node::handle_type handle_type;\n  typedef typename Node::hash_type hash_type;\n\n  static constexpr u64 kNodesSize1 = 1ull << kNodesSize1Log;\n  static constexpr u64 kNodesSize2 = 1ull << kNodesSize2Log;\n\n  // Maps stack trace to an unique id.\n  u32 Put(args_type args, bool *inserted = nullptr);\n  // Retrieves a stored stack trace by the id.\n  args_type Get(u32 id);\n\n  StackDepotStats GetStats() const {\n    return {\n        atomic_load_relaxed(&n_uniq_ids),\n        nodes.MemoryUsage() + Node::allocated(),\n    };\n  }\n\n  void LockAll();\n  void UnlockAll();\n  void PrintAll();\n\n  void TestOnlyUnmap() {\n    nodes.TestOnlyUnmap();\n    internal_memset(this, 0, sizeof(*this));\n  }\n\n private:\n  friend Node;\n  u32 find(u32 s, args_type args, hash_type hash) const;\n  static u32 lock(atomic_uint32_t *p);\n  static void unlock(atomic_uint32_t *p, u32 s);\n  atomic_uint32_t tab[kTabSize];  // Hash table of Node's.\n\n  atomic_uint32_t n_uniq_ids;\n\n  TwoLevelMap<Node, kNodesSize1, kNodesSize2> nodes;\n\n  friend class StackDepotReverseMap;\n};\n\ntemplate <class Node, int kReservedBits, int kTabSizeLog>\nu32 StackDepotBase<Node, kReservedBits, kTabSizeLog>::find(\n    u32 s, args_type args, hash_type hash) const {\n  // Searches linked list s for the stack, returns its id.\n  for (; s;) {\n    const Node &node = nodes[s];\n    if (node.eq(hash, args))\n      return s;\n    s = node.link;\n  }\n  return 0;\n}\n\ntemplate <class Node, int kReservedBits, int kTabSizeLog>\nu32 StackDepotBase<Node, kReservedBits, kTabSizeLog>::lock(atomic_uint32_t *p) {\n  // Uses 
the pointer lsb as mutex.\n  for (int i = 0;; i++) {\n    u32 cmp = atomic_load(p, memory_order_relaxed);\n    if ((cmp & kLockMask) == 0 &&\n        atomic_compare_exchange_weak(p, &cmp, cmp | kLockMask,\n                                     memory_order_acquire))\n      return cmp;\n    if (i < 10)\n      proc_yield(10);\n    else\n      internal_sched_yield();\n  }\n}\n\ntemplate <class Node, int kReservedBits, int kTabSizeLog>\nvoid StackDepotBase<Node, kReservedBits, kTabSizeLog>::unlock(\n    atomic_uint32_t *p, u32 s) {\n  DCHECK_EQ(s & kLockMask, 0);\n  atomic_store(p, s, memory_order_release);\n}\n\ntemplate <class Node, int kReservedBits, int kTabSizeLog>\nu32 StackDepotBase<Node, kReservedBits, kTabSizeLog>::Put(args_type args,\n                                                          bool *inserted) {\n  if (inserted)\n    *inserted = false;\n  if (!LIKELY(Node::is_valid(args)))\n    return 0;\n  hash_type h = Node::hash(args);\n  atomic_uint32_t *p = &tab[h % kTabSize];\n  u32 v = atomic_load(p, memory_order_consume);\n  u32 s = v & kUnlockMask;\n  // First, try to find the existing stack.\n  u32 node = find(s, args, h);\n  if (LIKELY(node))\n    return node;\n\n  // If failed, lock, retry and insert new.\n  u32 s2 = lock(p);\n  if (s2 != s) {\n    node = find(s2, args, h);\n    if (node) {\n      unlock(p, s2);\n      return node;\n    }\n  }\n  s = atomic_fetch_add(&n_uniq_ids, 1, memory_order_relaxed) + 1;\n  CHECK_EQ(s & kUnlockMask, s);\n  CHECK_EQ(s & (((u32)-1) >> kReservedBits), s);\n  Node &new_node = nodes[s];\n  new_node.store(s, args, h);\n  new_node.link = s2;\n  unlock(p, s);\n  if (inserted) *inserted = true;\n  return s;\n}\n\ntemplate <class Node, int kReservedBits, int kTabSizeLog>\ntypename StackDepotBase<Node, kReservedBits, kTabSizeLog>::args_type\nStackDepotBase<Node, kReservedBits, kTabSizeLog>::Get(u32 id) {\n  if (id == 0)\n    return args_type();\n  CHECK_EQ(id & (((u32)-1) >> kReservedBits), id);\n  if 
(!nodes.contains(id))\n    return args_type();\n  const Node &node = nodes[id];\n  return node.load(id);\n}\n\ntemplate <class Node, int kReservedBits, int kTabSizeLog>\nvoid StackDepotBase<Node, kReservedBits, kTabSizeLog>::LockAll() {\n  for (int i = 0; i < kTabSize; ++i) {\n    lock(&tab[i]);\n  }\n}\n\ntemplate <class Node, int kReservedBits, int kTabSizeLog>\nvoid StackDepotBase<Node, kReservedBits, kTabSizeLog>::UnlockAll() {\n  for (int i = 0; i < kTabSize; ++i) {\n    atomic_uint32_t *p = &tab[i];\n    uptr s = atomic_load(p, memory_order_relaxed);\n    unlock(p, s & kUnlockMask);\n  }\n}\n\ntemplate <class Node, int kReservedBits, int kTabSizeLog>\nvoid StackDepotBase<Node, kReservedBits, kTabSizeLog>::PrintAll() {\n  for (int i = 0; i < kTabSize; ++i) {\n    atomic_uint32_t *p = &tab[i];\n    u32 s = atomic_load(p, memory_order_consume) & kUnlockMask;\n    for (; s;) {\n      const Node &node = nodes[s];\n      Printf(\"Stack for id %u:\\n\", s);\n      node.load(s).Print();\n      s = node.link;\n    }\n  }\n}\n\n} // namespace __sanitizer\n\n#endif // SANITIZER_STACKDEPOTBASE_H\n"
  },
  {
    "path": "runtime/sanitizer_common/sanitizer_stacktrace.cpp",
    "content": "//===-- sanitizer_stacktrace.cpp ------------------------------------------===//\n//\n// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n// See https://llvm.org/LICENSE.txt for license information.\n// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n//\n//===----------------------------------------------------------------------===//\n//\n// This file is shared between AddressSanitizer and ThreadSanitizer\n// run-time libraries.\n//===----------------------------------------------------------------------===//\n\n#include \"sanitizer_stacktrace.h\"\n\n#include \"sanitizer_common.h\"\n#include \"sanitizer_flags.h\"\n#include \"sanitizer_platform.h\"\n#include \"sanitizer_ptrauth.h\"\n\nnamespace __sanitizer {\n\nuptr StackTrace::GetNextInstructionPc(uptr pc) {\n#if defined(__sparc__) || defined(__mips__)\n  return pc + 8;\n#elif defined(__powerpc__) || defined(__arm__) || defined(__aarch64__) || \\\n    defined(__hexagon__)\n  return STRIP_PAC_PC((void *)pc) + 4;\n#elif SANITIZER_RISCV64\n  // Current check order is 4 -> 2 -> 6 -> 8\n  u8 InsnByte = *(u8 *)(pc);\n  if (((InsnByte & 0x3) == 0x3) && ((InsnByte & 0x1c) != 0x1c)) {\n    // xxxxxxxxxxxbbb11 | 32 bit | bbb != 111\n    return pc + 4;\n  }\n  if ((InsnByte & 0x3) != 0x3) {\n    // xxxxxxxxxxxxxxaa | 16 bit | aa != 11\n    return pc + 2;\n  }\n  // RISC-V encoding allows instructions to be up to 8 bytes long\n  if ((InsnByte & 0x3f) == 0x1f) {\n    // xxxxxxxxxx011111 | 48 bit |\n    return pc + 6;\n  }\n  if ((InsnByte & 0x7f) == 0x3f) {\n    // xxxxxxxxx0111111 | 64 bit |\n    return pc + 8;\n  }\n  // bail-out if could not figure out the instruction size\n  return 0;\n#else\n  return pc + 1;\n#endif\n}\n\nuptr StackTrace::GetCurrentPc() {\n  return GET_CALLER_PC();\n}\n\nvoid BufferedStackTrace::Init(const uptr *pcs, uptr cnt, uptr extra_top_pc) {\n  size = cnt + !!extra_top_pc;\n  CHECK_LE(size, kStackTraceMax);\n  internal_memcpy(trace_buffer, pcs, 
cnt * sizeof(trace_buffer[0]));\n  if (extra_top_pc)\n    trace_buffer[cnt] = extra_top_pc;\n  top_frame_bp = 0;\n}\n\n// Sparc implementation is in its own file.\n#if !defined(__sparc__)\n\n// In GCC on ARM bp points to saved lr, not fp, so we should check the next\n// cell in stack to be a saved frame pointer. GetCanonicFrame returns the\n// pointer to saved frame pointer in any case.\nstatic inline uhwptr *GetCanonicFrame(uptr bp,\n                                      uptr stack_top,\n                                      uptr stack_bottom) {\n  CHECK_GT(stack_top, stack_bottom);\n#ifdef __arm__\n  if (!IsValidFrame(bp, stack_top, stack_bottom)) return 0;\n  uhwptr *bp_prev = (uhwptr *)bp;\n  if (IsValidFrame((uptr)bp_prev[0], stack_top, stack_bottom)) return bp_prev;\n  // The next frame pointer does not look right. This could be a GCC frame, step\n  // back by 1 word and try again.\n  if (IsValidFrame((uptr)bp_prev[-1], stack_top, stack_bottom))\n    return bp_prev - 1;\n  // Nope, this does not look right either. This means the frame after next does\n  // not have a valid frame pointer, but we can still extract the caller PC.\n  // Unfortunately, there is no way to decide between GCC and LLVM frame\n  // layouts. 
Assume LLVM.\n  return bp_prev;\n#else\n  return (uhwptr*)bp;\n#endif\n}\n\nvoid BufferedStackTrace::UnwindFast(uptr pc, uptr bp, uptr stack_top,\n                                    uptr stack_bottom, u32 max_depth) {\n  // TODO(yln): add arg sanity check for stack_top/stack_bottom\n  CHECK_GE(max_depth, 2);\n  const uptr kPageSize = GetPageSizeCached();\n  trace_buffer[0] = pc;\n  size = 1;\n  if (stack_top < 4096) return;  // Sanity check for stack top.\n  uhwptr *frame = GetCanonicFrame(bp, stack_top, stack_bottom);\n  // Lowest possible address that makes sense as the next frame pointer.\n  // Goes up as we walk the stack.\n  uptr bottom = stack_bottom;\n  // Avoid infinite loop when frame == frame[0] by using frame > prev_frame.\n  while (IsValidFrame((uptr)frame, stack_top, bottom) &&\n         IsAligned((uptr)frame, sizeof(*frame)) &&\n         size < max_depth) {\n#ifdef __powerpc__\n    // PowerPC ABIs specify that the return address is saved at offset\n    // 16 of the *caller's* stack frame.  Thus we must dereference the\n    // back chain to find the caller frame before extracting it.\n    uhwptr *caller_frame = (uhwptr*)frame[0];\n    if (!IsValidFrame((uptr)caller_frame, stack_top, bottom) ||\n        !IsAligned((uptr)caller_frame, sizeof(uhwptr)))\n      break;\n    uhwptr pc1 = caller_frame[2];\n#elif defined(__s390__)\n    uhwptr pc1 = frame[14];\n#elif defined(__riscv)\n    // frame[-1] contains the return address\n    uhwptr pc1 = frame[-1];\n#else\n    uhwptr pc1 = STRIP_PAC_PC((void *)frame[1]);\n#endif\n    // Let's assume that any pointer in the 0th page (i.e. <0x1000 on i386 and\n    // x86_64) is invalid and stop unwinding here.  
If we're adding support for\n    // a platform where this isn't true, we need to reconsider this check.\n    if (pc1 < kPageSize)\n      break;\n    if (pc1 != pc) {\n      trace_buffer[size++] = (uptr) pc1;\n    }\n    bottom = (uptr)frame;\n#if defined(__riscv)\n    // frame[-2] contain fp of the previous frame\n    uptr new_bp = (uptr)frame[-2];\n#else\n    uptr new_bp = (uptr)frame[0];\n#endif\n    frame = GetCanonicFrame(new_bp, stack_top, bottom);\n  }\n}\n\n#endif  // !defined(__sparc__)\n\nvoid BufferedStackTrace::PopStackFrames(uptr count) {\n  CHECK_LT(count, size);\n  size -= count;\n  for (uptr i = 0; i < size; ++i) {\n    trace_buffer[i] = trace_buffer[i + count];\n  }\n}\n\nstatic uptr Distance(uptr a, uptr b) { return a < b ? b - a : a - b; }\n\nuptr BufferedStackTrace::LocatePcInTrace(uptr pc) {\n  uptr best = 0;\n  for (uptr i = 1; i < size; ++i) {\n    if (Distance(trace[i], pc) < Distance(trace[best], pc)) best = i;\n  }\n  return best;\n}\n\n}  // namespace __sanitizer\n"
  },
  {
    "path": "runtime/sanitizer_common/sanitizer_stacktrace.h",
    "content": "//===-- sanitizer_stacktrace.h ----------------------------------*- C++ -*-===//\n//\n// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n// See https://llvm.org/LICENSE.txt for license information.\n// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n//\n//===----------------------------------------------------------------------===//\n//\n// This file is shared between AddressSanitizer and ThreadSanitizer\n// run-time libraries.\n//===----------------------------------------------------------------------===//\n#ifndef SANITIZER_STACKTRACE_H\n#define SANITIZER_STACKTRACE_H\n\n#include \"sanitizer_common.h\"\n#include \"sanitizer_internal_defs.h\"\n#include \"sanitizer_platform.h\"\n\nnamespace __sanitizer {\n\nstruct BufferedStackTrace;\n\nstatic const u32 kStackTraceMax = 255;\n\n#if SANITIZER_LINUX && defined(__mips__)\n# define SANITIZER_CAN_FAST_UNWIND 0\n#elif SANITIZER_WINDOWS\n# define SANITIZER_CAN_FAST_UNWIND 0\n#else\n# define SANITIZER_CAN_FAST_UNWIND 1\n#endif\n\n// Fast unwind is the only option on Mac for now; we will need to\n// revisit this macro when slow unwind works on Mac, see\n// https://github.com/google/sanitizers/issues/137\n#if SANITIZER_MAC\n#  define SANITIZER_CAN_SLOW_UNWIND 0\n#else\n# define SANITIZER_CAN_SLOW_UNWIND 1\n#endif\n\nstruct StackTrace {\n  const uptr *trace;\n  u32 size;\n  u32 tag;\n\n  static const int TAG_UNKNOWN = 0;\n  static const int TAG_ALLOC = 1;\n  static const int TAG_DEALLOC = 2;\n  static const int TAG_CUSTOM = 100; // Tool specific tags start here.\n\n  StackTrace() : trace(nullptr), size(0), tag(0) {}\n  StackTrace(const uptr *trace, u32 size) : trace(trace), size(size), tag(0) {}\n  StackTrace(const uptr *trace, u32 size, u32 tag)\n      : trace(trace), size(size), tag(tag) {}\n\n  // Prints a symbolized stacktrace, followed by an empty line.\n  void Print() const;\n\n  // Prints a symbolized stacktrace to the output string, followed by an empty\n  // 
line.\n  void PrintTo(InternalScopedString *output) const;\n\n  // Prints a symbolized stacktrace to the output buffer, followed by an empty\n  // line. Returns the number of symbols that should have been written to buffer\n  // (not including trailing '\\0'). Thus, the string is truncated iff return\n  // value is not less than \"out_buf_size\".\n  uptr PrintTo(char *out_buf, uptr out_buf_size) const;\n\n  static bool WillUseFastUnwind(bool request_fast_unwind) {\n    if (!SANITIZER_CAN_FAST_UNWIND)\n      return false;\n    if (!SANITIZER_CAN_SLOW_UNWIND)\n      return true;\n    return request_fast_unwind;\n  }\n\n  static uptr GetCurrentPc();\n  static inline uptr GetPreviousInstructionPc(uptr pc);\n  static uptr GetNextInstructionPc(uptr pc);\n};\n\n// Performance-critical, must be in the header.\nALWAYS_INLINE\nuptr StackTrace::GetPreviousInstructionPc(uptr pc) {\n#if defined(__arm__)\n  // T32 (Thumb) branch instructions might be 16 or 32 bit long,\n  // so we return (pc-2) in that case in order to be safe.\n  // For A32 mode we return (pc-4) because all instructions are 32 bit long.\n  return (pc - 3) & (~1);\n#elif defined(__powerpc__) || defined(__powerpc64__) || defined(__aarch64__)\n  // PCs are always 4 byte aligned.\n  return pc - 4;\n#elif defined(__sparc__) || defined(__mips__)\n  return pc - 8;\n#elif SANITIZER_RISCV64\n  // RV-64 has variable instruction length...\n  // C extensions give us 2-byte instructions\n  // RV-64 has 4-byte instructions\n  // + RISCV architecture allows instructions up to 8 bytes\n  // It seems difficult to figure out the exact instruction length -\n  // pc - 2 seems like a safe option for the purposes of stack tracing\n  return pc - 2;\n#else\n  return pc - 1;\n#endif\n}\n\n// StackTrace that owns the buffer used to store the addresses.\nstruct BufferedStackTrace : public StackTrace {\n  uptr trace_buffer[kStackTraceMax];\n  uptr top_frame_bp;  // Optional bp of a top frame.\n\n  BufferedStackTrace() : 
StackTrace(trace_buffer, 0), top_frame_bp(0) {}\n\n  void Init(const uptr *pcs, uptr cnt, uptr extra_top_pc = 0);\n\n  // Get the stack trace with the given pc and bp.\n  // The pc will be in the position 0 of the resulting stack trace.\n  // The bp may refer to the current frame or to the caller's frame.\n  void Unwind(uptr pc, uptr bp, void *context, bool request_fast,\n              u32 max_depth = kStackTraceMax) {\n    top_frame_bp = (max_depth > 0) ? bp : 0;\n    // Small max_depth optimization\n    if (max_depth <= 1) {\n      if (max_depth == 1)\n        trace_buffer[0] = pc;\n      size = max_depth;\n      return;\n    }\n    UnwindImpl(pc, bp, context, request_fast, max_depth);\n  }\n\n  void Unwind(u32 max_depth, uptr pc, uptr bp, void *context, uptr stack_top,\n              uptr stack_bottom, bool request_fast_unwind);\n\n  void Reset() {\n    *static_cast<StackTrace *>(this) = StackTrace(trace_buffer, 0);\n    top_frame_bp = 0;\n  }\n\n private:\n  // Every runtime defines its own implementation of this method\n  void UnwindImpl(uptr pc, uptr bp, void *context, bool request_fast,\n                  u32 max_depth);\n\n  // UnwindFast/Slow have platform-specific implementations\n  void UnwindFast(uptr pc, uptr bp, uptr stack_top, uptr stack_bottom,\n                  u32 max_depth);\n  void UnwindSlow(uptr pc, u32 max_depth);\n  void UnwindSlow(uptr pc, void *context, u32 max_depth);\n\n  void PopStackFrames(uptr count);\n  uptr LocatePcInTrace(uptr pc);\n\n  BufferedStackTrace(const BufferedStackTrace &) = delete;\n  void operator=(const BufferedStackTrace &) = delete;\n\n  friend class FastUnwindTest;\n};\n\n#if defined(__s390x__)\nstatic const uptr kFrameSize = 160;\n#elif defined(__s390__)\nstatic const uptr kFrameSize = 96;\n#else\nstatic const uptr kFrameSize = 2 * sizeof(uhwptr);\n#endif\n\n// Check if given pointer points into allocated stack area.\nstatic inline bool IsValidFrame(uptr frame, uptr stack_top, uptr stack_bottom) {\n  return frame 
> stack_bottom && frame < stack_top - kFrameSize;\n}\n\n}  // namespace __sanitizer\n\n// Use this macro if you want to print stack trace with the caller\n// of the current function in the top frame.\n#define GET_CALLER_PC_BP \\\n  uptr bp = GET_CURRENT_FRAME();              \\\n  uptr pc = GET_CALLER_PC();\n\n#define GET_CALLER_PC_BP_SP \\\n  GET_CALLER_PC_BP;                           \\\n  uptr local_stack;                           \\\n  uptr sp = (uptr)&local_stack\n\n// Use this macro if you want to print stack trace with the current\n// function in the top frame.\n#define GET_CURRENT_PC_BP \\\n  uptr bp = GET_CURRENT_FRAME();              \\\n  uptr pc = StackTrace::GetCurrentPc()\n\n#define GET_CURRENT_PC_BP_SP \\\n  GET_CURRENT_PC_BP;                          \\\n  uptr local_stack;                           \\\n  uptr sp = (uptr)&local_stack\n\n// GET_CURRENT_PC() is equivalent to StackTrace::GetCurrentPc().\n// Optimized x86 version is faster than GetCurrentPc because\n// it does not involve a function call, instead it reads RIP register.\n// Reads of RIP by an instruction return RIP pointing to the next\n// instruction, which is exactly what we want here, thus 0 offset.\n// It needs to be a macro because otherwise we will get the name\n// of this function on the top of most stacks. Attribute artificial\n// does not do what it claims to do, unfortunately. And attribute\n// __nodebug__ is clang-only. If we would have an attribute that\n// would remove this function from debug info, we could simply make\n// StackTrace::GetCurrentPc() faster.\n#if defined(__x86_64__)\n#  define GET_CURRENT_PC()                \\\n    (__extension__({                      \\\n      uptr pc;                            \\\n      asm(\"lea 0(%%rip), %0\" : \"=r\"(pc)); \\\n      pc;                                 \\\n    }))\n#else\n#  define GET_CURRENT_PC() StackTrace::GetCurrentPc()\n#endif\n\n#endif  // SANITIZER_STACKTRACE_H\n"
  },
  {
    "path": "runtime/sanitizer_common/sanitizer_stacktrace_libcdep.cpp",
    "content": "//===-- sanitizer_stacktrace_libcdep.cpp ----------------------------------===//\n//\n// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n// See https://llvm.org/LICENSE.txt for license information.\n// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n//\n//===----------------------------------------------------------------------===//\n//\n// This file is shared between AddressSanitizer and ThreadSanitizer\n// run-time libraries.\n//===----------------------------------------------------------------------===//\n\n#include \"sanitizer_common.h\"\n#include \"sanitizer_placement_new.h\"\n#include \"sanitizer_stacktrace.h\"\n#include \"sanitizer_stacktrace_printer.h\"\n#include \"sanitizer_symbolizer.h\"\n\nnamespace __sanitizer {\n\nnamespace {\n\nclass StackTraceTextPrinter {\n public:\n  StackTraceTextPrinter(const char *stack_trace_fmt, char frame_delimiter,\n                        InternalScopedString *output,\n                        InternalScopedString *dedup_token)\n      : stack_trace_fmt_(stack_trace_fmt),\n        frame_delimiter_(frame_delimiter),\n        output_(output),\n        dedup_token_(dedup_token),\n        symbolize_(RenderNeedsSymbolization(stack_trace_fmt)) {}\n\n  bool ProcessAddressFrames(uptr pc) {\n    SymbolizedStack *frames = symbolize_\n                                  ? Symbolizer::GetOrInit()->SymbolizePC(pc)\n                                  : SymbolizedStack::New(pc);\n    if (!frames)\n      return false;\n\n    for (SymbolizedStack *cur = frames; cur; cur = cur->next) {\n      uptr prev_len = output_->length();\n      RenderFrame(output_, stack_trace_fmt_, frame_num_++, cur->info.address,\n                  symbolize_ ? 
&cur->info : nullptr,\n                  common_flags()->symbolize_vs_style,\n                  common_flags()->strip_path_prefix);\n\n      if (prev_len != output_->length())\n        output_->append(\"%c\", frame_delimiter_);\n\n      ExtendDedupToken(cur);\n    }\n    frames->ClearAll();\n    return true;\n  }\n\n private:\n  // Extend the dedup token by appending a new frame.\n  void ExtendDedupToken(SymbolizedStack *stack) {\n    if (!dedup_token_)\n      return;\n\n    if (dedup_frames_-- > 0) {\n      if (dedup_token_->length())\n        dedup_token_->append(\"--\");\n      if (stack->info.function != nullptr)\n        dedup_token_->append(\"%s\", stack->info.function);\n    }\n  }\n\n  const char *stack_trace_fmt_;\n  const char frame_delimiter_;\n  int dedup_frames_ = common_flags()->dedup_token_length;\n  uptr frame_num_ = 0;\n  InternalScopedString *output_;\n  InternalScopedString *dedup_token_;\n  const bool symbolize_ = false;\n};\n\nstatic void CopyStringToBuffer(const InternalScopedString &str, char *out_buf,\n                               uptr out_buf_size) {\n  if (!out_buf_size)\n    return;\n\n  CHECK_GT(out_buf_size, 0);\n  uptr copy_size = Min(str.length(), out_buf_size - 1);\n  internal_memcpy(out_buf, str.data(), copy_size);\n  out_buf[copy_size] = '\\0';\n}\n\n}  // namespace\n\nvoid StackTrace::PrintTo(InternalScopedString *output) const {\n  CHECK(output);\n\n  InternalScopedString dedup_token;\n  StackTraceTextPrinter printer(common_flags()->stack_trace_format, '\\n',\n                                output, &dedup_token);\n\n  if (trace == nullptr || size == 0) {\n    output->append(\"    <empty stack>\\n\\n\");\n    return;\n  }\n\n  for (uptr i = 0; i < size && trace[i]; i++) {\n    // PCs in stack traces are actually the return addresses, that is,\n    // addresses of the next instructions after the call.\n    uptr pc = GetPreviousInstructionPc(trace[i]);\n    CHECK(printer.ProcessAddressFrames(pc));\n  }\n\n  // Always add a 
trailing empty line after stack trace.\n  output->append(\"\\n\");\n\n  // Append deduplication token, if non-empty.\n  if (dedup_token.length())\n    output->append(\"DEDUP_TOKEN: %s\\n\", dedup_token.data());\n}\n\nuptr StackTrace::PrintTo(char *out_buf, uptr out_buf_size) const {\n  CHECK(out_buf);\n\n  InternalScopedString output;\n  PrintTo(&output);\n  CopyStringToBuffer(output, out_buf, out_buf_size);\n\n  return output.length();\n}\n\nvoid StackTrace::Print() const {\n  InternalScopedString output;\n  PrintTo(&output);\n  Printf(\"%s\", output.data());\n}\n\nvoid BufferedStackTrace::Unwind(u32 max_depth, uptr pc, uptr bp, void *context,\n                                uptr stack_top, uptr stack_bottom,\n                                bool request_fast_unwind) {\n  // Ensures all call sites get what they requested.\n  CHECK_EQ(request_fast_unwind, WillUseFastUnwind(request_fast_unwind));\n  top_frame_bp = (max_depth > 0) ? bp : 0;\n  // Avoid doing any work for small max_depth.\n  if (max_depth == 0) {\n    size = 0;\n    return;\n  }\n  if (max_depth == 1) {\n    size = 1;\n    trace_buffer[0] = pc;\n    return;\n  }\n  if (!WillUseFastUnwind(request_fast_unwind)) {\n#if SANITIZER_CAN_SLOW_UNWIND\n    if (context)\n      UnwindSlow(pc, context, max_depth);\n    else\n      UnwindSlow(pc, max_depth);\n    // If there are too few frames, the program may be built with\n    // -fno-asynchronous-unwind-tables. 
Fall back to fast unwinder below.\n    if (size > 2 || size >= max_depth)\n      return;\n#else\n    UNREACHABLE(\"slow unwind requested but not available\");\n#endif\n  }\n  UnwindFast(pc, bp, stack_top, stack_bottom, max_depth);\n}\n\nstatic int GetModuleAndOffsetForPc(uptr pc, char *module_name,\n                                   uptr module_name_len, uptr *pc_offset) {\n  const char *found_module_name = nullptr;\n  bool ok = Symbolizer::GetOrInit()->GetModuleNameAndOffsetForPC(\n      pc, &found_module_name, pc_offset);\n\n  if (!ok) return false;\n\n  if (module_name && module_name_len) {\n    internal_strncpy(module_name, found_module_name, module_name_len);\n    module_name[module_name_len - 1] = '\\x00';\n  }\n  return true;\n}\n\n}  // namespace __sanitizer\nusing namespace __sanitizer;\n\nextern \"C\" {\nSANITIZER_INTERFACE_ATTRIBUTE\nvoid __sanitizer_symbolize_pc(uptr pc, const char *fmt, char *out_buf,\n                              uptr out_buf_size) {\n  if (!out_buf_size)\n    return;\n\n  pc = StackTrace::GetPreviousInstructionPc(pc);\n\n  InternalScopedString output;\n  StackTraceTextPrinter printer(fmt, '\\0', &output, nullptr);\n  if (!printer.ProcessAddressFrames(pc)) {\n    output.clear();\n    output.append(\"<can't symbolize>\");\n  }\n  CopyStringToBuffer(output, out_buf, out_buf_size);\n}\n\nSANITIZER_INTERFACE_ATTRIBUTE\nvoid __sanitizer_symbolize_global(uptr data_addr, const char *fmt,\n                                  char *out_buf, uptr out_buf_size) {\n  if (!out_buf_size) return;\n  out_buf[0] = 0;\n  DataInfo DI;\n  if (!Symbolizer::GetOrInit()->SymbolizeData(data_addr, &DI)) return;\n  InternalScopedString data_desc;\n  RenderData(&data_desc, fmt, &DI, common_flags()->strip_path_prefix);\n  internal_strncpy(out_buf, data_desc.data(), out_buf_size);\n  out_buf[out_buf_size - 1] = 0;\n}\n\nSANITIZER_INTERFACE_ATTRIBUTE\nint __sanitizer_get_module_and_offset_for_pc(uptr pc, char *module_name,\n                                         
    uptr module_name_len,\n                                             uptr *pc_offset) {\n  return __sanitizer::GetModuleAndOffsetForPc(pc, module_name, module_name_len,\n                                              pc_offset);\n}\n}  // extern \"C\"\n"
  },
  {
    "path": "runtime/sanitizer_common/sanitizer_stacktrace_printer.cpp",
    "content": "//===-- sanitizer_common.cpp ----------------------------------------------===//\n//\n// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n// See https://llvm.org/LICENSE.txt for license information.\n// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n//\n//===----------------------------------------------------------------------===//\n//\n// This file is shared between sanitizers' run-time libraries.\n//\n//===----------------------------------------------------------------------===//\n\n#include \"sanitizer_stacktrace_printer.h\"\n#include \"sanitizer_file.h\"\n#include \"sanitizer_fuchsia.h\"\n\nnamespace __sanitizer {\n\n// sanitizer_symbolizer_markup.cpp implements these differently.\n#if !SANITIZER_SYMBOLIZER_MARKUP\n\nstatic const char *StripFunctionName(const char *function, const char *prefix) {\n  if (!function) return nullptr;\n  if (!prefix) return function;\n  uptr prefix_len = internal_strlen(prefix);\n  if (0 == internal_strncmp(function, prefix, prefix_len))\n    return function + prefix_len;\n  return function;\n}\n\nstatic const char *DemangleFunctionName(const char *function) {\n  if (!function) return nullptr;\n\n  // NetBSD uses indirection for old threading functions for historical reasons\n  // The mangled names are internal implementation detail and should not be\n  // exposed even in backtraces.\n#if SANITIZER_NETBSD\n  if (!internal_strcmp(function, \"__libc_mutex_init\"))\n    return \"pthread_mutex_init\";\n  if (!internal_strcmp(function, \"__libc_mutex_lock\"))\n    return \"pthread_mutex_lock\";\n  if (!internal_strcmp(function, \"__libc_mutex_trylock\"))\n    return \"pthread_mutex_trylock\";\n  if (!internal_strcmp(function, \"__libc_mutex_unlock\"))\n    return \"pthread_mutex_unlock\";\n  if (!internal_strcmp(function, \"__libc_mutex_destroy\"))\n    return \"pthread_mutex_destroy\";\n  if (!internal_strcmp(function, \"__libc_mutexattr_init\"))\n    return 
\"pthread_mutexattr_init\";\n  if (!internal_strcmp(function, \"__libc_mutexattr_settype\"))\n    return \"pthread_mutexattr_settype\";\n  if (!internal_strcmp(function, \"__libc_mutexattr_destroy\"))\n    return \"pthread_mutexattr_destroy\";\n  if (!internal_strcmp(function, \"__libc_cond_init\"))\n    return \"pthread_cond_init\";\n  if (!internal_strcmp(function, \"__libc_cond_signal\"))\n    return \"pthread_cond_signal\";\n  if (!internal_strcmp(function, \"__libc_cond_broadcast\"))\n    return \"pthread_cond_broadcast\";\n  if (!internal_strcmp(function, \"__libc_cond_wait\"))\n    return \"pthread_cond_wait\";\n  if (!internal_strcmp(function, \"__libc_cond_timedwait\"))\n    return \"pthread_cond_timedwait\";\n  if (!internal_strcmp(function, \"__libc_cond_destroy\"))\n    return \"pthread_cond_destroy\";\n  if (!internal_strcmp(function, \"__libc_rwlock_init\"))\n    return \"pthread_rwlock_init\";\n  if (!internal_strcmp(function, \"__libc_rwlock_rdlock\"))\n    return \"pthread_rwlock_rdlock\";\n  if (!internal_strcmp(function, \"__libc_rwlock_wrlock\"))\n    return \"pthread_rwlock_wrlock\";\n  if (!internal_strcmp(function, \"__libc_rwlock_tryrdlock\"))\n    return \"pthread_rwlock_tryrdlock\";\n  if (!internal_strcmp(function, \"__libc_rwlock_trywrlock\"))\n    return \"pthread_rwlock_trywrlock\";\n  if (!internal_strcmp(function, \"__libc_rwlock_unlock\"))\n    return \"pthread_rwlock_unlock\";\n  if (!internal_strcmp(function, \"__libc_rwlock_destroy\"))\n    return \"pthread_rwlock_destroy\";\n  if (!internal_strcmp(function, \"__libc_thr_keycreate\"))\n    return \"pthread_key_create\";\n  if (!internal_strcmp(function, \"__libc_thr_setspecific\"))\n    return \"pthread_setspecific\";\n  if (!internal_strcmp(function, \"__libc_thr_getspecific\"))\n    return \"pthread_getspecific\";\n  if (!internal_strcmp(function, \"__libc_thr_keydelete\"))\n    return \"pthread_key_delete\";\n  if (!internal_strcmp(function, \"__libc_thr_once\"))\n    return 
\"pthread_once\";\n  if (!internal_strcmp(function, \"__libc_thr_self\"))\n    return \"pthread_self\";\n  if (!internal_strcmp(function, \"__libc_thr_exit\"))\n    return \"pthread_exit\";\n  if (!internal_strcmp(function, \"__libc_thr_setcancelstate\"))\n    return \"pthread_setcancelstate\";\n  if (!internal_strcmp(function, \"__libc_thr_equal\"))\n    return \"pthread_equal\";\n  if (!internal_strcmp(function, \"__libc_thr_curcpu\"))\n    return \"pthread_curcpu_np\";\n  if (!internal_strcmp(function, \"__libc_thr_sigsetmask\"))\n    return \"pthread_sigmask\";\n#endif\n\n  return function;\n}\n\nstatic void MaybeBuildIdToBuffer(const AddressInfo &info, bool PrefixSpace,\n                                 InternalScopedString *buffer) {\n  if (info.uuid_size) {\n    if (PrefixSpace)\n      buffer->append(\" \");\n    buffer->append(\"(BuildId: \");\n    for (uptr i = 0; i < info.uuid_size; ++i) {\n      buffer->append(\"%02x\", info.uuid[i]);\n    }\n    buffer->append(\")\");\n  }\n}\n\nstatic const char kDefaultFormat[] = \"    #%n %p %F %L\";\n\nvoid RenderFrame(InternalScopedString *buffer, const char *format, int frame_no,\n                 uptr address, const AddressInfo *info, bool vs_style,\n                 const char *strip_path_prefix, const char *strip_func_prefix) {\n  // info will be null in the case where symbolization is not needed for the\n  // given format. This ensures that the code below will get a hard failure\n  // rather than print incorrect information in case RenderNeedsSymbolization\n  // ever ends up out of sync with this function. 
If non-null, the addresses\n  // should match.\n  CHECK(!info || address == info->address);\n  if (0 == internal_strcmp(format, \"DEFAULT\"))\n    format = kDefaultFormat;\n  for (const char *p = format; *p != '\\0'; p++) {\n    if (*p != '%') {\n      buffer->append(\"%c\", *p);\n      continue;\n    }\n    p++;\n    switch (*p) {\n    case '%':\n      buffer->append(\"%%\");\n      break;\n    // Frame number and all fields of AddressInfo structure.\n    case 'n':\n      buffer->append(\"%u\", frame_no);\n      break;\n    case 'p':\n      buffer->append(\"0x%zx\", address);\n      break;\n    case 'm':\n      buffer->append(\"%s\", StripPathPrefix(info->module, strip_path_prefix));\n      break;\n    case 'o':\n      buffer->append(\"0x%zx\", info->module_offset);\n      break;\n    case 'b':\n      MaybeBuildIdToBuffer(*info, /*PrefixSpace=*/false, buffer);\n      break;\n    case 'f':\n      buffer->append(\"%s\", DemangleFunctionName(StripFunctionName(\n                               info->function, strip_func_prefix)));\n      break;\n    case 'q':\n      buffer->append(\"0x%zx\", info->function_offset != AddressInfo::kUnknown\n                                  ? 
info->function_offset\n                                  : 0x0);\n      break;\n    case 's':\n      buffer->append(\"%s\", StripPathPrefix(info->file, strip_path_prefix));\n      break;\n    case 'l':\n      buffer->append(\"%d\", info->line);\n      break;\n    case 'c':\n      buffer->append(\"%d\", info->column);\n      break;\n    // Smarter special cases.\n    case 'F':\n      // Function name and offset, if file is unknown.\n      if (info->function) {\n        buffer->append(\"in %s\", DemangleFunctionName(StripFunctionName(\n                                    info->function, strip_func_prefix)));\n        if (!info->file && info->function_offset != AddressInfo::kUnknown)\n          buffer->append(\"+0x%zx\", info->function_offset);\n      }\n      break;\n    case 'S':\n      // File/line information.\n      RenderSourceLocation(buffer, info->file, info->line, info->column,\n                           vs_style, strip_path_prefix);\n      break;\n    case 'L':\n      // Source location, or module location.\n      if (info->file) {\n        RenderSourceLocation(buffer, info->file, info->line, info->column,\n                             vs_style, strip_path_prefix);\n      } else if (info->module) {\n        RenderModuleLocation(buffer, info->module, info->module_offset,\n                             info->module_arch, strip_path_prefix);\n\n        MaybeBuildIdToBuffer(*info, /*PrefixSpace=*/true, buffer);\n      } else {\n        buffer->append(\"(<unknown module>)\");\n      }\n      break;\n    case 'M':\n      // Module basename and offset, or PC.\n      if (address & kExternalPCBit) {\n        // There PCs are not meaningful.\n      } else if (info->module) {\n        // Always strip the module name for %M.\n        RenderModuleLocation(buffer, StripModuleName(info->module),\n                             info->module_offset, info->module_arch, \"\");\n        MaybeBuildIdToBuffer(*info, /*PrefixSpace=*/true, buffer);\n      } else {\n        
buffer->append(\"(%p)\", (void *)address);\n      }\n      break;\n    default:\n      Report(\"Unsupported specifier in stack frame format: %c (%p)!\\n\", *p,\n             (void *)p);\n      Die();\n    }\n  }\n}\n\nbool RenderNeedsSymbolization(const char *format) {\n  if (0 == internal_strcmp(format, \"DEFAULT\"))\n    format = kDefaultFormat;\n  for (const char *p = format; *p != '\\0'; p++) {\n    if (*p != '%')\n      continue;\n    p++;\n    switch (*p) {\n      case '%':\n        break;\n      case 'n':\n        // frame_no\n        break;\n      case 'p':\n        // address\n        break;\n      default:\n        return true;\n    }\n  }\n  return false;\n}\n\nvoid RenderData(InternalScopedString *buffer, const char *format,\n                const DataInfo *DI, const char *strip_path_prefix) {\n  for (const char *p = format; *p != '\\0'; p++) {\n    if (*p != '%') {\n      buffer->append(\"%c\", *p);\n      continue;\n    }\n    p++;\n    switch (*p) {\n      case '%':\n        buffer->append(\"%%\");\n        break;\n      case 's':\n        buffer->append(\"%s\", StripPathPrefix(DI->file, strip_path_prefix));\n        break;\n      case 'l':\n        buffer->append(\"%zu\", DI->line);\n        break;\n      case 'g':\n        buffer->append(\"%s\", DI->name);\n        break;\n      default:\n        Report(\"Unsupported specifier in stack frame format: %c (%p)!\\n\", *p,\n               (void *)p);\n        Die();\n    }\n  }\n}\n\n#endif  // !SANITIZER_SYMBOLIZER_MARKUP\n\nvoid RenderSourceLocation(InternalScopedString *buffer, const char *file,\n                          int line, int column, bool vs_style,\n                          const char *strip_path_prefix) {\n  if (vs_style && line > 0) {\n    buffer->append(\"%s(%d\", StripPathPrefix(file, strip_path_prefix), line);\n    if (column > 0)\n      buffer->append(\",%d\", column);\n    buffer->append(\")\");\n    return;\n  }\n\n  buffer->append(\"%s\", StripPathPrefix(file, 
strip_path_prefix));\n  if (line > 0) {\n    buffer->append(\":%d\", line);\n    if (column > 0)\n      buffer->append(\":%d\", column);\n  }\n}\n\nvoid RenderModuleLocation(InternalScopedString *buffer, const char *module,\n                          uptr offset, ModuleArch arch,\n                          const char *strip_path_prefix) {\n  buffer->append(\"(%s\", StripPathPrefix(module, strip_path_prefix));\n  if (arch != kModuleArchUnknown) {\n    buffer->append(\":%s\", ModuleArchToString(arch));\n  }\n  buffer->append(\"+0x%zx)\", offset);\n}\n\n} // namespace __sanitizer\n"
  },
  {
    "path": "runtime/sanitizer_common/sanitizer_stacktrace_printer.h",
    "content": "//===-- sanitizer_stacktrace_printer.h --------------------------*- C++ -*-===//\n//\n// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n// See https://llvm.org/LICENSE.txt for license information.\n// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n//\n//===----------------------------------------------------------------------===//\n//\n// This file is shared between sanitizers' run-time libraries.\n//\n//===----------------------------------------------------------------------===//\n#ifndef SANITIZER_STACKTRACE_PRINTER_H\n#define SANITIZER_STACKTRACE_PRINTER_H\n\n#include \"sanitizer_common.h\"\n#include \"sanitizer_symbolizer.h\"\n\nnamespace __sanitizer {\n\n// Render the contents of \"info\" structure, which represents the contents of\n// stack frame \"frame_no\" and appends it to the \"buffer\". \"format\" is a\n// string with placeholders, which is copied to the output with\n// placeholders substituted with the contents of \"info\". 
For example,\n// format string\n//   \"  frame %n: function %F at %S\"\n// will be turned into\n//   \"  frame 10: function foo::bar() at my/file.cc:10\"\n// You may additionally pass \"strip_path_prefix\" to strip prefixes of paths to\n// source files and modules, and \"strip_func_prefix\" to strip prefixes of\n// function names.\n// Here's the full list of available placeholders:\n//   %% - represents a '%' character;\n//   %n - frame number (copy of frame_no);\n//   %p - PC in hex format;\n//   %m - path to module (binary or shared object);\n//   %o - offset in the module in hex format;\n//   %f - function name;\n//   %q - offset in the function in hex format (*if available*);\n//   %s - path to source file;\n//   %l - line in the source file;\n//   %c - column in the source file;\n//   %F - if function is known to be <foo>, prints \"in <foo>\", possibly\n//        followed by the offset in this function, but only if source file\n//        is unknown;\n//   %S - prints file/line/column information;\n//   %L - prints location information: file/line/column, if it is known, or\n//        module+offset if it is known, or (<unknown module>) string.\n//   %M - prints module basename and offset, if it is known, or PC.\nvoid RenderFrame(InternalScopedString *buffer, const char *format, int frame_no,\n                 uptr address, const AddressInfo *info, bool vs_style,\n                 const char *strip_path_prefix = \"\",\n                 const char *strip_func_prefix = \"\");\n\nbool RenderNeedsSymbolization(const char *format);\n\nvoid RenderSourceLocation(InternalScopedString *buffer, const char *file,\n                          int line, int column, bool vs_style,\n                          const char *strip_path_prefix);\n\nvoid RenderModuleLocation(InternalScopedString *buffer, const char *module,\n                          uptr offset, ModuleArch arch,\n                          const char *strip_path_prefix);\n\n// Same as RenderFrame, but for data section 
(global variables).\n// Accepts %s, %l from above.\n// Also accepts:\n//   %g - name of the global variable.\nvoid RenderData(InternalScopedString *buffer, const char *format,\n                const DataInfo *DI, const char *strip_path_prefix = \"\");\n\n}  // namespace __sanitizer\n\n#endif  // SANITIZER_STACKTRACE_PRINTER_H\n"
  },
  {
    "path": "runtime/sanitizer_common/sanitizer_stacktrace_sparc.cpp",
    "content": "//===-- sanitizer_stacktrace_sparc.cpp ------------------------------------===//\n//\n// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n// See https://llvm.org/LICENSE.txt for license information.\n// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n//\n//===----------------------------------------------------------------------===//\n//\n// This file is shared between AddressSanitizer and ThreadSanitizer\n// run-time libraries.\n//\n// Implementation of fast stack unwinding for Sparc.\n//===----------------------------------------------------------------------===//\n\n#if defined(__sparc__)\n\n#if defined(__arch64__) || defined(__sparcv9)\n#define STACK_BIAS 2047\n#else\n#define STACK_BIAS 0\n#endif\n\n#include \"sanitizer_common.h\"\n#include \"sanitizer_stacktrace.h\"\n\nnamespace __sanitizer {\n\nvoid BufferedStackTrace::UnwindFast(uptr pc, uptr bp, uptr stack_top,\n                                    uptr stack_bottom, u32 max_depth) {\n  // TODO(yln): add arg sanity check for stack_top/stack_bottom\n  CHECK_GE(max_depth, 2);\n  const uptr kPageSize = GetPageSizeCached();\n#if defined(__GNUC__)\n  // __builtin_return_address returns the address of the call instruction\n  // on the SPARC and not the return address, so we need to compensate.\n  trace_buffer[0] = GetNextInstructionPc(pc);\n#else\n  trace_buffer[0] = pc;\n#endif\n  size = 1;\n  if (stack_top < 4096) return;  // Sanity check for stack top.\n  // Flush register windows to memory\n#if defined(__sparc_v9__) || defined(__sparcv9__) || defined(__sparcv9)\n  asm volatile(\"flushw\" ::: \"memory\");\n#else\n  asm volatile(\"ta 3\" ::: \"memory\");\n#endif\n  // On the SPARC, the return address is not in the frame, it is in a\n  // register.  
There is no way to access it off of the current frame\n  // pointer, but it can be accessed off the previous frame pointer by\n  // reading the value from the register window save area.\n  uptr prev_bp = GET_CURRENT_FRAME();\n  uptr next_bp = prev_bp;\n  unsigned int i = 0;\n  while (next_bp != bp && IsAligned(next_bp, sizeof(uhwptr)) && i++ < 8) {\n    prev_bp = next_bp;\n    next_bp = (uptr)((uhwptr *)next_bp)[14] + STACK_BIAS;\n  }\n  if (next_bp == bp)\n    bp = prev_bp;\n  // Lowest possible address that makes sense as the next frame pointer.\n  // Goes up as we walk the stack.\n  uptr bottom = stack_bottom;\n  // Avoid infinite loop when frame == frame[0] by using frame > prev_frame.\n  while (IsValidFrame(bp, stack_top, bottom) && IsAligned(bp, sizeof(uhwptr)) &&\n         size < max_depth) {\n    uhwptr pc1 = ((uhwptr *)bp)[15];\n    // Let's assume that any pointer in the 0th page is invalid and\n    // stop unwinding here.  If we're adding support for a platform\n    // where this isn't true, we need to reconsider this check.\n    if (pc1 < kPageSize)\n      break;\n    if (pc1 != pc) {\n      // %o7 contains the address of the call instruction and not the\n      // return address, so we need to compensate.\n      trace_buffer[size++] = GetNextInstructionPc((uptr)pc1);\n    }\n    bottom = bp;\n    bp = (uptr)((uhwptr *)bp)[14] + STACK_BIAS;\n  }\n}\n\n}  // namespace __sanitizer\n\n#endif  // !defined(__sparc__)\n"
  },
  {
    "path": "runtime/sanitizer_common/sanitizer_stoptheworld.h",
    "content": "//===-- sanitizer_stoptheworld.h --------------------------------*- C++ -*-===//\n//\n// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n// See https://llvm.org/LICENSE.txt for license information.\n// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n//\n//===----------------------------------------------------------------------===//\n//\n// Defines the StopTheWorld function which suspends the execution of the current\n// process and runs the user-supplied callback in the same address space.\n//\n//===----------------------------------------------------------------------===//\n#ifndef SANITIZER_STOPTHEWORLD_H\n#define SANITIZER_STOPTHEWORLD_H\n\n#include \"sanitizer_internal_defs.h\"\n#include \"sanitizer_common.h\"\n\nnamespace __sanitizer {\n\nenum PtraceRegistersStatus {\n  REGISTERS_UNAVAILABLE_FATAL = -1,\n  REGISTERS_UNAVAILABLE = 0,\n  REGISTERS_AVAILABLE = 1\n};\n\n// Holds the list of suspended threads and provides an interface to dump their\n// register contexts.\nclass SuspendedThreadsList {\n public:\n  SuspendedThreadsList() = default;\n\n  // Can't declare pure virtual functions in sanitizer runtimes:\n  // __cxa_pure_virtual might be unavailable. Use UNIMPLEMENTED() instead.\n  virtual PtraceRegistersStatus GetRegistersAndSP(\n      uptr index, InternalMmapVector<uptr> *buffer, uptr *sp) const {\n    UNIMPLEMENTED();\n  }\n\n  virtual uptr ThreadCount() const { UNIMPLEMENTED(); }\n  virtual tid_t GetThreadID(uptr index) const { UNIMPLEMENTED(); }\n\n protected:\n  ~SuspendedThreadsList() {}\n\n private:\n  // Prohibit copy and assign.\n  SuspendedThreadsList(const SuspendedThreadsList &) = delete;\n  void operator=(const SuspendedThreadsList &) = delete;\n};\n\ntypedef void (*StopTheWorldCallback)(\n    const SuspendedThreadsList &suspended_threads_list,\n    void *argument);\n\n// Suspend all threads in the current process and run the callback on the list\n// of suspended threads. 
This function will resume the threads before returning.\n// The callback should not call any libc functions. The callback must not call\n// exit() nor _exit() and instead return to the caller.\n// This function should NOT be called from multiple threads simultaneously.\nvoid StopTheWorld(StopTheWorldCallback callback, void *argument);\n\n}  // namespace __sanitizer\n\n#endif  // SANITIZER_STOPTHEWORLD_H\n"
  },
  {
    "path": "runtime/sanitizer_common/sanitizer_stoptheworld_fuchsia.cpp",
    "content": "//===-- sanitizer_stoptheworld_fuchsia.cpp -------------------------------===//\n//\n// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n// See https://llvm.org/LICENSE.txt for license information.\n// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n//\n//===---------------------------------------------------------------------===//\n//\n// See sanitizer_stoptheworld.h for details.\n//\n//===---------------------------------------------------------------------===//\n\n#include \"sanitizer_platform.h\"\n\n#if SANITIZER_FUCHSIA\n\n#include <zircon/sanitizer.h>\n\n#include \"sanitizer_stoptheworld.h\"\n#include \"sanitizer_stoptheworld_fuchsia.h\"\n\nnamespace __sanitizer {\n\n// The Fuchsia implementation stops the world but doesn't offer a real\n// SuspendedThreadsList argument.  This is enough for ASan's use case,\n// and LSan does not use this API on Fuchsia.\nvoid StopTheWorld(StopTheWorldCallback callback, void *argument) {\n  struct Params {\n    StopTheWorldCallback callback;\n    void *argument;\n  } params = {callback, argument};\n  __sanitizer_memory_snapshot(\n      nullptr, nullptr, nullptr, nullptr,\n      [](zx_status_t, void *data) {\n        auto params = reinterpret_cast<Params *>(data);\n        params->callback(SuspendedThreadsListFuchsia(), params->argument);\n      },\n      &params);\n}\n\n}  // namespace __sanitizer\n\n#endif  // SANITIZER_FUCHSIA\n"
  },
  {
    "path": "runtime/sanitizer_common/sanitizer_stoptheworld_fuchsia.h",
    "content": "//===-- sanitizer_stoptheworld_fuchsia.h ------------------------*- C++ -*-===//\n//\n// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n// See https://llvm.org/LICENSE.txt for license information.\n// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n//\n//===----------------------------------------------------------------------===//\n\n#ifndef SANITIZER_STOPTHEWORLD_FUCHSIA_H\n#define SANITIZER_STOPTHEWORLD_FUCHSIA_H\n\n#include \"sanitizer_stoptheworld.h\"\n\nnamespace __sanitizer {\n\nclass SuspendedThreadsListFuchsia final : public SuspendedThreadsList {};\n\n}  // namespace __sanitizer\n\n#endif  // SANITIZER_STOPTHEWORLD_FUCHSIA_H\n"
  },
  {
    "path": "runtime/sanitizer_common/sanitizer_stoptheworld_linux_libcdep.cpp",
    "content": "//===-- sanitizer_stoptheworld_linux_libcdep.cpp --------------------------===//\n//\n// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n// See https://llvm.org/LICENSE.txt for license information.\n// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n//\n//===----------------------------------------------------------------------===//\n//\n// See sanitizer_stoptheworld.h for details.\n// This implementation was inspired by Markus Gutschke's linuxthreads.cc.\n//\n//===----------------------------------------------------------------------===//\n\n#include \"sanitizer_platform.h\"\n\n#if SANITIZER_LINUX &&                                                   \\\n    (defined(__x86_64__) || defined(__mips__) || defined(__aarch64__) || \\\n     defined(__powerpc64__) || defined(__s390__) || defined(__i386__) || \\\n     defined(__arm__) || SANITIZER_RISCV64)\n\n#include \"sanitizer_stoptheworld.h\"\n\n#include \"sanitizer_platform_limits_posix.h\"\n#include \"sanitizer_atomic.h\"\n\n#include <errno.h>\n#include <sched.h> // for CLONE_* definitions\n#include <stddef.h>\n#include <sys/prctl.h> // for PR_* definitions\n#include <sys/ptrace.h> // for PTRACE_* definitions\n#include <sys/types.h> // for pid_t\n#include <sys/uio.h> // for iovec\n#include <elf.h> // for NT_PRSTATUS\n#if (defined(__aarch64__) || SANITIZER_RISCV64) && !SANITIZER_ANDROID\n// GLIBC 2.20+ sys/user does not include asm/ptrace.h\n# include <asm/ptrace.h>\n#endif\n#include <sys/user.h>  // for user_regs_struct\n#if SANITIZER_ANDROID && SANITIZER_MIPS\n# include <asm/reg.h>  // for mips SP register in sys/user.h\n#endif\n#include <sys/wait.h> // for signal-related stuff\n\n#ifdef sa_handler\n# undef sa_handler\n#endif\n\n#ifdef sa_sigaction\n# undef sa_sigaction\n#endif\n\n#include \"sanitizer_common.h\"\n#include \"sanitizer_flags.h\"\n#include \"sanitizer_libc.h\"\n#include \"sanitizer_linux.h\"\n#include \"sanitizer_mutex.h\"\n#include 
\"sanitizer_placement_new.h\"\n\n// Sufficiently old kernel headers don't provide this value, but we can still\n// call prctl with it. If the runtime kernel is new enough, the prctl call will\n// have the desired effect; if the kernel is too old, the call will error and we\n// can ignore said error.\n#ifndef PR_SET_PTRACER\n#define PR_SET_PTRACER 0x59616d61\n#endif\n\n// This module works by spawning a Linux task which then attaches to every\n// thread in the caller process with ptrace. This suspends the threads, and\n// PTRACE_GETREGS can then be used to obtain their register state. The callback\n// supplied to StopTheWorld() is run in the tracer task while the threads are\n// suspended.\n// The tracer task must be placed in a different thread group for ptrace to\n// work, so it cannot be spawned as a pthread. Instead, we use the low-level\n// clone() interface (we want to share the address space with the caller\n// process, so we prefer clone() over fork()).\n//\n// We don't use any libc functions, relying instead on direct syscalls. There\n// are two reasons for this:\n// 1. calling a library function while threads are suspended could cause a\n// deadlock, if one of the treads happens to be holding a libc lock;\n// 2. it's generally not safe to call libc functions from the tracer task,\n// because clone() does not set up a thread-local storage for it. 
Any\n// thread-local variables used by libc will be shared between the tracer task\n// and the thread which spawned it.\n\nnamespace __sanitizer {\n\nclass SuspendedThreadsListLinux final : public SuspendedThreadsList {\n public:\n  SuspendedThreadsListLinux() { thread_ids_.reserve(1024); }\n\n  tid_t GetThreadID(uptr index) const override;\n  uptr ThreadCount() const override;\n  bool ContainsTid(tid_t thread_id) const;\n  void Append(tid_t tid);\n\n  PtraceRegistersStatus GetRegistersAndSP(uptr index,\n                                          InternalMmapVector<uptr> *buffer,\n                                          uptr *sp) const override;\n\n private:\n  InternalMmapVector<tid_t> thread_ids_;\n};\n\n// Structure for passing arguments into the tracer thread.\nstruct TracerThreadArgument {\n  StopTheWorldCallback callback;\n  void *callback_argument;\n  // The tracer thread waits on this mutex while the parent finishes its\n  // preparations.\n  Mutex mutex;\n  // Tracer thread signals its completion by setting done.\n  atomic_uintptr_t done;\n  uptr parent_pid;\n};\n\n// This class handles thread suspending/unsuspending in the tracer thread.\nclass ThreadSuspender {\n public:\n  explicit ThreadSuspender(pid_t pid, TracerThreadArgument *arg)\n    : arg(arg)\n    , pid_(pid) {\n      CHECK_GE(pid, 0);\n    }\n  bool SuspendAllThreads();\n  void ResumeAllThreads();\n  void KillAllThreads();\n  SuspendedThreadsListLinux &suspended_threads_list() {\n    return suspended_threads_list_;\n  }\n  TracerThreadArgument *arg;\n private:\n  SuspendedThreadsListLinux suspended_threads_list_;\n  pid_t pid_;\n  bool SuspendThread(tid_t thread_id);\n};\n\nbool ThreadSuspender::SuspendThread(tid_t tid) {\n  // Are we already attached to this thread?\n  // Currently this check takes linear time, however the number of threads is\n  // usually small.\n  if (suspended_threads_list_.ContainsTid(tid)) return false;\n  int pterrno;\n  if 
(internal_iserror(internal_ptrace(PTRACE_ATTACH, tid, nullptr, nullptr),\n                       &pterrno)) {\n    // Either the thread is dead, or something prevented us from attaching.\n    // Log this event and move on.\n    VReport(1, \"Could not attach to thread %zu (errno %d).\\n\", (uptr)tid,\n            pterrno);\n    return false;\n  } else {\n    VReport(2, \"Attached to thread %zu.\\n\", (uptr)tid);\n    // The thread is not guaranteed to stop before ptrace returns, so we must\n    // wait on it. Note: if the thread receives a signal concurrently,\n    // we can get notification about the signal before notification about stop.\n    // In such case we need to forward the signal to the thread, otherwise\n    // the signal will be missed (as we do PTRACE_DETACH with arg=0) and\n    // any logic relying on signals will break. After forwarding we need to\n    // continue to wait for stopping, because the thread is not stopped yet.\n    // We do ignore delivery of SIGSTOP, because we want to make stop-the-world\n    // as invisible as possible.\n    for (;;) {\n      int status;\n      uptr waitpid_status;\n      HANDLE_EINTR(waitpid_status, internal_waitpid(tid, &status, __WALL));\n      int wperrno;\n      if (internal_iserror(waitpid_status, &wperrno)) {\n        // Got a ECHILD error. 
I don't think this situation is possible, but it\n        // doesn't hurt to report it.\n        VReport(1, \"Waiting on thread %zu failed, detaching (errno %d).\\n\",\n                (uptr)tid, wperrno);\n        internal_ptrace(PTRACE_DETACH, tid, nullptr, nullptr);\n        return false;\n      }\n      if (WIFSTOPPED(status) && WSTOPSIG(status) != SIGSTOP) {\n        internal_ptrace(PTRACE_CONT, tid, nullptr,\n                        (void*)(uptr)WSTOPSIG(status));\n        continue;\n      }\n      break;\n    }\n    suspended_threads_list_.Append(tid);\n    return true;\n  }\n}\n\nvoid ThreadSuspender::ResumeAllThreads() {\n  for (uptr i = 0; i < suspended_threads_list_.ThreadCount(); i++) {\n    pid_t tid = suspended_threads_list_.GetThreadID(i);\n    int pterrno;\n    if (!internal_iserror(internal_ptrace(PTRACE_DETACH, tid, nullptr, nullptr),\n                          &pterrno)) {\n      VReport(2, \"Detached from thread %d.\\n\", tid);\n    } else {\n      // Either the thread is dead, or we are already detached.\n      // The latter case is possible, for instance, if this function was called\n      // from a signal handler.\n      VReport(1, \"Could not detach from thread %d (errno %d).\\n\", tid, pterrno);\n    }\n  }\n}\n\nvoid ThreadSuspender::KillAllThreads() {\n  for (uptr i = 0; i < suspended_threads_list_.ThreadCount(); i++)\n    internal_ptrace(PTRACE_KILL, suspended_threads_list_.GetThreadID(i),\n                    nullptr, nullptr);\n}\n\nbool ThreadSuspender::SuspendAllThreads() {\n  ThreadLister thread_lister(pid_);\n  bool retry = true;\n  InternalMmapVector<tid_t> threads;\n  threads.reserve(128);\n  for (int i = 0; i < 30 && retry; ++i) {\n    retry = false;\n    switch (thread_lister.ListThreads(&threads)) {\n      case ThreadLister::Error:\n        ResumeAllThreads();\n        return false;\n      case ThreadLister::Incomplete:\n        retry = true;\n        break;\n      case ThreadLister::Ok:\n        break;\n    }\n    for (tid_t 
tid : threads) {\n      if (SuspendThread(tid))\n        retry = true;\n    }\n  }\n  return suspended_threads_list_.ThreadCount();\n}\n\n// Pointer to the ThreadSuspender instance for use in signal handler.\nstatic ThreadSuspender *thread_suspender_instance = nullptr;\n\n// Synchronous signals that should not be blocked.\nstatic const int kSyncSignals[] = { SIGABRT, SIGILL, SIGFPE, SIGSEGV, SIGBUS,\n                                    SIGXCPU, SIGXFSZ };\n\nstatic void TracerThreadDieCallback() {\n  // Generally a call to Die() in the tracer thread should be fatal to the\n  // parent process as well, because they share the address space.\n  // This really only works correctly if all the threads are suspended at this\n  // point. So we correctly handle calls to Die() from within the callback, but\n  // not those that happen before or after the callback. Hopefully there aren't\n  // a lot of opportunities for that to happen...\n  ThreadSuspender *inst = thread_suspender_instance;\n  if (inst && stoptheworld_tracer_pid == internal_getpid()) {\n    inst->KillAllThreads();\n    thread_suspender_instance = nullptr;\n  }\n}\n\n// Signal handler to wake up suspended threads when the tracer thread dies.\nstatic void TracerThreadSignalHandler(int signum, __sanitizer_siginfo *siginfo,\n                                      void *uctx) {\n  SignalContext ctx(siginfo, uctx);\n  Printf(\"Tracer caught signal %d: addr=0x%zx pc=0x%zx sp=0x%zx\\n\", signum,\n         ctx.addr, ctx.pc, ctx.sp);\n  ThreadSuspender *inst = thread_suspender_instance;\n  if (inst) {\n    if (signum == SIGABRT)\n      inst->KillAllThreads();\n    else\n      inst->ResumeAllThreads();\n    RAW_CHECK(RemoveDieCallback(TracerThreadDieCallback));\n    thread_suspender_instance = nullptr;\n    atomic_store(&inst->arg->done, 1, memory_order_relaxed);\n  }\n  internal__exit((signum == SIGABRT) ? 
1 : 2);\n}\n\n// Size of alternative stack for signal handlers in the tracer thread.\nstatic const int kHandlerStackSize = 8192;\n\n// This function will be run as a cloned task.\nstatic int TracerThread(void* argument) {\n  TracerThreadArgument *tracer_thread_argument =\n      (TracerThreadArgument *)argument;\n\n  internal_prctl(PR_SET_PDEATHSIG, SIGKILL, 0, 0, 0);\n  // Check if parent is already dead.\n  if (internal_getppid() != tracer_thread_argument->parent_pid)\n    internal__exit(4);\n\n  // Wait for the parent thread to finish preparations.\n  tracer_thread_argument->mutex.Lock();\n  tracer_thread_argument->mutex.Unlock();\n\n  RAW_CHECK(AddDieCallback(TracerThreadDieCallback));\n\n  ThreadSuspender thread_suspender(internal_getppid(), tracer_thread_argument);\n  // Global pointer for the signal handler.\n  thread_suspender_instance = &thread_suspender;\n\n  // Alternate stack for signal handling.\n  InternalMmapVector<char> handler_stack_memory(kHandlerStackSize);\n  stack_t handler_stack;\n  internal_memset(&handler_stack, 0, sizeof(handler_stack));\n  handler_stack.ss_sp = handler_stack_memory.data();\n  handler_stack.ss_size = kHandlerStackSize;\n  internal_sigaltstack(&handler_stack, nullptr);\n\n  // Install our handler for synchronous signals. 
Other signals should be\n  // blocked by the mask we inherited from the parent thread.\n  for (uptr i = 0; i < ARRAY_SIZE(kSyncSignals); i++) {\n    __sanitizer_sigaction act;\n    internal_memset(&act, 0, sizeof(act));\n    act.sigaction = TracerThreadSignalHandler;\n    act.sa_flags = SA_ONSTACK | SA_SIGINFO;\n    internal_sigaction_norestorer(kSyncSignals[i], &act, 0);\n  }\n\n  int exit_code = 0;\n  if (!thread_suspender.SuspendAllThreads()) {\n    VReport(1, \"Failed suspending threads.\\n\");\n    exit_code = 3;\n  } else {\n    tracer_thread_argument->callback(thread_suspender.suspended_threads_list(),\n                                     tracer_thread_argument->callback_argument);\n    thread_suspender.ResumeAllThreads();\n    exit_code = 0;\n  }\n  RAW_CHECK(RemoveDieCallback(TracerThreadDieCallback));\n  thread_suspender_instance = nullptr;\n  atomic_store(&tracer_thread_argument->done, 1, memory_order_relaxed);\n  return exit_code;\n}\n\nclass ScopedStackSpaceWithGuard {\n public:\n  explicit ScopedStackSpaceWithGuard(uptr stack_size) {\n    stack_size_ = stack_size;\n    guard_size_ = GetPageSizeCached();\n    // FIXME: Omitting MAP_STACK here works in current kernels but might break\n    // in the future.\n    guard_start_ = (uptr)MmapOrDie(stack_size_ + guard_size_,\n                                   \"ScopedStackWithGuard\");\n    CHECK(MprotectNoAccess((uptr)guard_start_, guard_size_));\n  }\n  ~ScopedStackSpaceWithGuard() {\n    UnmapOrDie((void *)guard_start_, stack_size_ + guard_size_);\n  }\n  void *Bottom() const {\n    return (void *)(guard_start_ + stack_size_ + guard_size_);\n  }\n\n private:\n  uptr stack_size_;\n  uptr guard_size_;\n  uptr guard_start_;\n};\n\n// We have a limitation on the stack frame size, so some stuff had to be moved\n// into globals.\nstatic __sanitizer_sigset_t blocked_sigset;\nstatic __sanitizer_sigset_t old_sigset;\n\nclass StopTheWorldScope {\n public:\n  StopTheWorldScope() {\n    // Make this process dumpable. 
Processes that are not dumpable cannot be\n    // attached to.\n    process_was_dumpable_ = internal_prctl(PR_GET_DUMPABLE, 0, 0, 0, 0);\n    if (!process_was_dumpable_)\n      internal_prctl(PR_SET_DUMPABLE, 1, 0, 0, 0);\n  }\n\n  ~StopTheWorldScope() {\n    // Restore the dumpable flag.\n    if (!process_was_dumpable_)\n      internal_prctl(PR_SET_DUMPABLE, 0, 0, 0, 0);\n  }\n\n private:\n  int process_was_dumpable_;\n};\n\n// When sanitizer output is being redirected to file (i.e. by using log_path),\n// the tracer should write to the parent's log instead of trying to open a new\n// file. Alert the logging code to the fact that we have a tracer.\nstruct ScopedSetTracerPID {\n  explicit ScopedSetTracerPID(uptr tracer_pid) {\n    stoptheworld_tracer_pid = tracer_pid;\n    stoptheworld_tracer_ppid = internal_getpid();\n  }\n  ~ScopedSetTracerPID() {\n    stoptheworld_tracer_pid = 0;\n    stoptheworld_tracer_ppid = 0;\n  }\n};\n\nvoid StopTheWorld(StopTheWorldCallback callback, void *argument) {\n  StopTheWorldScope in_stoptheworld;\n  // Prepare the arguments for TracerThread.\n  struct TracerThreadArgument tracer_thread_argument;\n  tracer_thread_argument.callback = callback;\n  tracer_thread_argument.callback_argument = argument;\n  tracer_thread_argument.parent_pid = internal_getpid();\n  atomic_store(&tracer_thread_argument.done, 0, memory_order_relaxed);\n  const uptr kTracerStackSize = 2 * 1024 * 1024;\n  ScopedStackSpaceWithGuard tracer_stack(kTracerStackSize);\n  // Block the execution of TracerThread until after we have set ptrace\n  // permissions.\n  tracer_thread_argument.mutex.Lock();\n  // Signal handling story.\n  // We don't want async signals to be delivered to the tracer thread,\n  // so we block all async signals before creating the thread. 
An async signal\n  // handler can temporary modify errno, which is shared with this thread.\n  // We ought to use pthread_sigmask here, because sigprocmask has undefined\n  // behavior in multithreaded programs. However, on linux sigprocmask is\n  // equivalent to pthread_sigmask with the exception that pthread_sigmask\n  // does not allow to block some signals used internally in pthread\n  // implementation. We are fine with blocking them here, we are really not\n  // going to pthread_cancel the thread.\n  // The tracer thread should not raise any synchronous signals. But in case it\n  // does, we setup a special handler for sync signals that properly kills the\n  // parent as well. Note: we don't pass CLONE_SIGHAND to clone, so handlers\n  // in the tracer thread won't interfere with user program. Double note: if a\n  // user does something along the lines of 'kill -11 pid', that can kill the\n  // process even if user setup own handler for SEGV.\n  // Thing to watch out for: this code should not change behavior of user code\n  // in any observable way. 
In particular it should not override user signal\n  // handlers.\n  internal_sigfillset(&blocked_sigset);\n  for (uptr i = 0; i < ARRAY_SIZE(kSyncSignals); i++)\n    internal_sigdelset(&blocked_sigset, kSyncSignals[i]);\n  int rv = internal_sigprocmask(SIG_BLOCK, &blocked_sigset, &old_sigset);\n  CHECK_EQ(rv, 0);\n  uptr tracer_pid = internal_clone(\n      TracerThread, tracer_stack.Bottom(),\n      CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_UNTRACED,\n      &tracer_thread_argument, nullptr /* parent_tidptr */,\n      nullptr /* newtls */, nullptr /* child_tidptr */);\n  internal_sigprocmask(SIG_SETMASK, &old_sigset, 0);\n  int local_errno = 0;\n  if (internal_iserror(tracer_pid, &local_errno)) {\n    VReport(1, \"Failed spawning a tracer thread (errno %d).\\n\", local_errno);\n    tracer_thread_argument.mutex.Unlock();\n  } else {\n    ScopedSetTracerPID scoped_set_tracer_pid(tracer_pid);\n    // On some systems we have to explicitly declare that we want to be traced\n    // by the tracer thread.\n    internal_prctl(PR_SET_PTRACER, tracer_pid, 0, 0, 0);\n    // Allow the tracer thread to start.\n    tracer_thread_argument.mutex.Unlock();\n    // NOTE: errno is shared between this thread and the tracer thread.\n    // internal_waitpid() may call syscall() which can access/spoil errno,\n    // so we can't call it now. Instead we for the tracer thread to finish using\n    // the spin loop below. Man page for sched_yield() says \"In the Linux\n    // implementation, sched_yield() always succeeds\", so let's hope it does not\n    // spoil errno. 
Note that this spin loop runs only for brief periods before\n    // the tracer thread has suspended us and when it starts unblocking threads.\n    while (atomic_load(&tracer_thread_argument.done, memory_order_relaxed) == 0)\n      sched_yield();\n    // Now the tracer thread is about to exit and does not touch errno,\n    // wait for it.\n    for (;;) {\n      uptr waitpid_status = internal_waitpid(tracer_pid, nullptr, __WALL);\n      if (!internal_iserror(waitpid_status, &local_errno))\n        break;\n      if (local_errno == EINTR)\n        continue;\n      VReport(1, \"Waiting on the tracer thread failed (errno %d).\\n\",\n              local_errno);\n      break;\n    }\n  }\n}\n\n// Platform-specific methods from SuspendedThreadsList.\n#if SANITIZER_ANDROID && defined(__arm__)\ntypedef pt_regs regs_struct;\n#define REG_SP ARM_sp\n\n#elif SANITIZER_LINUX && defined(__arm__)\ntypedef user_regs regs_struct;\n#define REG_SP uregs[13]\n\n#elif defined(__i386__) || defined(__x86_64__)\ntypedef user_regs_struct regs_struct;\n#if defined(__i386__)\n#define REG_SP esp\n#else\n#define REG_SP rsp\n#endif\n#define ARCH_IOVEC_FOR_GETREGSET\n// Support ptrace extensions even when compiled without required kernel support\n#ifndef NT_X86_XSTATE\n#define NT_X86_XSTATE 0x202\n#endif\n#ifndef PTRACE_GETREGSET\n#define PTRACE_GETREGSET 0x4204\n#endif\n// Compiler may use FP registers to store pointers.\nstatic constexpr uptr kExtraRegs[] = {NT_X86_XSTATE, NT_FPREGSET};\n\n#elif defined(__powerpc__) || defined(__powerpc64__)\ntypedef pt_regs regs_struct;\n#define REG_SP gpr[PT_R1]\n\n#elif defined(__mips__)\ntypedef struct user regs_struct;\n# if SANITIZER_ANDROID\n#  define REG_SP regs[EF_R29]\n# else\n#  define REG_SP regs[EF_REG29]\n# endif\n\n#elif defined(__aarch64__)\ntypedef struct user_pt_regs regs_struct;\n#define REG_SP sp\nstatic constexpr uptr kExtraRegs[] = {0};\n#define ARCH_IOVEC_FOR_GETREGSET\n\n#elif SANITIZER_RISCV64\ntypedef struct user_regs_struct 
regs_struct;\n// sys/ucontext.h already defines REG_SP as 2. Undefine it first.\n#undef REG_SP\n#define REG_SP sp\nstatic constexpr uptr kExtraRegs[] = {0};\n#define ARCH_IOVEC_FOR_GETREGSET\n\n#elif defined(__s390__)\ntypedef _user_regs_struct regs_struct;\n#define REG_SP gprs[15]\nstatic constexpr uptr kExtraRegs[] = {0};\n#define ARCH_IOVEC_FOR_GETREGSET\n\n#else\n#error \"Unsupported architecture\"\n#endif // SANITIZER_ANDROID && defined(__arm__)\n\ntid_t SuspendedThreadsListLinux::GetThreadID(uptr index) const {\n  CHECK_LT(index, thread_ids_.size());\n  return thread_ids_[index];\n}\n\nuptr SuspendedThreadsListLinux::ThreadCount() const {\n  return thread_ids_.size();\n}\n\nbool SuspendedThreadsListLinux::ContainsTid(tid_t thread_id) const {\n  for (uptr i = 0; i < thread_ids_.size(); i++) {\n    if (thread_ids_[i] == thread_id) return true;\n  }\n  return false;\n}\n\nvoid SuspendedThreadsListLinux::Append(tid_t tid) {\n  thread_ids_.push_back(tid);\n}\n\nPtraceRegistersStatus SuspendedThreadsListLinux::GetRegistersAndSP(\n    uptr index, InternalMmapVector<uptr> *buffer, uptr *sp) const {\n  pid_t tid = GetThreadID(index);\n  constexpr uptr uptr_sz = sizeof(uptr);\n  int pterrno;\n#ifdef ARCH_IOVEC_FOR_GETREGSET\n  auto append = [&](uptr regset) {\n    uptr size = buffer->size();\n    // NT_X86_XSTATE requires 64bit alignment.\n    uptr size_up = RoundUpTo(size, 8 / uptr_sz);\n    buffer->reserve(Max<uptr>(1024, size_up));\n    struct iovec regset_io;\n    for (;; buffer->resize(buffer->capacity() * 2)) {\n      buffer->resize(buffer->capacity());\n      uptr available_bytes = (buffer->size() - size_up) * uptr_sz;\n      regset_io.iov_base = buffer->data() + size_up;\n      regset_io.iov_len = available_bytes;\n      bool fail =\n          internal_iserror(internal_ptrace(PTRACE_GETREGSET, tid,\n                                           (void *)regset, (void *)&regset_io),\n                           &pterrno);\n      if (fail) {\n        VReport(1, 
\"Could not get regset %p from thread %d (errno %d).\\n\",\n                (void *)regset, tid, pterrno);\n        buffer->resize(size);\n        return false;\n      }\n\n      // Far enough from the buffer size, no need to resize and repeat.\n      if (regset_io.iov_len + 64 < available_bytes)\n        break;\n    }\n    buffer->resize(size_up + RoundUpTo(regset_io.iov_len, uptr_sz) / uptr_sz);\n    return true;\n  };\n\n  buffer->clear();\n  bool fail = !append(NT_PRSTATUS);\n  if (!fail) {\n    // Accept the first available and do not report errors.\n    for (uptr regs : kExtraRegs)\n      if (regs && append(regs))\n        break;\n  }\n#else\n  buffer->resize(RoundUpTo(sizeof(regs_struct), uptr_sz) / uptr_sz);\n  bool fail = internal_iserror(\n      internal_ptrace(PTRACE_GETREGS, tid, nullptr, buffer->data()), &pterrno);\n  if (fail)\n    VReport(1, \"Could not get registers from thread %d (errno %d).\\n\", tid,\n            pterrno);\n#endif\n  if (fail) {\n    // ESRCH means that the given thread is not suspended or already dead.\n    // Therefore it's unsafe to inspect its data (e.g. walk through stack) and\n    // we should notify caller about this.\n    return pterrno == ESRCH ? REGISTERS_UNAVAILABLE_FATAL\n                            : REGISTERS_UNAVAILABLE;\n  }\n\n  *sp = reinterpret_cast<regs_struct *>(buffer->data())[0].REG_SP;\n  return REGISTERS_AVAILABLE;\n}\n\n} // namespace __sanitizer\n\n#endif  // SANITIZER_LINUX && (defined(__x86_64__) || defined(__mips__)\n        // || defined(__aarch64__) || defined(__powerpc64__)\n        // || defined(__s390__) || defined(__i386__) || defined(__arm__)\n"
  },
  {
    "path": "runtime/sanitizer_common/sanitizer_stoptheworld_mac.cpp",
    "content": "//===-- sanitizer_stoptheworld_mac.cpp ------------------------------------===//\n//\n// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n// See https://llvm.org/LICENSE.txt for license information.\n// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n//\n//===----------------------------------------------------------------------===//\n//\n// See sanitizer_stoptheworld.h for details.\n//\n//===----------------------------------------------------------------------===//\n\n#include \"sanitizer_platform.h\"\n\n#if SANITIZER_MAC && (defined(__x86_64__) || defined(__aarch64__) || \\\n                      defined(__i386))\n\n#include <mach/mach.h>\n#include <mach/thread_info.h>\n#include <pthread.h>\n\n#include \"sanitizer_stoptheworld.h\"\n\nnamespace __sanitizer {\ntypedef struct {\n  tid_t tid;\n  thread_t thread;\n} SuspendedThreadInfo;\n\nclass SuspendedThreadsListMac final : public SuspendedThreadsList {\n public:\n  SuspendedThreadsListMac() : threads_(1024) {}\n\n  tid_t GetThreadID(uptr index) const override;\n  thread_t GetThread(uptr index) const;\n  uptr ThreadCount() const override;\n  bool ContainsThread(thread_t thread) const;\n  void Append(thread_t thread);\n\n  PtraceRegistersStatus GetRegistersAndSP(uptr index,\n                                          InternalMmapVector<uptr> *buffer,\n                                          uptr *sp) const override;\n\n private:\n  InternalMmapVector<SuspendedThreadInfo> threads_;\n};\n\nstruct RunThreadArgs {\n  StopTheWorldCallback callback;\n  void *argument;\n};\n\nvoid *RunThread(void *arg) {\n  struct RunThreadArgs *run_args = (struct RunThreadArgs *)arg;\n  SuspendedThreadsListMac suspended_threads_list;\n\n  thread_array_t threads;\n  mach_msg_type_number_t num_threads;\n  kern_return_t err = task_threads(mach_task_self(), &threads, &num_threads);\n  if (err != KERN_SUCCESS) {\n    VReport(1, \"Failed to get threads for task (errno %d).\\n\", err);\n   
 return nullptr;\n  }\n\n  thread_t thread_self = mach_thread_self();\n  for (unsigned int i = 0; i < num_threads; ++i) {\n    if (threads[i] == thread_self) continue;\n\n    thread_suspend(threads[i]);\n    suspended_threads_list.Append(threads[i]);\n  }\n\n  run_args->callback(suspended_threads_list, run_args->argument);\n\n  uptr num_suspended = suspended_threads_list.ThreadCount();\n  for (unsigned int i = 0; i < num_suspended; ++i) {\n    thread_resume(suspended_threads_list.GetThread(i));\n  }\n  return nullptr;\n}\n\nvoid StopTheWorld(StopTheWorldCallback callback, void *argument) {\n  struct RunThreadArgs arg = {callback, argument};\n  pthread_t run_thread = (pthread_t)internal_start_thread(RunThread, &arg);\n  internal_join_thread(run_thread);\n}\n\n#if defined(__x86_64__)\ntypedef x86_thread_state64_t regs_struct;\n\n#define SP_REG __rsp\n\n#elif defined(__aarch64__)\ntypedef arm_thread_state64_t regs_struct;\n\n# if __DARWIN_UNIX03\n#  define SP_REG __sp\n# else\n#  define SP_REG sp\n# endif\n\n#elif defined(__i386)\ntypedef x86_thread_state32_t regs_struct;\n\n#define SP_REG __esp\n\n#else\n#error \"Unsupported architecture\"\n#endif\n\ntid_t SuspendedThreadsListMac::GetThreadID(uptr index) const {\n  CHECK_LT(index, threads_.size());\n  return threads_[index].tid;\n}\n\nthread_t SuspendedThreadsListMac::GetThread(uptr index) const {\n  CHECK_LT(index, threads_.size());\n  return threads_[index].thread;\n}\n\nuptr SuspendedThreadsListMac::ThreadCount() const {\n  return threads_.size();\n}\n\nbool SuspendedThreadsListMac::ContainsThread(thread_t thread) const {\n  for (uptr i = 0; i < threads_.size(); i++) {\n    if (threads_[i].thread == thread) return true;\n  }\n  return false;\n}\n\nvoid SuspendedThreadsListMac::Append(thread_t thread) {\n  thread_identifier_info_data_t info;\n  mach_msg_type_number_t info_count = THREAD_IDENTIFIER_INFO_COUNT;\n  kern_return_t err = thread_info(thread, THREAD_IDENTIFIER_INFO,\n                                  
(thread_info_t)&info, &info_count);\n  if (err != KERN_SUCCESS) {\n    VReport(1, \"Error - unable to get thread ident for a thread\\n\");\n    return;\n  }\n  threads_.push_back({info.thread_id, thread});\n}\n\nPtraceRegistersStatus SuspendedThreadsListMac::GetRegistersAndSP(\n    uptr index, InternalMmapVector<uptr> *buffer, uptr *sp) const {\n  thread_t thread = GetThread(index);\n  regs_struct regs;\n  int err;\n  mach_msg_type_number_t reg_count = MACHINE_THREAD_STATE_COUNT;\n  err = thread_get_state(thread, MACHINE_THREAD_STATE, (thread_state_t)&regs,\n                         &reg_count);\n  if (err != KERN_SUCCESS) {\n    VReport(1, \"Error - unable to get registers for a thread\\n\");\n    // KERN_INVALID_ARGUMENT indicates that either the flavor is invalid,\n    // or the thread does not exist. The other possible error case,\n    // MIG_ARRAY_TOO_LARGE, means that the state is too large, but it's\n    // still safe to proceed.\n    return err == KERN_INVALID_ARGUMENT ? REGISTERS_UNAVAILABLE_FATAL\n                                        : REGISTERS_UNAVAILABLE;\n  }\n\n  buffer->resize(RoundUpTo(sizeof(regs), sizeof(uptr)) / sizeof(uptr));\n  internal_memcpy(buffer->data(), &regs, sizeof(regs));\n#if defined(__aarch64__) && defined(arm_thread_state64_get_sp)\n  *sp = arm_thread_state64_get_sp(regs);\n#else\n  *sp = regs.SP_REG;\n#endif\n\n  // On x86_64 and aarch64, we must account for the stack redzone, which is 128\n  // bytes.\n  if (SANITIZER_WORDSIZE == 64) *sp -= 128;\n\n  return REGISTERS_AVAILABLE;\n}\n\n} // namespace __sanitizer\n\n#endif  // SANITIZER_MAC && (defined(__x86_64__) || defined(__aarch64__)) ||\n        //                   defined(__i386))\n"
  },
  {
    "path": "runtime/sanitizer_common/sanitizer_stoptheworld_netbsd_libcdep.cpp",
    "content": "//===-- sanitizer_stoptheworld_netbsd_libcdep.cpp -------------------------===//\n//\n// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n// See https://llvm.org/LICENSE.txt for license information.\n// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n//\n//===----------------------------------------------------------------------===//\n//\n// See sanitizer_stoptheworld.h for details.\n// This implementation was inspired by Markus Gutschke's linuxthreads.cc.\n//\n// This is a NetBSD variation of Linux stoptheworld implementation\n// See sanitizer_stoptheworld_linux_libcdep.cpp for code comments.\n//\n//===----------------------------------------------------------------------===//\n\n#include \"sanitizer_platform.h\"\n\n#if SANITIZER_NETBSD\n\n#include \"sanitizer_stoptheworld.h\"\n\n#include \"sanitizer_atomic.h\"\n#include \"sanitizer_platform_limits_posix.h\"\n\n#include <sys/types.h>\n\n#include <sys/ptrace.h>\n#include <sys/uio.h>\n#include <sys/wait.h>\n\n#include <machine/reg.h>\n\n#include <elf.h>\n#include <errno.h>\n#include <sched.h>\n#include <signal.h>\n#include <stddef.h>\n\n#define internal_sigaction_norestorer internal_sigaction\n\n#include \"sanitizer_common.h\"\n#include \"sanitizer_flags.h\"\n#include \"sanitizer_libc.h\"\n#include \"sanitizer_linux.h\"\n#include \"sanitizer_mutex.h\"\n#include \"sanitizer_placement_new.h\"\n\nnamespace __sanitizer {\n\nclass SuspendedThreadsListNetBSD final : public SuspendedThreadsList {\n public:\n  SuspendedThreadsListNetBSD() { thread_ids_.reserve(1024); }\n\n  tid_t GetThreadID(uptr index) const;\n  uptr ThreadCount() const;\n  bool ContainsTid(tid_t thread_id) const;\n  void Append(tid_t tid);\n\n  PtraceRegistersStatus GetRegistersAndSP(uptr index,\n                                          InternalMmapVector<uptr> *buffer,\n                                          uptr *sp) const;\n\n private:\n  InternalMmapVector<tid_t> thread_ids_;\n};\n\nstruct 
TracerThreadArgument {\n  StopTheWorldCallback callback;\n  void *callback_argument;\n  Mutex mutex;\n  atomic_uintptr_t done;\n  uptr parent_pid;\n};\n\nclass ThreadSuspender {\n public:\n  explicit ThreadSuspender(pid_t pid, TracerThreadArgument *arg)\n      : arg(arg), pid_(pid) {\n    CHECK_GE(pid, 0);\n  }\n  bool SuspendAllThreads();\n  void ResumeAllThreads();\n  void KillAllThreads();\n  SuspendedThreadsListNetBSD &suspended_threads_list() {\n    return suspended_threads_list_;\n  }\n  TracerThreadArgument *arg;\n\n private:\n  SuspendedThreadsListNetBSD suspended_threads_list_;\n  pid_t pid_;\n};\n\nvoid ThreadSuspender::ResumeAllThreads() {\n  int pterrno;\n  if (!internal_iserror(internal_ptrace(PT_DETACH, pid_, (void *)(uptr)1, 0),\n                        &pterrno)) {\n    VReport(2, \"Detached from process %d.\\n\", pid_);\n  } else {\n    VReport(1, \"Could not detach from process %d (errno %d).\\n\", pid_, pterrno);\n  }\n}\n\nvoid ThreadSuspender::KillAllThreads() {\n  internal_ptrace(PT_KILL, pid_, nullptr, 0);\n}\n\nbool ThreadSuspender::SuspendAllThreads() {\n  int pterrno;\n  if (internal_iserror(internal_ptrace(PT_ATTACH, pid_, nullptr, 0),\n                       &pterrno)) {\n    Printf(\"Could not attach to process %d (errno %d).\\n\", pid_, pterrno);\n    return false;\n  }\n\n  int status;\n  uptr waitpid_status;\n  HANDLE_EINTR(waitpid_status, internal_waitpid(pid_, &status, 0));\n\n  VReport(2, \"Attached to process %d.\\n\", pid_);\n\n#ifdef PT_LWPNEXT\n  struct ptrace_lwpstatus pl;\n  int op = PT_LWPNEXT;\n#else\n  struct ptrace_lwpinfo pl;\n  int op = PT_LWPINFO;\n#endif\n\n  pl.pl_lwpid = 0;\n\n  int val;\n  while ((val = internal_ptrace(op, pid_, (void *)&pl, sizeof(pl))) != -1 &&\n         pl.pl_lwpid != 0) {\n    suspended_threads_list_.Append(pl.pl_lwpid);\n    VReport(2, \"Appended thread %d in process %d.\\n\", pl.pl_lwpid, pid_);\n  }\n  return true;\n}\n\n// Pointer to the ThreadSuspender instance for use in signal 
handler.\nstatic ThreadSuspender *thread_suspender_instance = nullptr;\n\n// Synchronous signals that should not be blocked.\nstatic const int kSyncSignals[] = {SIGABRT, SIGILL,  SIGFPE, SIGSEGV,\n                                   SIGBUS,  SIGXCPU, SIGXFSZ};\n\nstatic void TracerThreadDieCallback() {\n  ThreadSuspender *inst = thread_suspender_instance;\n  if (inst && stoptheworld_tracer_pid == internal_getpid()) {\n    inst->KillAllThreads();\n    thread_suspender_instance = nullptr;\n  }\n}\n\n// Signal handler to wake up suspended threads when the tracer thread dies.\nstatic void TracerThreadSignalHandler(int signum, __sanitizer_siginfo *siginfo,\n                                      void *uctx) {\n  SignalContext ctx(siginfo, uctx);\n  Printf(\"Tracer caught signal %d: addr=0x%zx pc=0x%zx sp=0x%zx\\n\", signum,\n         ctx.addr, ctx.pc, ctx.sp);\n  ThreadSuspender *inst = thread_suspender_instance;\n  if (inst) {\n    if (signum == SIGABRT)\n      inst->KillAllThreads();\n    else\n      inst->ResumeAllThreads();\n    RAW_CHECK(RemoveDieCallback(TracerThreadDieCallback));\n    thread_suspender_instance = nullptr;\n    atomic_store(&inst->arg->done, 1, memory_order_relaxed);\n  }\n  internal__exit((signum == SIGABRT) ? 
1 : 2);\n}\n\n// Size of alternative stack for signal handlers in the tracer thread.\nstatic const int kHandlerStackSize = 8192;\n\n// This function will be run as a cloned task.\nstatic int TracerThread(void *argument) {\n  TracerThreadArgument *tracer_thread_argument =\n      (TracerThreadArgument *)argument;\n\n  // Check if parent is already dead.\n  if (internal_getppid() != tracer_thread_argument->parent_pid)\n    internal__exit(4);\n\n  // Wait for the parent thread to finish preparations.\n  tracer_thread_argument->mutex.Lock();\n  tracer_thread_argument->mutex.Unlock();\n\n  RAW_CHECK(AddDieCallback(TracerThreadDieCallback));\n\n  ThreadSuspender thread_suspender(internal_getppid(), tracer_thread_argument);\n  // Global pointer for the signal handler.\n  thread_suspender_instance = &thread_suspender;\n\n  // Alternate stack for signal handling.\n  InternalMmapVector<char> handler_stack_memory(kHandlerStackSize);\n  stack_t handler_stack;\n  internal_memset(&handler_stack, 0, sizeof(handler_stack));\n  handler_stack.ss_sp = handler_stack_memory.data();\n  handler_stack.ss_size = kHandlerStackSize;\n  internal_sigaltstack(&handler_stack, nullptr);\n\n  // Install our handler for synchronous signals. 
Other signals should be\n  // blocked by the mask we inherited from the parent thread.\n  for (uptr i = 0; i < ARRAY_SIZE(kSyncSignals); i++) {\n    __sanitizer_sigaction act;\n    internal_memset(&act, 0, sizeof(act));\n    act.sigaction = TracerThreadSignalHandler;\n    act.sa_flags = SA_ONSTACK | SA_SIGINFO;\n    internal_sigaction_norestorer(kSyncSignals[i], &act, 0);\n  }\n\n  int exit_code = 0;\n  if (!thread_suspender.SuspendAllThreads()) {\n    VReport(1, \"Failed suspending threads.\\n\");\n    exit_code = 3;\n  } else {\n    tracer_thread_argument->callback(thread_suspender.suspended_threads_list(),\n                                     tracer_thread_argument->callback_argument);\n    thread_suspender.ResumeAllThreads();\n    exit_code = 0;\n  }\n  RAW_CHECK(RemoveDieCallback(TracerThreadDieCallback));\n  thread_suspender_instance = nullptr;\n  atomic_store(&tracer_thread_argument->done, 1, memory_order_relaxed);\n  return exit_code;\n}\n\nclass ScopedStackSpaceWithGuard {\n public:\n  explicit ScopedStackSpaceWithGuard(uptr stack_size) {\n    stack_size_ = stack_size;\n    guard_size_ = GetPageSizeCached();\n    // FIXME: Omitting MAP_STACK here works in current kernels but might break\n    // in the future.\n    guard_start_ =\n        (uptr)MmapOrDie(stack_size_ + guard_size_, \"ScopedStackWithGuard\");\n    CHECK(MprotectNoAccess((uptr)guard_start_, guard_size_));\n  }\n  ~ScopedStackSpaceWithGuard() {\n    UnmapOrDie((void *)guard_start_, stack_size_ + guard_size_);\n  }\n  void *Bottom() const {\n    return (void *)(guard_start_ + stack_size_ + guard_size_);\n  }\n\n private:\n  uptr stack_size_;\n  uptr guard_size_;\n  uptr guard_start_;\n};\n\nstatic __sanitizer_sigset_t blocked_sigset;\nstatic __sanitizer_sigset_t old_sigset;\n\nstruct ScopedSetTracerPID {\n  explicit ScopedSetTracerPID(uptr tracer_pid) {\n    stoptheworld_tracer_pid = tracer_pid;\n    stoptheworld_tracer_ppid = internal_getpid();\n  }\n  ~ScopedSetTracerPID() {\n    
stoptheworld_tracer_pid = 0;\n    stoptheworld_tracer_ppid = 0;\n  }\n};\n\nvoid StopTheWorld(StopTheWorldCallback callback, void *argument) {\n  // Prepare the arguments for TracerThread.\n  struct TracerThreadArgument tracer_thread_argument;\n  tracer_thread_argument.callback = callback;\n  tracer_thread_argument.callback_argument = argument;\n  tracer_thread_argument.parent_pid = internal_getpid();\n  atomic_store(&tracer_thread_argument.done, 0, memory_order_relaxed);\n  const uptr kTracerStackSize = 2 * 1024 * 1024;\n  ScopedStackSpaceWithGuard tracer_stack(kTracerStackSize);\n\n  tracer_thread_argument.mutex.Lock();\n\n  internal_sigfillset(&blocked_sigset);\n  for (uptr i = 0; i < ARRAY_SIZE(kSyncSignals); i++)\n    internal_sigdelset(&blocked_sigset, kSyncSignals[i]);\n  int rv = internal_sigprocmask(SIG_BLOCK, &blocked_sigset, &old_sigset);\n  CHECK_EQ(rv, 0);\n  uptr tracer_pid = internal_clone(TracerThread, tracer_stack.Bottom(),\n                                   CLONE_VM | CLONE_FS | CLONE_FILES,\n                                   &tracer_thread_argument);\n  internal_sigprocmask(SIG_SETMASK, &old_sigset, 0);\n  int local_errno = 0;\n  if (internal_iserror(tracer_pid, &local_errno)) {\n    VReport(1, \"Failed spawning a tracer thread (errno %d).\\n\", local_errno);\n    tracer_thread_argument.mutex.Unlock();\n  } else {\n    ScopedSetTracerPID scoped_set_tracer_pid(tracer_pid);\n\n    tracer_thread_argument.mutex.Unlock();\n\n    while (atomic_load(&tracer_thread_argument.done, memory_order_relaxed) == 0)\n      sched_yield();\n\n    for (;;) {\n      uptr waitpid_status = internal_waitpid(tracer_pid, nullptr, __WALL);\n      if (!internal_iserror(waitpid_status, &local_errno))\n        break;\n      if (local_errno == EINTR)\n        continue;\n      VReport(1, \"Waiting on the tracer thread failed (errno %d).\\n\",\n              local_errno);\n      break;\n    }\n  }\n}\n\ntid_t SuspendedThreadsListNetBSD::GetThreadID(uptr index) const {\n  
CHECK_LT(index, thread_ids_.size());\n  return thread_ids_[index];\n}\n\nuptr SuspendedThreadsListNetBSD::ThreadCount() const {\n  return thread_ids_.size();\n}\n\nbool SuspendedThreadsListNetBSD::ContainsTid(tid_t thread_id) const {\n  for (uptr i = 0; i < thread_ids_.size(); i++) {\n    if (thread_ids_[i] == thread_id)\n      return true;\n  }\n  return false;\n}\n\nvoid SuspendedThreadsListNetBSD::Append(tid_t tid) {\n  thread_ids_.push_back(tid);\n}\n\nPtraceRegistersStatus SuspendedThreadsListNetBSD::GetRegistersAndSP(\n    uptr index, InternalMmapVector<uptr> *buffer, uptr *sp) const {\n  lwpid_t tid = GetThreadID(index);\n  pid_t ppid = internal_getppid();\n  struct reg regs;\n  int pterrno;\n  bool isErr =\n      internal_iserror(internal_ptrace(PT_GETREGS, ppid, &regs, tid), &pterrno);\n  if (isErr) {\n    VReport(1,\n            \"Could not get registers from process %d thread %d (errno %d).\\n\",\n            ppid, tid, pterrno);\n    return pterrno == ESRCH ? REGISTERS_UNAVAILABLE_FATAL\n                            : REGISTERS_UNAVAILABLE;\n  }\n\n  *sp = PTRACE_REG_SP(&regs);\n  buffer->resize(RoundUpTo(sizeof(regs), sizeof(uptr)) / sizeof(uptr));\n  internal_memcpy(buffer->data(), &regs, sizeof(regs));\n\n  return REGISTERS_AVAILABLE;\n}\n\n}  // namespace __sanitizer\n\n#endif\n"
  },
  {
    "path": "runtime/sanitizer_common/sanitizer_stoptheworld_win.cpp",
    "content": "//===-- sanitizer_stoptheworld_win.cpp ------------------------------------===//\n//\n// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n// See https://llvm.org/LICENSE.txt for license information.\n// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n//\n//===----------------------------------------------------------------------===//\n//\n// See sanitizer_stoptheworld.h for details.\n//\n//===----------------------------------------------------------------------===//\n\n#include \"sanitizer_platform.h\"\n\n#if SANITIZER_WINDOWS\n\n#  define WIN32_LEAN_AND_MEAN\n#  include <windows.h>\n// windows.h needs to be included before tlhelp32.h\n#  include <tlhelp32.h>\n\n#  include \"sanitizer_stoptheworld.h\"\n\nnamespace __sanitizer {\n\nnamespace {\n\nstruct SuspendedThreadsListWindows final : public SuspendedThreadsList {\n  InternalMmapVector<HANDLE> threadHandles;\n  InternalMmapVector<DWORD> threadIds;\n\n  SuspendedThreadsListWindows() {\n    threadIds.reserve(1024);\n    threadHandles.reserve(1024);\n  }\n\n  PtraceRegistersStatus GetRegistersAndSP(uptr index,\n                                          InternalMmapVector<uptr> *buffer,\n                                          uptr *sp) const override;\n\n  tid_t GetThreadID(uptr index) const override;\n  uptr ThreadCount() const override;\n};\n\n// Stack Pointer register names on different architectures\n#  if SANITIZER_X64\n#    define SP_REG Rsp\n#  elif SANITIZER_I386\n#    define SP_REG Esp\n#  elif SANITIZER_ARM | SANITIZER_ARM64\n#    define SP_REG Sp\n#  else\n#    error Architecture not supported!\n#  endif\n\nPtraceRegistersStatus SuspendedThreadsListWindows::GetRegistersAndSP(\n    uptr index, InternalMmapVector<uptr> *buffer, uptr *sp) const {\n  CHECK_LT(index, threadHandles.size());\n\n  buffer->resize(RoundUpTo(sizeof(CONTEXT), sizeof(uptr)) / sizeof(uptr));\n  CONTEXT *thread_context = reinterpret_cast<CONTEXT *>(buffer->data());\n  
thread_context->ContextFlags = CONTEXT_ALL;\n  CHECK(GetThreadContext(threadHandles[index], thread_context));\n  *sp = thread_context->SP_REG;\n\n  return REGISTERS_AVAILABLE;\n}\n\ntid_t SuspendedThreadsListWindows::GetThreadID(uptr index) const {\n  CHECK_LT(index, threadIds.size());\n  return threadIds[index];\n}\n\nuptr SuspendedThreadsListWindows::ThreadCount() const {\n  return threadIds.size();\n}\n\nstruct RunThreadArgs {\n  StopTheWorldCallback callback;\n  void *argument;\n};\n\nDWORD WINAPI RunThread(void *argument) {\n  RunThreadArgs *run_args = (RunThreadArgs *)argument;\n\n  const DWORD this_thread = GetCurrentThreadId();\n  const DWORD this_process = GetCurrentProcessId();\n\n  SuspendedThreadsListWindows suspended_threads_list;\n  bool new_thread_found;\n\n  do {\n    // Take a snapshot of all Threads\n    const HANDLE threads = CreateToolhelp32Snapshot(TH32CS_SNAPTHREAD, 0);\n    CHECK(threads != INVALID_HANDLE_VALUE);\n\n    THREADENTRY32 thread_entry;\n    thread_entry.dwSize = sizeof(thread_entry);\n    new_thread_found = false;\n\n    if (!Thread32First(threads, &thread_entry))\n      break;\n\n    do {\n      if (thread_entry.th32ThreadID == this_thread ||\n          thread_entry.th32OwnerProcessID != this_process)\n        continue;\n\n      bool suspended_thread = false;\n      for (const auto thread_id : suspended_threads_list.threadIds) {\n        if (thread_id == thread_entry.th32ThreadID) {\n          suspended_thread = true;\n          break;\n        }\n      }\n\n      // Skip the Thread if it was already suspended\n      if (suspended_thread)\n        continue;\n\n      const HANDLE thread =\n          OpenThread(THREAD_ALL_ACCESS, FALSE, thread_entry.th32ThreadID);\n      CHECK(thread);\n\n      if (SuspendThread(thread) == (DWORD)-1) {\n        DWORD last_error = GetLastError();\n\n        VPrintf(1, \"Could not suspend thread %lu (error %lu)\",\n                thread_entry.th32ThreadID, last_error);\n        continue;\n      
}\n\n      suspended_threads_list.threadIds.push_back(thread_entry.th32ThreadID);\n      suspended_threads_list.threadHandles.push_back(thread);\n      new_thread_found = true;\n    } while (Thread32Next(threads, &thread_entry));\n\n    CloseHandle(threads);\n\n    // Between the call to `CreateToolhelp32Snapshot` and suspending the\n    // relevant Threads, new Threads could have potentially been created. So\n    // continue to find and suspend new Threads until we don't find any.\n  } while (new_thread_found);\n\n  // Now all Threads of this Process except of this Thread should be suspended.\n  // Execute the callback function.\n  run_args->callback(suspended_threads_list, run_args->argument);\n\n  // Resume all Threads\n  for (const auto suspended_thread_handle :\n       suspended_threads_list.threadHandles) {\n    CHECK_NE(ResumeThread(suspended_thread_handle), -1);\n    CloseHandle(suspended_thread_handle);\n  }\n\n  return 0;\n}\n\n}  // namespace\n\nvoid StopTheWorld(StopTheWorldCallback callback, void *argument) {\n  struct RunThreadArgs arg = {callback, argument};\n  DWORD trace_thread_id;\n\n  auto trace_thread =\n      CreateThread(nullptr, 0, RunThread, &arg, 0, &trace_thread_id);\n  CHECK(trace_thread);\n\n  WaitForSingleObject(trace_thread, INFINITE);\n  CloseHandle(trace_thread);\n}\n\n}  // namespace __sanitizer\n\n#endif  // SANITIZER_WINDOWS\n"
  },
  {
    "path": "runtime/sanitizer_common/sanitizer_suppressions.cpp",
    "content": "//===-- sanitizer_suppressions.cpp ----------------------------------------===//\n//\n// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n// See https://llvm.org/LICENSE.txt for license information.\n// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n//\n//===----------------------------------------------------------------------===//\n//\n// Suppression parsing/matching code.\n//\n//===----------------------------------------------------------------------===//\n\n#include \"sanitizer_suppressions.h\"\n\n#include \"sanitizer_allocator_internal.h\"\n#include \"sanitizer_common.h\"\n#include \"sanitizer_flags.h\"\n#include \"sanitizer_file.h\"\n#include \"sanitizer_libc.h\"\n#include \"sanitizer_placement_new.h\"\n\nnamespace __sanitizer {\n\nSuppressionContext::SuppressionContext(const char *suppression_types[],\n                                       int suppression_types_num)\n    : suppression_types_(suppression_types),\n      suppression_types_num_(suppression_types_num),\n      can_parse_(true) {\n  CHECK_LE(suppression_types_num_, kMaxSuppressionTypes);\n  internal_memset(has_suppression_type_, 0, suppression_types_num_);\n}\n\n#if !SANITIZER_FUCHSIA\nstatic bool GetPathAssumingFileIsRelativeToExec(const char *file_path,\n                                                /*out*/char *new_file_path,\n                                                uptr new_file_path_size) {\n  InternalMmapVector<char> exec(kMaxPathLength);\n  if (ReadBinaryNameCached(exec.data(), exec.size())) {\n    const char *file_name_pos = StripModuleName(exec.data());\n    uptr path_to_exec_len = file_name_pos - exec.data();\n    internal_strncat(new_file_path, exec.data(),\n                     Min(path_to_exec_len, new_file_path_size - 1));\n    internal_strncat(new_file_path, file_path,\n                     new_file_path_size - internal_strlen(new_file_path) - 1);\n    return true;\n  }\n  return false;\n}\n\nstatic const char 
*FindFile(const char *file_path,\n                            /*out*/char *new_file_path,\n                            uptr new_file_path_size) {\n  // If we cannot find the file, check if its location is relative to\n  // the location of the executable.\n  if (!FileExists(file_path) && !IsAbsolutePath(file_path) &&\n      GetPathAssumingFileIsRelativeToExec(file_path, new_file_path,\n                                          new_file_path_size)) {\n    return new_file_path;\n  }\n  return file_path;\n}\n#else\nstatic const char *FindFile(const char *file_path, char *, uptr) {\n  return file_path;\n}\n#endif\n\nvoid SuppressionContext::ParseFromFile(const char *filename) {\n  if (filename[0] == '\\0')\n    return;\n\n  InternalMmapVector<char> new_file_path(kMaxPathLength);\n  filename = FindFile(filename, new_file_path.data(), new_file_path.size());\n\n  // Read the file.\n  VPrintf(1, \"%s: reading suppressions file at %s\\n\",\n          SanitizerToolName, filename);\n  char *file_contents;\n  uptr buffer_size;\n  uptr contents_size;\n  if (!ReadFileToBuffer(filename, &file_contents, &buffer_size,\n                        &contents_size)) {\n    Printf(\"%s: failed to read suppressions file '%s'\\n\", SanitizerToolName,\n           filename);\n    Die();\n  }\n\n  Parse(file_contents);\n}\n\nbool SuppressionContext::Match(const char *str, const char *type,\n                               Suppression **s) {\n  can_parse_ = false;\n  if (!HasSuppressionType(type))\n    return false;\n  for (uptr i = 0; i < suppressions_.size(); i++) {\n    Suppression &cur = suppressions_[i];\n    if (0 == internal_strcmp(cur.type, type) && TemplateMatch(cur.templ, str)) {\n      *s = &cur;\n      return true;\n    }\n  }\n  return false;\n}\n\nstatic const char *StripPrefix(const char *str, const char *prefix) {\n  while (*str && *str == *prefix) {\n    str++;\n    prefix++;\n  }\n  if (!*prefix)\n    return str;\n  return 0;\n}\n\nvoid SuppressionContext::Parse(const char *str) 
{\n  // Context must not mutate once Match has been called.\n  CHECK(can_parse_);\n  const char *line = str;\n  while (line) {\n    while (line[0] == ' ' || line[0] == '\\t')\n      line++;\n    const char *end = internal_strchr(line, '\\n');\n    if (end == 0)\n      end = line + internal_strlen(line);\n    if (line != end && line[0] != '#') {\n      const char *end2 = end;\n      while (line != end2 &&\n             (end2[-1] == ' ' || end2[-1] == '\\t' || end2[-1] == '\\r'))\n        end2--;\n      int type;\n      for (type = 0; type < suppression_types_num_; type++) {\n        const char *next_char = StripPrefix(line, suppression_types_[type]);\n        if (next_char && *next_char == ':') {\n          line = ++next_char;\n          break;\n        }\n      }\n      if (type == suppression_types_num_) {\n        Printf(\"%s: failed to parse suppressions\\n\", SanitizerToolName);\n        Die();\n      }\n      Suppression s;\n      s.type = suppression_types_[type];\n      s.templ = (char*)InternalAlloc(end2 - line + 1);\n      internal_memcpy(s.templ, line, end2 - line);\n      s.templ[end2 - line] = 0;\n      suppressions_.push_back(s);\n      has_suppression_type_[type] = true;\n    }\n    if (end[0] == 0)\n      break;\n    line = end + 1;\n  }\n}\n\nuptr SuppressionContext::SuppressionCount() const {\n  return suppressions_.size();\n}\n\nbool SuppressionContext::HasSuppressionType(const char *type) const {\n  for (int i = 0; i < suppression_types_num_; i++) {\n    if (0 == internal_strcmp(type, suppression_types_[i]))\n      return has_suppression_type_[i];\n  }\n  return false;\n}\n\nconst Suppression *SuppressionContext::SuppressionAt(uptr i) const {\n  CHECK_LT(i, suppressions_.size());\n  return &suppressions_[i];\n}\n\nvoid SuppressionContext::GetMatched(\n    InternalMmapVector<Suppression *> *matched) {\n  for (uptr i = 0; i < suppressions_.size(); i++)\n    if (atomic_load_relaxed(&suppressions_[i].hit_count))\n      
matched->push_back(&suppressions_[i]);\n}\n\n}  // namespace __sanitizer\n"
  },
  {
    "path": "runtime/sanitizer_common/sanitizer_suppressions.h",
    "content": "//===-- sanitizer_suppressions.h --------------------------------*- C++ -*-===//\n//\n// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n// See https://llvm.org/LICENSE.txt for license information.\n// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n//\n//===----------------------------------------------------------------------===//\n//\n// Suppression parsing/matching code.\n//\n//===----------------------------------------------------------------------===//\n#ifndef SANITIZER_SUPPRESSIONS_H\n#define SANITIZER_SUPPRESSIONS_H\n\n#include \"sanitizer_common.h\"\n#include \"sanitizer_atomic.h\"\n#include \"sanitizer_internal_defs.h\"\n\nnamespace __sanitizer {\n\nstruct Suppression {\n  Suppression() { internal_memset(this, 0, sizeof(*this)); }\n  const char *type;\n  char *templ;\n  atomic_uint32_t hit_count;\n  uptr weight;\n};\n\nclass SuppressionContext {\n public:\n  // Create new SuppressionContext capable of parsing given suppression types.\n  SuppressionContext(const char *supprression_types[],\n                     int suppression_types_num);\n\n  void ParseFromFile(const char *filename);\n  void Parse(const char *str);\n\n  bool Match(const char *str, const char *type, Suppression **s);\n  uptr SuppressionCount() const;\n  bool HasSuppressionType(const char *type) const;\n  const Suppression *SuppressionAt(uptr i) const;\n  void GetMatched(InternalMmapVector<Suppression *> *matched);\n\n private:\n  static const int kMaxSuppressionTypes = 64;\n  const char **const suppression_types_;\n  const int suppression_types_num_;\n\n  InternalMmapVector<Suppression> suppressions_;\n  bool has_suppression_type_[kMaxSuppressionTypes];\n  bool can_parse_;\n};\n\n}  // namespace __sanitizer\n\n#endif  // SANITIZER_SUPPRESSIONS_H\n"
  },
  {
    "path": "runtime/sanitizer_common/sanitizer_symbolizer.cpp",
    "content": "//===-- sanitizer_symbolizer.cpp ------------------------------------------===//\n//\n// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n// See https://llvm.org/LICENSE.txt for license information.\n// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n//\n//===----------------------------------------------------------------------===//\n//\n// This file is shared between AddressSanitizer and ThreadSanitizer\n// run-time libraries.\n//===----------------------------------------------------------------------===//\n\n#include \"sanitizer_allocator_internal.h\"\n#include \"sanitizer_common.h\"\n#include \"sanitizer_internal_defs.h\"\n#include \"sanitizer_libc.h\"\n#include \"sanitizer_placement_new.h\"\n#include \"sanitizer_platform.h\"\n#include \"sanitizer_symbolizer_internal.h\"\n\nnamespace __sanitizer {\n\nAddressInfo::AddressInfo() {\n  internal_memset(this, 0, sizeof(AddressInfo));\n  function_offset = kUnknown;\n}\n\nvoid AddressInfo::Clear() {\n  InternalFree(module);\n  InternalFree(function);\n  InternalFree(file);\n  internal_memset(this, 0, sizeof(AddressInfo));\n  function_offset = kUnknown;\n  uuid_size = 0;\n}\n\nvoid AddressInfo::FillModuleInfo(const char *mod_name, uptr mod_offset,\n                                 ModuleArch mod_arch) {\n  module = internal_strdup(mod_name);\n  module_offset = mod_offset;\n  module_arch = mod_arch;\n  uuid_size = 0;\n}\n\nvoid AddressInfo::FillModuleInfo(const LoadedModule &mod) {\n  module = internal_strdup(mod.full_name());\n  module_offset = address - mod.base_address();\n  module_arch = mod.arch();\n  if (mod.uuid_size())\n    internal_memcpy(uuid, mod.uuid(), mod.uuid_size());\n  uuid_size = mod.uuid_size();\n}\n\nSymbolizedStack::SymbolizedStack() : next(nullptr), info() {}\n\nSymbolizedStack *SymbolizedStack::New(uptr addr) {\n  void *mem = InternalAlloc(sizeof(SymbolizedStack));\n  SymbolizedStack *res = new(mem) SymbolizedStack();\n  res->info.address = 
addr;\n  return res;\n}\n\nvoid SymbolizedStack::ClearAll() {\n  info.Clear();\n  if (next)\n    next->ClearAll();\n  InternalFree(this);\n}\n\nDataInfo::DataInfo() {\n  internal_memset(this, 0, sizeof(DataInfo));\n}\n\nvoid DataInfo::Clear() {\n  InternalFree(module);\n  InternalFree(file);\n  InternalFree(name);\n  internal_memset(this, 0, sizeof(DataInfo));\n}\n\nvoid FrameInfo::Clear() {\n  InternalFree(module);\n  for (LocalInfo &local : locals) {\n    InternalFree(local.function_name);\n    InternalFree(local.name);\n    InternalFree(local.decl_file);\n  }\n  locals.clear();\n}\n\nSymbolizer *Symbolizer::symbolizer_;\nStaticSpinMutex Symbolizer::init_mu_;\nLowLevelAllocator Symbolizer::symbolizer_allocator_;\n\nvoid Symbolizer::InvalidateModuleList() {\n  modules_fresh_ = false;\n}\n\nvoid Symbolizer::AddHooks(Symbolizer::StartSymbolizationHook start_hook,\n                          Symbolizer::EndSymbolizationHook end_hook) {\n  CHECK(start_hook_ == 0 && end_hook_ == 0);\n  start_hook_ = start_hook;\n  end_hook_ = end_hook;\n}\n\nconst char *Symbolizer::ModuleNameOwner::GetOwnedCopy(const char *str) {\n  mu_->CheckLocked();\n\n  // 'str' will be the same string multiple times in a row, optimize this case.\n  if (last_match_ && !internal_strcmp(last_match_, str))\n    return last_match_;\n\n  // FIXME: this is linear search.\n  // We should optimize this further if this turns out to be a bottleneck later.\n  for (uptr i = 0; i < storage_.size(); ++i) {\n    if (!internal_strcmp(storage_[i], str)) {\n      last_match_ = storage_[i];\n      return last_match_;\n    }\n  }\n  last_match_ = internal_strdup(str);\n  storage_.push_back(last_match_);\n  return last_match_;\n}\n\nSymbolizer::Symbolizer(IntrusiveList<SymbolizerTool> tools)\n    : module_names_(&mu_), modules_(), modules_fresh_(false), tools_(tools),\n      start_hook_(0), end_hook_(0) {}\n\nSymbolizer::SymbolizerScope::SymbolizerScope(const Symbolizer *sym)\n    : sym_(sym) {\n  if 
(sym_->start_hook_)\n    sym_->start_hook_();\n}\n\nSymbolizer::SymbolizerScope::~SymbolizerScope() {\n  if (sym_->end_hook_)\n    sym_->end_hook_();\n}\n\n}  // namespace __sanitizer\n"
  },
  {
    "path": "runtime/sanitizer_common/sanitizer_symbolizer.h",
    "content": "//===-- sanitizer_symbolizer.h ----------------------------------*- C++ -*-===//\n//\n// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n// See https://llvm.org/LICENSE.txt for license information.\n// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n//\n//===----------------------------------------------------------------------===//\n//\n// Symbolizer is used by sanitizers to map instruction address to a location in\n// source code at run-time. Symbolizer either uses __sanitizer_symbolize_*\n// defined in the program, or (if they are missing) tries to find and\n// launch \"llvm-symbolizer\" commandline tool in a separate process and\n// communicate with it.\n//\n// Generally we should try to avoid calling system library functions during\n// symbolization (and use their replacements from sanitizer_libc.h instead).\n//===----------------------------------------------------------------------===//\n#ifndef SANITIZER_SYMBOLIZER_H\n#define SANITIZER_SYMBOLIZER_H\n\n#include \"sanitizer_common.h\"\n#include \"sanitizer_mutex.h\"\n#include \"sanitizer_vector.h\"\n\nnamespace __sanitizer {\n\nstruct AddressInfo {\n  // Owns all the string members. 
Storage for them is\n  // (de)allocated using sanitizer internal allocator.\n  uptr address;\n\n  char *module;\n  uptr module_offset;\n  ModuleArch module_arch;\n  u8 uuid[kModuleUUIDSize];\n  uptr uuid_size;\n\n  static const uptr kUnknown = ~(uptr)0;\n  char *function;\n  uptr function_offset;\n\n  char *file;\n  int line;\n  int column;\n\n  AddressInfo();\n  // Deletes all strings and resets all fields.\n  void Clear();\n  void FillModuleInfo(const char *mod_name, uptr mod_offset, ModuleArch arch);\n  void FillModuleInfo(const LoadedModule &mod);\n  uptr module_base() const { return address - module_offset; }\n};\n\n// Linked list of symbolized frames (each frame is described by AddressInfo).\nstruct SymbolizedStack {\n  SymbolizedStack *next;\n  AddressInfo info;\n  static SymbolizedStack *New(uptr addr);\n  // Deletes current, and all subsequent frames in the linked list.\n  // The object cannot be accessed after the call to this function.\n  void ClearAll();\n\n private:\n  SymbolizedStack();\n};\n\n// For now, DataInfo is used to describe global variable.\nstruct DataInfo {\n  // Owns all the string members. 
Storage for them is\n  // (de)allocated using sanitizer internal allocator.\n  char *module;\n  uptr module_offset;\n  ModuleArch module_arch;\n\n  char *file;\n  uptr line;\n  char *name;\n  uptr start;\n  uptr size;\n\n  DataInfo();\n  void Clear();\n};\n\nstruct LocalInfo {\n  char *function_name = nullptr;\n  char *name = nullptr;\n  char *decl_file = nullptr;\n  unsigned decl_line = 0;\n\n  bool has_frame_offset = false;\n  bool has_size = false;\n  bool has_tag_offset = false;\n\n  sptr frame_offset;\n  uptr size;\n  uptr tag_offset;\n\n  void Clear();\n};\n\nstruct FrameInfo {\n  char *module;\n  uptr module_offset;\n  ModuleArch module_arch;\n\n  InternalMmapVector<LocalInfo> locals;\n  void Clear();\n};\n\nclass SymbolizerTool;\n\nclass Symbolizer final {\n public:\n  /// Initialize and return platform-specific implementation of symbolizer\n  /// (if it wasn't already initialized).\n  static Symbolizer *GetOrInit();\n  static void LateInitialize();\n  // Returns a list of symbolized frames for a given address (containing\n  // all inlined functions, if necessary).\n  SymbolizedStack *SymbolizePC(uptr address);\n  bool SymbolizeData(uptr address, DataInfo *info);\n  bool SymbolizeFrame(uptr address, FrameInfo *info);\n\n  // The module names Symbolizer returns are stable and unique for every given\n  // module.  
It is safe to store and compare them as pointers.\n  bool GetModuleNameAndOffsetForPC(uptr pc, const char **module_name,\n                                   uptr *module_address);\n  const char *GetModuleNameForPc(uptr pc) {\n    const char *module_name = nullptr;\n    uptr unused;\n    if (GetModuleNameAndOffsetForPC(pc, &module_name, &unused))\n      return module_name;\n    return nullptr;\n  }\n\n  // Release internal caches (if any).\n  void Flush();\n  // Attempts to demangle the provided C++ mangled name.\n  const char *Demangle(const char *name);\n\n  // Allow user to install hooks that would be called before/after Symbolizer\n  // does the actual file/line info fetching. Specific sanitizers may need this\n  // to distinguish system library calls made in user code from calls made\n  // during in-process symbolization.\n  typedef void (*StartSymbolizationHook)();\n  typedef void (*EndSymbolizationHook)();\n  // May be called at most once.\n  void AddHooks(StartSymbolizationHook start_hook,\n                EndSymbolizationHook end_hook);\n\n  void RefreshModules();\n  const LoadedModule *FindModuleForAddress(uptr address);\n\n  void InvalidateModuleList();\n\n private:\n  // GetModuleNameAndOffsetForPC has to return a string to the caller.\n  // Since the corresponding module might get unloaded later, we should create\n  // our owned copies of the strings that we can safely return.\n  // ModuleNameOwner does not provide any synchronization, thus calls to\n  // its method should be protected by |mu_|.\n  class ModuleNameOwner {\n   public:\n    explicit ModuleNameOwner(Mutex *synchronized_by)\n        : last_match_(nullptr), mu_(synchronized_by) {\n      storage_.reserve(kInitialCapacity);\n    }\n    const char *GetOwnedCopy(const char *str);\n\n   private:\n    static const uptr kInitialCapacity = 1000;\n    InternalMmapVector<const char*> storage_;\n    const char *last_match_;\n\n    Mutex *mu_;\n  } module_names_;\n\n  /// Platform-specific function for 
creating a Symbolizer object.\n  static Symbolizer *PlatformInit();\n\n  bool FindModuleNameAndOffsetForAddress(uptr address, const char **module_name,\n                                         uptr *module_offset,\n                                         ModuleArch *module_arch);\n  ListOfModules modules_;\n  ListOfModules fallback_modules_;\n  // If stale, need to reload the modules before looking up addresses.\n  bool modules_fresh_;\n\n  // Platform-specific default demangler, must not return nullptr.\n  const char *PlatformDemangle(const char *name);\n\n  static Symbolizer *symbolizer_;\n  static StaticSpinMutex init_mu_;\n\n  // Mutex locked from public methods of |Symbolizer|, so that the internals\n  // (including individual symbolizer tools and platform-specific methods) are\n  // always synchronized.\n  Mutex mu_;\n\n  IntrusiveList<SymbolizerTool> tools_;\n\n  explicit Symbolizer(IntrusiveList<SymbolizerTool> tools);\n\n  static LowLevelAllocator symbolizer_allocator_;\n\n  StartSymbolizationHook start_hook_;\n  EndSymbolizationHook end_hook_;\n  class SymbolizerScope {\n   public:\n    explicit SymbolizerScope(const Symbolizer *sym);\n    ~SymbolizerScope();\n   private:\n    const Symbolizer *sym_;\n  };\n};\n\n#ifdef SANITIZER_WINDOWS\nvoid InitializeDbgHelpIfNeeded();\n#endif\n\n}  // namespace __sanitizer\n\n#endif  // SANITIZER_SYMBOLIZER_H\n"
  },
  {
    "path": "runtime/sanitizer_common/sanitizer_symbolizer_fuchsia.h",
    "content": "//===-- sanitizer_symbolizer_fuchsia.h -----------------------------------===//\n//\n// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n// See https://llvm.org/LICENSE.txt for license information.\n// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n//\n//===----------------------------------------------------------------------===//\n//\n// This file is shared between various sanitizers' runtime libraries.\n//\n// Define Fuchsia's string formats and limits for the markup symbolizer.\n//===----------------------------------------------------------------------===//\n#ifndef SANITIZER_SYMBOLIZER_FUCHSIA_H\n#define SANITIZER_SYMBOLIZER_FUCHSIA_H\n\n#include \"sanitizer_internal_defs.h\"\n\nnamespace __sanitizer {\n\n// See the spec at:\n// https://fuchsia.googlesource.com/zircon/+/master/docs/symbolizer_markup.md\n\n// This is used by UBSan for type names, and by ASan for global variable names.\nconstexpr const char *kFormatDemangle = \"{{{symbol:%s}}}\";\nconstexpr uptr kFormatDemangleMax = 1024;  // Arbitrary.\n\n// Function name or equivalent from PC location.\nconstexpr const char *kFormatFunction = \"{{{pc:%p}}}\";\nconstexpr uptr kFormatFunctionMax = 64;  // More than big enough for 64-bit hex.\n\n// Global variable name or equivalent from data memory address.\nconstexpr const char *kFormatData = \"{{{data:%p}}}\";\n\n// One frame in a backtrace (printed on a line by itself).\nconstexpr const char *kFormatFrame = \"{{{bt:%u:%p}}}\";\n\n// Dump trigger element.\n#define FORMAT_DUMPFILE \"{{{dumpfile:%s:%s}}}\"\n\n}  // namespace __sanitizer\n\n#endif  // SANITIZER_SYMBOLIZER_FUCHSIA_H\n"
  },
  {
    "path": "runtime/sanitizer_common/sanitizer_symbolizer_internal.h",
    "content": "//===-- sanitizer_symbolizer_internal.h -------------------------*- C++ -*-===//\n//\n// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n// See https://llvm.org/LICENSE.txt for license information.\n// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n//\n//===----------------------------------------------------------------------===//\n//\n// Header for internal classes and functions to be used by implementations of\n// symbolizers.\n//\n//===----------------------------------------------------------------------===//\n#ifndef SANITIZER_SYMBOLIZER_INTERNAL_H\n#define SANITIZER_SYMBOLIZER_INTERNAL_H\n\n#include \"sanitizer_symbolizer.h\"\n#include \"sanitizer_file.h\"\n#include \"sanitizer_vector.h\"\n\nnamespace __sanitizer {\n\n// Parsing helpers, 'str' is searched for delimiter(s) and a string or uptr\n// is extracted. When extracting a string, a newly allocated (using\n// InternalAlloc) and null-terminated buffer is returned. They return a pointer\n// to the next characted after the found delimiter.\nconst char *ExtractToken(const char *str, const char *delims, char **result);\nconst char *ExtractInt(const char *str, const char *delims, int *result);\nconst char *ExtractUptr(const char *str, const char *delims, uptr *result);\nconst char *ExtractTokenUpToDelimiter(const char *str, const char *delimiter,\n                                      char **result);\n\nconst char *DemangleSwiftAndCXX(const char *name);\n\n// SymbolizerTool is an interface that is implemented by individual \"tools\"\n// that can perform symbolication (external llvm-symbolizer, libbacktrace,\n// Windows DbgHelp symbolizer, etc.).\nclass SymbolizerTool {\n public:\n  // The main |Symbolizer| class implements a \"fallback chain\" of symbolizer\n  // tools. 
In a request to symbolize an address, if one tool returns false,\n  // the next tool in the chain will be tried.\n  SymbolizerTool *next;\n\n  SymbolizerTool() : next(nullptr) { }\n\n  // Can't declare pure virtual functions in sanitizer runtimes:\n  // __cxa_pure_virtual might be unavailable.\n\n  // The |stack| parameter is inout. It is pre-filled with the address,\n  // module base and module offset values and is to be used to construct\n  // other stack frames.\n  virtual bool SymbolizePC(uptr addr, SymbolizedStack *stack) {\n    UNIMPLEMENTED();\n  }\n\n  // The |info| parameter is inout. It is pre-filled with the module base\n  // and module offset values.\n  virtual bool SymbolizeData(uptr addr, DataInfo *info) {\n    UNIMPLEMENTED();\n  }\n\n  virtual bool SymbolizeFrame(uptr addr, FrameInfo *info) {\n    return false;\n  }\n\n  virtual void Flush() {}\n\n  // Return nullptr to fallback to the default platform-specific demangler.\n  virtual const char *Demangle(const char *name) {\n    return nullptr;\n  }\n\n protected:\n  ~SymbolizerTool() {}\n};\n\n// SymbolizerProcess encapsulates communication between the tool and\n// external symbolizer program, running in a different subprocess.\n// SymbolizerProcess may not be used from two threads simultaneously.\nclass SymbolizerProcess {\n public:\n  explicit SymbolizerProcess(const char *path, bool use_posix_spawn = false);\n  const char *SendCommand(const char *command);\n\n protected:\n  ~SymbolizerProcess() {}\n\n  /// The maximum number of arguments required to invoke a tool process.\n  static const unsigned kArgVMax = 16;\n\n  // Customizable by subclasses.\n  virtual bool StartSymbolizerSubprocess();\n  virtual bool ReadFromSymbolizer(char *buffer, uptr max_length);\n  // Return the environment to run the symbolizer in.\n  virtual char **GetEnvP() { return GetEnviron(); }\n\n private:\n  virtual bool ReachedEndOfOutput(const char *buffer, uptr length) const {\n    UNIMPLEMENTED();\n  }\n\n  /// Fill in an 
argv array to invoke the child process.\n  virtual void GetArgV(const char *path_to_binary,\n                       const char *(&argv)[kArgVMax]) const {\n    UNIMPLEMENTED();\n  }\n\n  bool Restart();\n  const char *SendCommandImpl(const char *command);\n  bool WriteToSymbolizer(const char *buffer, uptr length);\n\n  const char *path_;\n  fd_t input_fd_;\n  fd_t output_fd_;\n\n  static const uptr kBufferSize = 16 * 1024;\n  char buffer_[kBufferSize];\n\n  static const uptr kMaxTimesRestarted = 5;\n  static const int kSymbolizerStartupTimeMillis = 10;\n  uptr times_restarted_;\n  bool failed_to_start_;\n  bool reported_invalid_path_;\n  bool use_posix_spawn_;\n};\n\nclass LLVMSymbolizerProcess;\n\n// This tool invokes llvm-symbolizer in a subprocess. It should be as portable\n// as the llvm-symbolizer tool is.\nclass LLVMSymbolizer final : public SymbolizerTool {\n public:\n  explicit LLVMSymbolizer(const char *path, LowLevelAllocator *allocator);\n\n  bool SymbolizePC(uptr addr, SymbolizedStack *stack) override;\n  bool SymbolizeData(uptr addr, DataInfo *info) override;\n  bool SymbolizeFrame(uptr addr, FrameInfo *info) override;\n\n private:\n  const char *FormatAndSendCommand(const char *command_prefix,\n                                   const char *module_name, uptr module_offset,\n                                   ModuleArch arch);\n\n  LLVMSymbolizerProcess *symbolizer_process_;\n  static const uptr kBufferSize = 16 * 1024;\n  char buffer_[kBufferSize];\n};\n\n// Parses one or more two-line strings in the following format:\n//   <function_name>\n//   <file_name>:<line_number>[:<column_number>]\n// Used by LLVMSymbolizer, Addr2LinePool and InternalSymbolizer, since all of\n// them use the same output format.  
Returns true if any useful debug\n// information was found.\nvoid ParseSymbolizePCOutput(const char *str, SymbolizedStack *res);\n\n// Parses a two-line string in the following format:\n//   <symbol_name>\n//   <start_address> <size>\n// Used by LLVMSymbolizer and InternalSymbolizer.\nvoid ParseSymbolizeDataOutput(const char *str, DataInfo *info);\n\n}  // namespace __sanitizer\n\n#endif  // SANITIZER_SYMBOLIZER_INTERNAL_H\n"
  },
  {
    "path": "runtime/sanitizer_common/sanitizer_symbolizer_libbacktrace.cpp",
    "content": "//===-- sanitizer_symbolizer_libbacktrace.cpp -----------------------------===//\n//\n// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n// See https://llvm.org/LICENSE.txt for license information.\n// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n//\n//===----------------------------------------------------------------------===//\n//\n// This file is shared between AddressSanitizer and ThreadSanitizer\n// run-time libraries.\n// Libbacktrace implementation of symbolizer parts.\n//===----------------------------------------------------------------------===//\n\n#include \"sanitizer_platform.h\"\n\n#include \"sanitizer_internal_defs.h\"\n#include \"sanitizer_symbolizer.h\"\n#include \"sanitizer_symbolizer_libbacktrace.h\"\n\n#if SANITIZER_LIBBACKTRACE\n# include \"backtrace-supported.h\"\n# if SANITIZER_POSIX && BACKTRACE_SUPPORTED && !BACKTRACE_USES_MALLOC\n#  include \"backtrace.h\"\n#  if SANITIZER_CP_DEMANGLE\n#   undef ARRAY_SIZE\n#   include \"demangle.h\"\n#  endif\n# else\n#  define SANITIZER_LIBBACKTRACE 0\n# endif\n#endif\n\nnamespace __sanitizer {\n\nstatic char *DemangleAlloc(const char *name, bool always_alloc);\n\n#if SANITIZER_LIBBACKTRACE\n\nnamespace {\n\n# if SANITIZER_CP_DEMANGLE\nstruct CplusV3DemangleData {\n  char *buf;\n  uptr size, allocated;\n};\n\nextern \"C\" {\nstatic void CplusV3DemangleCallback(const char *s, size_t l, void *vdata) {\n  CplusV3DemangleData *data = (CplusV3DemangleData *)vdata;\n  uptr needed = data->size + l + 1;\n  if (needed > data->allocated) {\n    data->allocated *= 2;\n    if (needed > data->allocated)\n      data->allocated = needed;\n    char *buf = (char *)InternalAlloc(data->allocated);\n    if (data->buf) {\n      internal_memcpy(buf, data->buf, data->size);\n      InternalFree(data->buf);\n    }\n    data->buf = buf;\n  }\n  internal_memcpy(data->buf + data->size, s, l);\n  data->buf[data->size + l] = '\\0';\n  data->size += l;\n}\n}  // extern 
\"C\"\n\nchar *CplusV3Demangle(const char *name) {\n  CplusV3DemangleData data;\n  data.buf = 0;\n  data.size = 0;\n  data.allocated = 0;\n  if (cplus_demangle_v3_callback(name, DMGL_PARAMS | DMGL_ANSI,\n                                 CplusV3DemangleCallback, &data)) {\n    if (data.size + 64 > data.allocated)\n      return data.buf;\n    char *buf = internal_strdup(data.buf);\n    InternalFree(data.buf);\n    return buf;\n  }\n  if (data.buf)\n    InternalFree(data.buf);\n  return 0;\n}\n# endif  // SANITIZER_CP_DEMANGLE\n\nstruct SymbolizeCodeCallbackArg {\n  SymbolizedStack *first;\n  SymbolizedStack *last;\n  uptr frames_symbolized;\n\n  AddressInfo *get_new_frame(uintptr_t addr) {\n    CHECK(last);\n    if (frames_symbolized > 0) {\n      SymbolizedStack *cur = SymbolizedStack::New(addr);\n      AddressInfo *info = &cur->info;\n      info->FillModuleInfo(first->info.module, first->info.module_offset,\n                           first->info.module_arch);\n      last->next = cur;\n      last = cur;\n    }\n    CHECK_EQ(addr, first->info.address);\n    CHECK_EQ(addr, last->info.address);\n    return &last->info;\n  }\n};\n\nextern \"C\" {\nstatic int SymbolizeCodePCInfoCallback(void *vdata, uintptr_t addr,\n                                       const char *filename, int lineno,\n                                       const char *function) {\n  SymbolizeCodeCallbackArg *cdata = (SymbolizeCodeCallbackArg *)vdata;\n  if (function) {\n    AddressInfo *info = cdata->get_new_frame(addr);\n    info->function = DemangleAlloc(function, /*always_alloc*/ true);\n    if (filename)\n      info->file = internal_strdup(filename);\n    info->line = lineno;\n    cdata->frames_symbolized++;\n  }\n  return 0;\n}\n\nstatic void SymbolizeCodeCallback(void *vdata, uintptr_t addr,\n                                  const char *symname, uintptr_t, uintptr_t) {\n  SymbolizeCodeCallbackArg *cdata = (SymbolizeCodeCallbackArg *)vdata;\n  if (symname) {\n    AddressInfo *info = 
cdata->get_new_frame(addr);\n    info->function = DemangleAlloc(symname, /*always_alloc*/ true);\n    cdata->frames_symbolized++;\n  }\n}\n\nstatic void SymbolizeDataCallback(void *vdata, uintptr_t, const char *symname,\n                                  uintptr_t symval, uintptr_t symsize) {\n  DataInfo *info = (DataInfo *)vdata;\n  if (symname && symval) {\n    info->name = DemangleAlloc(symname, /*always_alloc*/ true);\n    info->start = symval;\n    info->size = symsize;\n  }\n}\n\nstatic void ErrorCallback(void *, const char *, int) {}\n}  // extern \"C\"\n\n}  // namespace\n\nLibbacktraceSymbolizer *LibbacktraceSymbolizer::get(LowLevelAllocator *alloc) {\n  // State created in backtrace_create_state is leaked.\n  void *state = (void *)(backtrace_create_state(\"/proc/self/exe\", 0,\n                                                ErrorCallback, NULL));\n  if (!state)\n    return 0;\n  return new(*alloc) LibbacktraceSymbolizer(state);\n}\n\nbool LibbacktraceSymbolizer::SymbolizePC(uptr addr, SymbolizedStack *stack) {\n  SymbolizeCodeCallbackArg data;\n  data.first = stack;\n  data.last = stack;\n  data.frames_symbolized = 0;\n  backtrace_pcinfo((backtrace_state *)state_, addr, SymbolizeCodePCInfoCallback,\n                   ErrorCallback, &data);\n  if (data.frames_symbolized > 0)\n    return true;\n  backtrace_syminfo((backtrace_state *)state_, addr, SymbolizeCodeCallback,\n                    ErrorCallback, &data);\n  return (data.frames_symbolized > 0);\n}\n\nbool LibbacktraceSymbolizer::SymbolizeData(uptr addr, DataInfo *info) {\n  backtrace_syminfo((backtrace_state *)state_, addr, SymbolizeDataCallback,\n                    ErrorCallback, info);\n  return true;\n}\n\n#else  // SANITIZER_LIBBACKTRACE\n\nLibbacktraceSymbolizer *LibbacktraceSymbolizer::get(LowLevelAllocator *alloc) {\n  return 0;\n}\n\nbool LibbacktraceSymbolizer::SymbolizePC(uptr addr, SymbolizedStack *stack) {\n  (void)state_;\n  return false;\n}\n\nbool 
LibbacktraceSymbolizer::SymbolizeData(uptr addr, DataInfo *info) {\n  return false;\n}\n\n#endif  // SANITIZER_LIBBACKTRACE\n\nstatic char *DemangleAlloc(const char *name, bool always_alloc) {\n#if SANITIZER_LIBBACKTRACE && SANITIZER_CP_DEMANGLE\n  if (char *demangled = CplusV3Demangle(name))\n    return demangled;\n#endif\n  if (always_alloc)\n    return internal_strdup(name);\n  return 0;\n}\n\nconst char *LibbacktraceSymbolizer::Demangle(const char *name) {\n  return DemangleAlloc(name, /*always_alloc*/ false);\n}\n\n}  // namespace __sanitizer\n"
  },
  {
    "path": "runtime/sanitizer_common/sanitizer_symbolizer_libbacktrace.h",
    "content": "//===-- sanitizer_symbolizer_libbacktrace.h ---------------------*- C++ -*-===//\n//\n// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n// See https://llvm.org/LICENSE.txt for license information.\n// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n//\n//===----------------------------------------------------------------------===//\n//\n// This file is shared between AddressSanitizer and ThreadSanitizer\n// run-time libraries.\n// Header for libbacktrace symbolizer.\n//===----------------------------------------------------------------------===//\n#ifndef SANITIZER_SYMBOLIZER_LIBBACKTRACE_H\n#define SANITIZER_SYMBOLIZER_LIBBACKTRACE_H\n\n#include \"sanitizer_platform.h\"\n#include \"sanitizer_common.h\"\n#include \"sanitizer_allocator_internal.h\"\n#include \"sanitizer_symbolizer_internal.h\"\n\n#ifndef SANITIZER_LIBBACKTRACE\n# define SANITIZER_LIBBACKTRACE 0\n#endif\n\n#ifndef SANITIZER_CP_DEMANGLE\n# define SANITIZER_CP_DEMANGLE 0\n#endif\n\nnamespace __sanitizer {\n\nclass LibbacktraceSymbolizer final : public SymbolizerTool {\n public:\n  static LibbacktraceSymbolizer *get(LowLevelAllocator *alloc);\n\n  bool SymbolizePC(uptr addr, SymbolizedStack *stack) override;\n\n  bool SymbolizeData(uptr addr, DataInfo *info) override;\n\n  // May return NULL if demangling failed.\n  const char *Demangle(const char *name) override;\n\n private:\n  explicit LibbacktraceSymbolizer(void *state) : state_(state) {}\n\n  void *state_;  // Leaked.\n};\n\n}  // namespace __sanitizer\n#endif  // SANITIZER_SYMBOLIZER_LIBBACKTRACE_H\n"
  },
  {
    "path": "runtime/sanitizer_common/sanitizer_symbolizer_libcdep.cpp",
    "content": "//===-- sanitizer_symbolizer_libcdep.cpp ----------------------------------===//\n//\n// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n// See https://llvm.org/LICENSE.txt for license information.\n// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n//\n//===----------------------------------------------------------------------===//\n//\n// This file is shared between AddressSanitizer and ThreadSanitizer\n// run-time libraries.\n//===----------------------------------------------------------------------===//\n\n#include \"sanitizer_allocator_internal.h\"\n#include \"sanitizer_internal_defs.h\"\n#include \"sanitizer_platform.h\"\n#include \"sanitizer_symbolizer_internal.h\"\n\nnamespace __sanitizer {\n\nSymbolizer *Symbolizer::GetOrInit() {\n  SpinMutexLock l(&init_mu_);\n  if (symbolizer_)\n    return symbolizer_;\n  symbolizer_ = PlatformInit();\n  CHECK(symbolizer_);\n  return symbolizer_;\n}\n\n// See sanitizer_symbolizer_markup.cpp.\n#if !SANITIZER_SYMBOLIZER_MARKUP\n\nconst char *ExtractToken(const char *str, const char *delims, char **result) {\n  uptr prefix_len = internal_strcspn(str, delims);\n  *result = (char*)InternalAlloc(prefix_len + 1);\n  internal_memcpy(*result, str, prefix_len);\n  (*result)[prefix_len] = '\\0';\n  const char *prefix_end = str + prefix_len;\n  if (*prefix_end != '\\0') prefix_end++;\n  return prefix_end;\n}\n\nconst char *ExtractInt(const char *str, const char *delims, int *result) {\n  char *buff = nullptr;\n  const char *ret = ExtractToken(str, delims, &buff);\n  if (buff) {\n    *result = (int)internal_atoll(buff);\n  }\n  InternalFree(buff);\n  return ret;\n}\n\nconst char *ExtractUptr(const char *str, const char *delims, uptr *result) {\n  char *buff = nullptr;\n  const char *ret = ExtractToken(str, delims, &buff);\n  if (buff) {\n    *result = (uptr)internal_atoll(buff);\n  }\n  InternalFree(buff);\n  return ret;\n}\n\nconst char *ExtractSptr(const char *str, const char 
*delims, sptr *result) {\n  char *buff = nullptr;\n  const char *ret = ExtractToken(str, delims, &buff);\n  if (buff) {\n    *result = (sptr)internal_atoll(buff);\n  }\n  InternalFree(buff);\n  return ret;\n}\n\nconst char *ExtractTokenUpToDelimiter(const char *str, const char *delimiter,\n                                      char **result) {\n  const char *found_delimiter = internal_strstr(str, delimiter);\n  uptr prefix_len =\n      found_delimiter ? found_delimiter - str : internal_strlen(str);\n  *result = (char *)InternalAlloc(prefix_len + 1);\n  internal_memcpy(*result, str, prefix_len);\n  (*result)[prefix_len] = '\\0';\n  const char *prefix_end = str + prefix_len;\n  if (*prefix_end != '\\0') prefix_end += internal_strlen(delimiter);\n  return prefix_end;\n}\n\nSymbolizedStack *Symbolizer::SymbolizePC(uptr addr) {\n  Lock l(&mu_);\n  SymbolizedStack *res = SymbolizedStack::New(addr);\n  auto *mod = FindModuleForAddress(addr);\n  if (!mod)\n    return res;\n  // Always fill data about module name and offset.\n  res->info.FillModuleInfo(*mod);\n  for (auto &tool : tools_) {\n    SymbolizerScope sym_scope(this);\n    if (tool.SymbolizePC(addr, res)) {\n      return res;\n    }\n  }\n  return res;\n}\n\nbool Symbolizer::SymbolizeData(uptr addr, DataInfo *info) {\n  Lock l(&mu_);\n  const char *module_name = nullptr;\n  uptr module_offset;\n  ModuleArch arch;\n  if (!FindModuleNameAndOffsetForAddress(addr, &module_name, &module_offset,\n                                         &arch))\n    return false;\n  info->Clear();\n  info->module = internal_strdup(module_name);\n  info->module_offset = module_offset;\n  info->module_arch = arch;\n  for (auto &tool : tools_) {\n    SymbolizerScope sym_scope(this);\n    if (tool.SymbolizeData(addr, info)) {\n      return true;\n    }\n  }\n  return true;\n}\n\nbool Symbolizer::SymbolizeFrame(uptr addr, FrameInfo *info) {\n  Lock l(&mu_);\n  const char *module_name = nullptr;\n  if (!FindModuleNameAndOffsetForAddress(\n     
     addr, &module_name, &info->module_offset, &info->module_arch))\n    return false;\n  info->module = internal_strdup(module_name);\n  for (auto &tool : tools_) {\n    SymbolizerScope sym_scope(this);\n    if (tool.SymbolizeFrame(addr, info)) {\n      return true;\n    }\n  }\n  return true;\n}\n\nbool Symbolizer::GetModuleNameAndOffsetForPC(uptr pc, const char **module_name,\n                                             uptr *module_address) {\n  Lock l(&mu_);\n  const char *internal_module_name = nullptr;\n  ModuleArch arch;\n  if (!FindModuleNameAndOffsetForAddress(pc, &internal_module_name,\n                                         module_address, &arch))\n    return false;\n\n  if (module_name)\n    *module_name = module_names_.GetOwnedCopy(internal_module_name);\n  return true;\n}\n\nvoid Symbolizer::Flush() {\n  Lock l(&mu_);\n  for (auto &tool : tools_) {\n    SymbolizerScope sym_scope(this);\n    tool.Flush();\n  }\n}\n\nconst char *Symbolizer::Demangle(const char *name) {\n  Lock l(&mu_);\n  for (auto &tool : tools_) {\n    SymbolizerScope sym_scope(this);\n    if (const char *demangled = tool.Demangle(name))\n      return demangled;\n  }\n  return PlatformDemangle(name);\n}\n\nbool Symbolizer::FindModuleNameAndOffsetForAddress(uptr address,\n                                                   const char **module_name,\n                                                   uptr *module_offset,\n                                                   ModuleArch *module_arch) {\n  const LoadedModule *module = FindModuleForAddress(address);\n  if (!module)\n    return false;\n  *module_name = module->full_name();\n  *module_offset = address - module->base_address();\n  *module_arch = module->arch();\n  return true;\n}\n\nvoid Symbolizer::RefreshModules() {\n  modules_.init();\n  fallback_modules_.fallbackInit();\n  RAW_CHECK(modules_.size() > 0);\n  modules_fresh_ = true;\n}\n\nstatic const LoadedModule *SearchForModule(const ListOfModules &modules,\n              
                             uptr address) {\n  for (uptr i = 0; i < modules.size(); i++) {\n    if (modules[i].containsAddress(address)) {\n      return &modules[i];\n    }\n  }\n  return nullptr;\n}\n\nconst LoadedModule *Symbolizer::FindModuleForAddress(uptr address) {\n  bool modules_were_reloaded = false;\n  if (!modules_fresh_) {\n    RefreshModules();\n    modules_were_reloaded = true;\n  }\n  const LoadedModule *module = SearchForModule(modules_, address);\n  if (module) return module;\n\n  // dlopen/dlclose interceptors invalidate the module list, but when\n  // interception is disabled, we need to retry if the lookup fails in\n  // case the module list changed.\n#if !SANITIZER_INTERCEPT_DLOPEN_DLCLOSE\n  if (!modules_were_reloaded) {\n    RefreshModules();\n    module = SearchForModule(modules_, address);\n    if (module) return module;\n  }\n#endif\n\n  if (fallback_modules_.size()) {\n    module = SearchForModule(fallback_modules_, address);\n  }\n  return module;\n}\n\n// For now we assume the following protocol:\n// For each request of the form\n//   <module_name> <module_offset>\n// passed to STDIN, external symbolizer prints to STDOUT response:\n//   <function_name>\n//   <file_name>:<line_number>:<column_number>\n//   <function_name>\n//   <file_name>:<line_number>:<column_number>\n//   ...\n//   <empty line>\nclass LLVMSymbolizerProcess final : public SymbolizerProcess {\n public:\n  explicit LLVMSymbolizerProcess(const char *path)\n      : SymbolizerProcess(path, /*use_posix_spawn=*/SANITIZER_MAC) {}\n\n private:\n  bool ReachedEndOfOutput(const char *buffer, uptr length) const override {\n    // Empty line marks the end of llvm-symbolizer output.\n    return length >= 2 && buffer[length - 1] == '\\n' &&\n           buffer[length - 2] == '\\n';\n  }\n\n  // When adding a new architecture, don't forget to also update\n  // script/asan_symbolize.py and sanitizer_common.h.\n  void GetArgV(const char *path_to_binary,\n               const char 
*(&argv)[kArgVMax]) const override {\n#if defined(__x86_64h__)\n    const char* const kSymbolizerArch = \"--default-arch=x86_64h\";\n#elif defined(__x86_64__)\n    const char* const kSymbolizerArch = \"--default-arch=x86_64\";\n#elif defined(__i386__)\n    const char* const kSymbolizerArch = \"--default-arch=i386\";\n#elif SANITIZER_RISCV64\n    const char *const kSymbolizerArch = \"--default-arch=riscv64\";\n#elif defined(__aarch64__)\n    const char* const kSymbolizerArch = \"--default-arch=arm64\";\n#elif defined(__arm__)\n    const char* const kSymbolizerArch = \"--default-arch=arm\";\n#elif defined(__powerpc64__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__\n    const char* const kSymbolizerArch = \"--default-arch=powerpc64\";\n#elif defined(__powerpc64__) && __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__\n    const char* const kSymbolizerArch = \"--default-arch=powerpc64le\";\n#elif defined(__s390x__)\n    const char* const kSymbolizerArch = \"--default-arch=s390x\";\n#elif defined(__s390__)\n    const char* const kSymbolizerArch = \"--default-arch=s390\";\n#else\n    const char* const kSymbolizerArch = \"--default-arch=unknown\";\n#endif\n\n    const char *const demangle_flag =\n        common_flags()->demangle ? \"--demangle\" : \"--no-demangle\";\n    const char *const inline_flag =\n        common_flags()->symbolize_inline_frames ? \"--inlines\" : \"--no-inlines\";\n    int i = 0;\n    argv[i++] = path_to_binary;\n    argv[i++] = demangle_flag;\n    argv[i++] = inline_flag;\n    argv[i++] = kSymbolizerArch;\n    argv[i++] = nullptr;\n    CHECK_LE(i, kArgVMax);\n  }\n};\n\nLLVMSymbolizer::LLVMSymbolizer(const char *path, LowLevelAllocator *allocator)\n    : symbolizer_process_(new(*allocator) LLVMSymbolizerProcess(path)) {}\n\n// Parse a <file>:<line>[:<column>] buffer. The file path may contain colons on\n// Windows, so extract tokens from the right hand side first. 
The column info is\n// also optional.\nstatic const char *ParseFileLineInfo(AddressInfo *info, const char *str) {\n  char *file_line_info = nullptr;\n  str = ExtractToken(str, \"\\n\", &file_line_info);\n  CHECK(file_line_info);\n\n  if (uptr size = internal_strlen(file_line_info)) {\n    char *back = file_line_info + size - 1;\n    for (int i = 0; i < 2; ++i) {\n      while (back > file_line_info && IsDigit(*back)) --back;\n      if (*back != ':' || !IsDigit(back[1])) break;\n      info->column = info->line;\n      info->line = internal_atoll(back + 1);\n      // Truncate the string at the colon to keep only filename.\n      *back = '\\0';\n      --back;\n    }\n    ExtractToken(file_line_info, \"\", &info->file);\n  }\n\n  InternalFree(file_line_info);\n  return str;\n}\n\n// Parses one or more two-line strings in the following format:\n//   <function_name>\n//   <file_name>:<line_number>[:<column_number>]\n// Used by LLVMSymbolizer, Addr2LinePool and InternalSymbolizer, since all of\n// them use the same output format.\nvoid ParseSymbolizePCOutput(const char *str, SymbolizedStack *res) {\n  bool top_frame = true;\n  SymbolizedStack *last = res;\n  while (true) {\n    char *function_name = nullptr;\n    str = ExtractToken(str, \"\\n\", &function_name);\n    CHECK(function_name);\n    if (function_name[0] == '\\0') {\n      // There are no more frames.\n      InternalFree(function_name);\n      break;\n    }\n    SymbolizedStack *cur;\n    if (top_frame) {\n      cur = res;\n      top_frame = false;\n    } else {\n      cur = SymbolizedStack::New(res->info.address);\n      cur->info.FillModuleInfo(res->info.module, res->info.module_offset,\n                               res->info.module_arch);\n      last->next = cur;\n      last = cur;\n    }\n\n    AddressInfo *info = &cur->info;\n    info->function = function_name;\n    str = ParseFileLineInfo(info, str);\n\n    // Functions and filenames can be \"??\", in which case we write 0\n    // to address info to mark 
that names are unknown.\n    if (0 == internal_strcmp(info->function, \"??\")) {\n      InternalFree(info->function);\n      info->function = 0;\n    }\n    if (info->file && 0 == internal_strcmp(info->file, \"??\")) {\n      InternalFree(info->file);\n      info->file = 0;\n    }\n  }\n}\n\n// Parses a two-line string in the following format:\n//   <symbol_name>\n//   <start_address> <size>\n// Used by LLVMSymbolizer and InternalSymbolizer.\nvoid ParseSymbolizeDataOutput(const char *str, DataInfo *info) {\n  str = ExtractToken(str, \"\\n\", &info->name);\n  str = ExtractUptr(str, \" \", &info->start);\n  str = ExtractUptr(str, \"\\n\", &info->size);\n}\n\nstatic void ParseSymbolizeFrameOutput(const char *str,\n                                      InternalMmapVector<LocalInfo> *locals) {\n  if (internal_strncmp(str, \"??\", 2) == 0)\n    return;\n\n  while (*str) {\n    LocalInfo local;\n    str = ExtractToken(str, \"\\n\", &local.function_name);\n    str = ExtractToken(str, \"\\n\", &local.name);\n\n    AddressInfo addr;\n    str = ParseFileLineInfo(&addr, str);\n    local.decl_file = addr.file;\n    local.decl_line = addr.line;\n\n    local.has_frame_offset = internal_strncmp(str, \"??\", 2) != 0;\n    str = ExtractSptr(str, \" \", &local.frame_offset);\n\n    local.has_size = internal_strncmp(str, \"??\", 2) != 0;\n    str = ExtractUptr(str, \" \", &local.size);\n\n    local.has_tag_offset = internal_strncmp(str, \"??\", 2) != 0;\n    str = ExtractUptr(str, \"\\n\", &local.tag_offset);\n\n    locals->push_back(local);\n  }\n}\n\nbool LLVMSymbolizer::SymbolizePC(uptr addr, SymbolizedStack *stack) {\n  AddressInfo *info = &stack->info;\n  const char *buf = FormatAndSendCommand(\n      \"CODE\", info->module, info->module_offset, info->module_arch);\n  if (!buf)\n    return false;\n  ParseSymbolizePCOutput(buf, stack);\n  return true;\n}\n\nbool LLVMSymbolizer::SymbolizeData(uptr addr, DataInfo *info) {\n  const char *buf = FormatAndSendCommand(\n      \"DATA\", 
info->module, info->module_offset, info->module_arch);\n  if (!buf)\n    return false;\n  ParseSymbolizeDataOutput(buf, info);\n  info->start += (addr - info->module_offset); // Add the base address.\n  return true;\n}\n\nbool LLVMSymbolizer::SymbolizeFrame(uptr addr, FrameInfo *info) {\n  const char *buf = FormatAndSendCommand(\n      \"FRAME\", info->module, info->module_offset, info->module_arch);\n  if (!buf)\n    return false;\n  ParseSymbolizeFrameOutput(buf, &info->locals);\n  return true;\n}\n\nconst char *LLVMSymbolizer::FormatAndSendCommand(const char *command_prefix,\n                                                 const char *module_name,\n                                                 uptr module_offset,\n                                                 ModuleArch arch) {\n  CHECK(module_name);\n  int size_needed = 0;\n  if (arch == kModuleArchUnknown)\n    size_needed = internal_snprintf(buffer_, kBufferSize, \"%s \\\"%s\\\" 0x%zx\\n\",\n                                    command_prefix, module_name, module_offset);\n  else\n    size_needed = internal_snprintf(buffer_, kBufferSize,\n                                    \"%s \\\"%s:%s\\\" 0x%zx\\n\", command_prefix,\n                                    module_name, ModuleArchToString(arch),\n                                    module_offset);\n\n  if (size_needed >= static_cast<int>(kBufferSize)) {\n    Report(\"WARNING: Command buffer too small\");\n    return nullptr;\n  }\n\n  return symbolizer_process_->SendCommand(buffer_);\n}\n\nSymbolizerProcess::SymbolizerProcess(const char *path, bool use_posix_spawn)\n    : path_(path),\n      input_fd_(kInvalidFd),\n      output_fd_(kInvalidFd),\n      times_restarted_(0),\n      failed_to_start_(false),\n      reported_invalid_path_(false),\n      use_posix_spawn_(use_posix_spawn) {\n  CHECK(path_);\n  CHECK_NE(path_[0], '\\0');\n}\n\nstatic bool IsSameModule(const char* path) {\n  if (const char* ProcessName = GetProcessName()) {\n    if (const char* 
SymbolizerName = StripModuleName(path)) {\n      return !internal_strcmp(ProcessName, SymbolizerName);\n    }\n  }\n  return false;\n}\n\nconst char *SymbolizerProcess::SendCommand(const char *command) {\n  if (failed_to_start_)\n    return nullptr;\n  if (IsSameModule(path_)) {\n    Report(\"WARNING: Symbolizer was blocked from starting itself!\\n\");\n    failed_to_start_ = true;\n    return nullptr;\n  }\n  for (; times_restarted_ < kMaxTimesRestarted; times_restarted_++) {\n    // Start or restart symbolizer if we failed to send command to it.\n    if (const char *res = SendCommandImpl(command))\n      return res;\n    Restart();\n  }\n  if (!failed_to_start_) {\n    Report(\"WARNING: Failed to use and restart external symbolizer!\\n\");\n    failed_to_start_ = true;\n  }\n  return nullptr;\n}\n\nconst char *SymbolizerProcess::SendCommandImpl(const char *command) {\n  if (input_fd_ == kInvalidFd || output_fd_ == kInvalidFd)\n      return nullptr;\n  if (!WriteToSymbolizer(command, internal_strlen(command)))\n      return nullptr;\n  if (!ReadFromSymbolizer(buffer_, kBufferSize))\n      return nullptr;\n  return buffer_;\n}\n\nbool SymbolizerProcess::Restart() {\n  if (input_fd_ != kInvalidFd)\n    CloseFile(input_fd_);\n  if (output_fd_ != kInvalidFd)\n    CloseFile(output_fd_);\n  return StartSymbolizerSubprocess();\n}\n\nbool SymbolizerProcess::ReadFromSymbolizer(char *buffer, uptr max_length) {\n  if (max_length == 0)\n    return true;\n  uptr read_len = 0;\n  while (true) {\n    uptr just_read = 0;\n    bool success = ReadFromFile(input_fd_, buffer + read_len,\n                                max_length - read_len - 1, &just_read);\n    // We can't read 0 bytes, as we don't expect external symbolizer to close\n    // its stdout.\n    if (!success || just_read == 0) {\n      Report(\"WARNING: Can't read from symbolizer at fd %d\\n\", input_fd_);\n      return false;\n    }\n    read_len += just_read;\n    if (ReachedEndOfOutput(buffer, read_len))\n      
break;\n    if (read_len + 1 == max_length) {\n      Report(\"WARNING: Symbolizer buffer too small\\n\");\n      read_len = 0;\n      break;\n    }\n  }\n  buffer[read_len] = '\\0';\n  return true;\n}\n\nbool SymbolizerProcess::WriteToSymbolizer(const char *buffer, uptr length) {\n  if (length == 0)\n    return true;\n  uptr write_len = 0;\n  bool success = WriteToFile(output_fd_, buffer, length, &write_len);\n  if (!success || write_len != length) {\n    Report(\"WARNING: Can't write to symbolizer at fd %d\\n\", output_fd_);\n    return false;\n  }\n  return true;\n}\n\n#endif  // !SANITIZER_SYMBOLIZER_MARKUP\n\n}  // namespace __sanitizer\n"
  },
  {
    "path": "runtime/sanitizer_common/sanitizer_symbolizer_mac.cpp",
    "content": "//===-- sanitizer_symbolizer_mac.cpp --------------------------------------===//\n//\n// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n// See https://llvm.org/LICENSE.txt for license information.\n// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n//\n//===----------------------------------------------------------------------===//\n//\n// This file is shared between various sanitizers' runtime libraries.\n//\n// Implementation of Mac-specific \"atos\" symbolizer.\n//===----------------------------------------------------------------------===//\n\n#include \"sanitizer_platform.h\"\n#if SANITIZER_MAC\n\n#include \"sanitizer_allocator_internal.h\"\n#include \"sanitizer_mac.h\"\n#include \"sanitizer_symbolizer_mac.h\"\n\n#include <dlfcn.h>\n#include <errno.h>\n#include <stdlib.h>\n#include <sys/wait.h>\n#include <unistd.h>\n#include <util.h>\n\nnamespace __sanitizer {\n\nbool DlAddrSymbolizer::SymbolizePC(uptr addr, SymbolizedStack *stack) {\n  Dl_info info;\n  int result = dladdr((const void *)addr, &info);\n  if (!result) return false;\n\n  // Compute offset if possible. `dladdr()` doesn't always ensure that `addr >=\n  // sym_addr` so only compute the offset when this holds. 
Failure to find the\n  // function offset is not treated as a failure because it might still be\n  // possible to get the symbol name.\n  uptr sym_addr = reinterpret_cast<uptr>(info.dli_saddr);\n  if (addr >= sym_addr) {\n    stack->info.function_offset = addr - sym_addr;\n  }\n\n  const char *demangled = DemangleSwiftAndCXX(info.dli_sname);\n  if (!demangled) return false;\n  stack->info.function = internal_strdup(demangled);\n  return true;\n}\n\nbool DlAddrSymbolizer::SymbolizeData(uptr addr, DataInfo *datainfo) {\n  Dl_info info;\n  int result = dladdr((const void *)addr, &info);\n  if (!result) return false;\n  const char *demangled = DemangleSwiftAndCXX(info.dli_sname);\n  datainfo->name = internal_strdup(demangled);\n  datainfo->start = (uptr)info.dli_saddr;\n  return true;\n}\n\nclass AtosSymbolizerProcess final : public SymbolizerProcess {\n public:\n  explicit AtosSymbolizerProcess(const char *path)\n      : SymbolizerProcess(path, /*use_posix_spawn*/ true) {\n    pid_str_[0] = '\\0';\n  }\n\n private:\n  bool StartSymbolizerSubprocess() override {\n    // Put the string command line argument in the object so that it outlives\n    // the call to GetArgV.\n    internal_snprintf(pid_str_, sizeof(pid_str_), \"%d\", (int)internal_getpid());\n\n    // Configure sandbox before starting atos process.\n    return SymbolizerProcess::StartSymbolizerSubprocess();\n  }\n\n  bool ReachedEndOfOutput(const char *buffer, uptr length) const override {\n    return (length >= 1 && buffer[length - 1] == '\\n');\n  }\n\n  void GetArgV(const char *path_to_binary,\n               const char *(&argv)[kArgVMax]) const override {\n    int i = 0;\n    argv[i++] = path_to_binary;\n    argv[i++] = \"-p\";\n    argv[i++] = &pid_str_[0];\n    if (GetMacosAlignedVersion() == MacosVersion(10, 9)) {\n      // On Mavericks atos prints a deprecation warning which we suppress by\n      // passing -d. 
The warning isn't present on other OSX versions, even the\n      // newer ones.\n      argv[i++] = \"-d\";\n    }\n    argv[i++] = nullptr;\n    CHECK_LE(i, kArgVMax);\n  }\n\n  char pid_str_[16];\n};\n\n#undef K_ATOS_ENV_VAR\n\nstatic bool ParseCommandOutput(const char *str, uptr addr, char **out_name,\n                               char **out_module, char **out_file, uptr *line,\n                               uptr *start_address) {\n  // Trim ending newlines.\n  char *trim;\n  ExtractTokenUpToDelimiter(str, \"\\n\", &trim);\n\n  // The line from `atos` is in one of these formats:\n  //   myfunction (in library.dylib) (sourcefile.c:17)\n  //   myfunction (in library.dylib) + 0x1fe\n  //   myfunction (in library.dylib) + 15\n  //   0xdeadbeef (in library.dylib) + 0x1fe\n  //   0xdeadbeef (in library.dylib) + 15\n  //   0xdeadbeef (in library.dylib)\n  //   0xdeadbeef\n\n  const char *rest = trim;\n  char *symbol_name;\n  rest = ExtractTokenUpToDelimiter(rest, \" (in \", &symbol_name);\n  if (rest[0] == '\\0') {\n    InternalFree(symbol_name);\n    InternalFree(trim);\n    return false;\n  }\n\n  if (internal_strncmp(symbol_name, \"0x\", 2) != 0)\n    *out_name = symbol_name;\n  else\n    InternalFree(symbol_name);\n  rest = ExtractTokenUpToDelimiter(rest, \") \", out_module);\n\n  if (rest[0] == '(') {\n    if (out_file) {\n      rest++;\n      rest = ExtractTokenUpToDelimiter(rest, \":\", out_file);\n      char *extracted_line_number;\n      rest = ExtractTokenUpToDelimiter(rest, \")\", &extracted_line_number);\n      if (line) *line = (uptr)internal_atoll(extracted_line_number);\n      InternalFree(extracted_line_number);\n    }\n  } else if (rest[0] == '+') {\n    rest += 2;\n    uptr offset = internal_atoll(rest);\n    if (start_address) *start_address = addr - offset;\n  }\n\n  InternalFree(trim);\n  return true;\n}\n\nAtosSymbolizer::AtosSymbolizer(const char *path, LowLevelAllocator *allocator)\n    : process_(new (*allocator) AtosSymbolizerProcess(path)) 
{}\n\nbool AtosSymbolizer::SymbolizePC(uptr addr, SymbolizedStack *stack) {\n  if (!process_) return false;\n  if (addr == 0) return false;\n  char command[32];\n  internal_snprintf(command, sizeof(command), \"0x%zx\\n\", addr);\n  const char *buf = process_->SendCommand(command);\n  if (!buf) return false;\n  uptr line;\n  uptr start_address = AddressInfo::kUnknown;\n  if (!ParseCommandOutput(buf, addr, &stack->info.function, &stack->info.module,\n                          &stack->info.file, &line, &start_address)) {\n    process_ = nullptr;\n    return false;\n  }\n  stack->info.line = (int)line;\n\n  if (start_address == AddressInfo::kUnknown) {\n    // Fallback to dladdr() to get function start address if atos doesn't report\n    // it.\n    Dl_info info;\n    int result = dladdr((const void *)addr, &info);\n    if (result)\n      start_address = reinterpret_cast<uptr>(info.dli_saddr);\n  }\n\n  // Only assign to `function_offset` if we were able to get the function's\n  // start address and we got a sensible `start_address` (dladdr doesn't always\n  // ensure that `addr >= sym_addr`).\n  if (start_address != AddressInfo::kUnknown && addr >= start_address) {\n    stack->info.function_offset = addr - start_address;\n  }\n  return true;\n}\n\nbool AtosSymbolizer::SymbolizeData(uptr addr, DataInfo *info) {\n  if (!process_) return false;\n  char command[32];\n  internal_snprintf(command, sizeof(command), \"0x%zx\\n\", addr);\n  const char *buf = process_->SendCommand(command);\n  if (!buf) return false;\n  if (!ParseCommandOutput(buf, addr, &info->name, &info->module, nullptr,\n                          nullptr, &info->start)) {\n    process_ = nullptr;\n    return false;\n  }\n  return true;\n}\n\n}  // namespace __sanitizer\n\n#endif  // SANITIZER_MAC\n"
  },
  {
    "path": "runtime/sanitizer_common/sanitizer_symbolizer_mac.h",
    "content": "//===-- sanitizer_symbolizer_mac.h ------------------------------*- C++ -*-===//\n//\n// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n// See https://llvm.org/LICENSE.txt for license information.\n// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n//\n//===----------------------------------------------------------------------===//\n//\n// This file is shared between various sanitizers' runtime libraries.\n//\n// Header for Mac-specific \"atos\" symbolizer.\n//===----------------------------------------------------------------------===//\n\n#ifndef SANITIZER_SYMBOLIZER_MAC_H\n#define SANITIZER_SYMBOLIZER_MAC_H\n\n#include \"sanitizer_platform.h\"\n#if SANITIZER_MAC\n\n#include \"sanitizer_symbolizer_internal.h\"\n\nnamespace __sanitizer {\n\nclass DlAddrSymbolizer final : public SymbolizerTool {\n public:\n  bool SymbolizePC(uptr addr, SymbolizedStack *stack) override;\n  bool SymbolizeData(uptr addr, DataInfo *info) override;\n};\n\nclass AtosSymbolizerProcess;\n\nclass AtosSymbolizer final : public SymbolizerTool {\n public:\n  explicit AtosSymbolizer(const char *path, LowLevelAllocator *allocator);\n\n  bool SymbolizePC(uptr addr, SymbolizedStack *stack) override;\n  bool SymbolizeData(uptr addr, DataInfo *info) override;\n\n private:\n  AtosSymbolizerProcess *process_;\n};\n\n} // namespace __sanitizer\n\n#endif  // SANITIZER_MAC\n\n#endif // SANITIZER_SYMBOLIZER_MAC_H\n"
  },
  {
    "path": "runtime/sanitizer_common/sanitizer_symbolizer_markup.cpp",
    "content": "//===-- sanitizer_symbolizer_markup.cpp -----------------------------------===//\n//\n// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n// See https://llvm.org/LICENSE.txt for license information.\n// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n//\n//===----------------------------------------------------------------------===//\n//\n// This file is shared between various sanitizers' runtime libraries.\n//\n// Implementation of offline markup symbolizer.\n//===----------------------------------------------------------------------===//\n\n#include \"sanitizer_platform.h\"\n#if SANITIZER_SYMBOLIZER_MARKUP\n\n#if SANITIZER_FUCHSIA\n#include \"sanitizer_symbolizer_fuchsia.h\"\n#  endif\n\n#  include <limits.h>\n#  include <unwind.h>\n\n#  include \"sanitizer_stacktrace.h\"\n#  include \"sanitizer_symbolizer.h\"\n\nnamespace __sanitizer {\n\n// This generic support for offline symbolizing is based on the\n// Fuchsia port.  We don't do any actual symbolization per se.\n// Instead, we emit text containing raw addresses and raw linkage\n// symbol names, embedded in Fuchsia's symbolization markup format.\n// Fuchsia's logging infrastructure emits enough information about\n// process memory layout that a post-processing filter can do the\n// symbolization and pretty-print the markup.  See the spec at:\n// https://fuchsia.googlesource.com/zircon/+/master/docs/symbolizer_markup.md\n\n// This is used by UBSan for type names, and by ASan for global variable names.\n// It's expected to return a static buffer that will be reused on each call.\nconst char *Symbolizer::Demangle(const char *name) {\n  static char buffer[kFormatDemangleMax];\n  internal_snprintf(buffer, sizeof(buffer), kFormatDemangle, name);\n  return buffer;\n}\n\n// This is used mostly for suppression matching.  Making it work\n// would enable \"interceptor_via_lib\" suppressions.  
It's also used\n// once in UBSan to say \"in module ...\" in a message that also\n// includes an address in the module, so post-processing can already\n// pretty-print that so as to indicate the module.\nbool Symbolizer::GetModuleNameAndOffsetForPC(uptr pc, const char **module_name,\n                                             uptr *module_address) {\n  return false;\n}\n\n// This is mainly used by hwasan for online symbolization. This isn't needed\n// since hwasan can always just dump stack frames for offline symbolization.\nbool Symbolizer::SymbolizeFrame(uptr addr, FrameInfo *info) { return false; }\n\n// This is used in some places for suppression checking, which we\n// don't really support for Fuchsia.  It's also used in UBSan to\n// identify a PC location to a function name, so we always fill in\n// the function member with a string containing markup around the PC\n// value.\n// TODO(mcgrathr): Under SANITIZER_GO, it's currently used by TSan\n// to render stack frames, but that should be changed to use\n// RenderStackFrame.\nSymbolizedStack *Symbolizer::SymbolizePC(uptr addr) {\n  SymbolizedStack *s = SymbolizedStack::New(addr);\n  char buffer[kFormatFunctionMax];\n  internal_snprintf(buffer, sizeof(buffer), kFormatFunction, addr);\n  s->info.function = internal_strdup(buffer);\n  return s;\n}\n\n// Always claim we succeeded, so that RenderDataInfo will be called.\nbool Symbolizer::SymbolizeData(uptr addr, DataInfo *info) {\n  info->Clear();\n  info->start = addr;\n  return true;\n}\n\n// We ignore the format argument to __sanitizer_symbolize_global.\nvoid RenderData(InternalScopedString *buffer, const char *format,\n                const DataInfo *DI, const char *strip_path_prefix) {\n  buffer->append(kFormatData, DI->start);\n}\n\nbool RenderNeedsSymbolization(const char *format) { return false; }\n\n// We don't support the stack_trace_format flag at all.\nvoid RenderFrame(InternalScopedString *buffer, const char *format, int frame_no,\n                 
uptr address, const AddressInfo *info, bool vs_style,\n                 const char *strip_path_prefix, const char *strip_func_prefix) {\n  CHECK(!RenderNeedsSymbolization(format));\n  buffer->append(kFormatFrame, frame_no, address);\n}\n\nSymbolizer *Symbolizer::PlatformInit() {\n  return new (symbolizer_allocator_) Symbolizer({});\n}\n\nvoid Symbolizer::LateInitialize() { Symbolizer::GetOrInit(); }\n\nvoid StartReportDeadlySignal() {}\nvoid ReportDeadlySignal(const SignalContext &sig, u32 tid,\n                        UnwindSignalStackCallbackType unwind,\n                        const void *unwind_context) {}\n\n#if SANITIZER_CAN_SLOW_UNWIND\nstruct UnwindTraceArg {\n  BufferedStackTrace *stack;\n  u32 max_depth;\n};\n\n_Unwind_Reason_Code Unwind_Trace(struct _Unwind_Context *ctx, void *param) {\n  UnwindTraceArg *arg = static_cast<UnwindTraceArg *>(param);\n  CHECK_LT(arg->stack->size, arg->max_depth);\n  uptr pc = _Unwind_GetIP(ctx);\n  if (pc < PAGE_SIZE) return _URC_NORMAL_STOP;\n  arg->stack->trace_buffer[arg->stack->size++] = pc;\n  return (arg->stack->size == arg->max_depth ? 
_URC_NORMAL_STOP\n                                             : _URC_NO_REASON);\n}\n\nvoid BufferedStackTrace::UnwindSlow(uptr pc, u32 max_depth) {\n  CHECK_GE(max_depth, 2);\n  size = 0;\n  UnwindTraceArg arg = {this, Min(max_depth + 1, kStackTraceMax)};\n  _Unwind_Backtrace(Unwind_Trace, &arg);\n  CHECK_GT(size, 0);\n  // We need to pop a few frames so that pc is on top.\n  uptr to_pop = LocatePcInTrace(pc);\n  // trace_buffer[0] belongs to the current function so we always pop it,\n  // unless there is only 1 frame in the stack trace (1 frame is always better\n  // than 0!).\n  PopStackFrames(Min(to_pop, static_cast<uptr>(1)));\n  trace_buffer[0] = pc;\n}\n\nvoid BufferedStackTrace::UnwindSlow(uptr pc, void *context, u32 max_depth) {\n  CHECK(context);\n  CHECK_GE(max_depth, 2);\n  UNREACHABLE(\"signal context doesn't exist\");\n}\n#endif  // SANITIZER_CAN_SLOW_UNWIND\n\n}  // namespace __sanitizer\n\n#endif  // SANITIZER_SYMBOLIZER_MARKUP\n"
  },
  {
    "path": "runtime/sanitizer_common/sanitizer_symbolizer_posix_libcdep.cpp",
    "content": "//===-- sanitizer_symbolizer_posix_libcdep.cpp ----------------------------===//\n//\n// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n// See https://llvm.org/LICENSE.txt for license information.\n// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n//\n//===----------------------------------------------------------------------===//\n//\n// This file is shared between AddressSanitizer and ThreadSanitizer\n// run-time libraries.\n// POSIX-specific implementation of symbolizer parts.\n//===----------------------------------------------------------------------===//\n\n#include \"sanitizer_platform.h\"\n#if SANITIZER_POSIX\n#include \"sanitizer_allocator_internal.h\"\n#include \"sanitizer_common.h\"\n#include \"sanitizer_file.h\"\n#include \"sanitizer_flags.h\"\n#include \"sanitizer_internal_defs.h\"\n#include \"sanitizer_linux.h\"\n#include \"sanitizer_placement_new.h\"\n#include \"sanitizer_posix.h\"\n#include \"sanitizer_procmaps.h\"\n#include \"sanitizer_symbolizer_internal.h\"\n#include \"sanitizer_symbolizer_libbacktrace.h\"\n#include \"sanitizer_symbolizer_mac.h\"\n\n#include <dlfcn.h>   // for dlsym()\n#include <errno.h>\n#include <stdint.h>\n#include <stdlib.h>\n#include <sys/wait.h>\n#include <unistd.h>\n\n// C++ demangling function, as required by Itanium C++ ABI. 
This is weak,\n// because we do not require a C++ ABI library to be linked to a program\n// using sanitizers; if it's not present, we'll just use the mangled name.\nnamespace __cxxabiv1 {\n  extern \"C\" SANITIZER_WEAK_ATTRIBUTE\n  char *__cxa_demangle(const char *mangled, char *buffer,\n                                  size_t *length, int *status);\n}\n\nnamespace __sanitizer {\n\n// Attempts to demangle the name via __cxa_demangle from __cxxabiv1.\nconst char *DemangleCXXABI(const char *name) {\n  // FIXME: __cxa_demangle aggressively insists on allocating memory.\n  // There's not much we can do about that, short of providing our\n  // own demangler (libc++abi's implementation could be adapted so that\n  // it does not allocate). For now, we just call it anyway, and we leak\n  // the returned value.\n  if (&__cxxabiv1::__cxa_demangle)\n    if (const char *demangled_name =\n          __cxxabiv1::__cxa_demangle(name, 0, 0, 0))\n      return demangled_name;\n\n  return name;\n}\n\n// As of now, there are no headers for the Swift runtime. Once they are\n// present, we will weakly link since we do not require Swift runtime to be\n// linked.\ntypedef char *(*swift_demangle_ft)(const char *mangledName,\n                                   size_t mangledNameLength, char *outputBuffer,\n                                   size_t *outputBufferSize, uint32_t flags);\nstatic swift_demangle_ft swift_demangle_f;\n\n// This must not happen lazily at symbolication time, because dlsym uses\n// malloc and thread-local storage, which is not a good thing to do during\n// symbolication.\nstatic void InitializeSwiftDemangler() {\n  swift_demangle_f = (swift_demangle_ft)dlsym(RTLD_DEFAULT, \"swift_demangle\");\n  (void)dlerror(); // Cleanup error message in case of failure\n}\n\n// Attempts to demangle a Swift name. 
The demangler will return nullptr if a\n// non-Swift name is passed in.\nconst char *DemangleSwift(const char *name) {\n  if (swift_demangle_f)\n    return swift_demangle_f(name, internal_strlen(name), 0, 0, 0);\n\n  return nullptr;\n}\n\nconst char *DemangleSwiftAndCXX(const char *name) {\n  if (!name) return nullptr;\n  if (const char *swift_demangled_name = DemangleSwift(name))\n    return swift_demangled_name;\n  return DemangleCXXABI(name);\n}\n\nstatic bool CreateTwoHighNumberedPipes(int *infd_, int *outfd_) {\n  int *infd = NULL;\n  int *outfd = NULL;\n  // The client program may close its stdin and/or stdout and/or stderr\n  // thus allowing socketpair to reuse file descriptors 0, 1 or 2.\n  // In this case the communication between the forked processes may be\n  // broken if either the parent or the child tries to close or duplicate\n  // these descriptors. The loop below produces two pairs of file\n  // descriptors, each greater than 2 (stderr).\n  int sock_pair[5][2];\n  for (int i = 0; i < 5; i++) {\n    if (pipe(sock_pair[i]) == -1) {\n      for (int j = 0; j < i; j++) {\n        internal_close(sock_pair[j][0]);\n        internal_close(sock_pair[j][1]);\n      }\n      return false;\n    } else if (sock_pair[i][0] > 2 && sock_pair[i][1] > 2) {\n      if (infd == NULL) {\n        infd = sock_pair[i];\n      } else {\n        outfd = sock_pair[i];\n        for (int j = 0; j < i; j++) {\n          if (sock_pair[j] == infd) continue;\n          internal_close(sock_pair[j][0]);\n          internal_close(sock_pair[j][1]);\n        }\n        break;\n      }\n    }\n  }\n  CHECK(infd);\n  CHECK(outfd);\n  infd_[0] = infd[0];\n  infd_[1] = infd[1];\n  outfd_[0] = outfd[0];\n  outfd_[1] = outfd[1];\n  return true;\n}\n\nbool SymbolizerProcess::StartSymbolizerSubprocess() {\n  if (!FileExists(path_)) {\n    if (!reported_invalid_path_) {\n      Report(\"WARNING: invalid path to external symbolizer!\\n\");\n      reported_invalid_path_ = true;\n    }\n    return 
false;\n  }\n\n  const char *argv[kArgVMax];\n  GetArgV(path_, argv);\n  pid_t pid;\n\n  // Report how symbolizer is being launched for debugging purposes.\n  if (Verbosity() >= 3) {\n    // Only use `Report` for first line so subsequent prints don't get prefixed\n    // with current PID.\n    Report(\"Launching Symbolizer process: \");\n    for (unsigned index = 0; index < kArgVMax && argv[index]; ++index)\n      Printf(\"%s \", argv[index]);\n    Printf(\"\\n\");\n  }\n\n  if (use_posix_spawn_) {\n#if SANITIZER_MAC\n    fd_t fd = internal_spawn(argv, const_cast<const char **>(GetEnvP()), &pid);\n    if (fd == kInvalidFd) {\n      Report(\"WARNING: failed to spawn external symbolizer (errno: %d)\\n\",\n             errno);\n      return false;\n    }\n\n    input_fd_ = fd;\n    output_fd_ = fd;\n#else  // SANITIZER_MAC\n    UNIMPLEMENTED();\n#endif  // SANITIZER_MAC\n  } else {\n    fd_t infd[2] = {}, outfd[2] = {};\n    if (!CreateTwoHighNumberedPipes(infd, outfd)) {\n      Report(\"WARNING: Can't create a socket pair to start \"\n             \"external symbolizer (errno: %d)\\n\", errno);\n      return false;\n    }\n\n    pid = StartSubprocess(path_, argv, GetEnvP(), /* stdin */ outfd[0],\n                          /* stdout */ infd[1]);\n    if (pid < 0) {\n      internal_close(infd[0]);\n      internal_close(outfd[1]);\n      return false;\n    }\n\n    input_fd_ = infd[0];\n    output_fd_ = outfd[1];\n  }\n\n  CHECK_GT(pid, 0);\n\n  // Check that symbolizer subprocess started successfully.\n  SleepForMillis(kSymbolizerStartupTimeMillis);\n  if (!IsProcessRunning(pid)) {\n    // Either waitpid failed, or child has already exited.\n    Report(\"WARNING: external symbolizer didn't start up correctly!\\n\");\n    return false;\n  }\n\n  return true;\n}\n\nclass Addr2LineProcess final : public SymbolizerProcess {\n public:\n  Addr2LineProcess(const char *path, const char *module_name)\n      : SymbolizerProcess(path), module_name_(internal_strdup(module_name)) 
{}\n\n  const char *module_name() const { return module_name_; }\n\n private:\n  void GetArgV(const char *path_to_binary,\n               const char *(&argv)[kArgVMax]) const override {\n    int i = 0;\n    argv[i++] = path_to_binary;\n    if (common_flags()->demangle)\n      argv[i++] = \"-C\";\n    if (common_flags()->symbolize_inline_frames)\n      argv[i++] = \"-i\";\n    argv[i++] = \"-fe\";\n    argv[i++] = module_name_;\n    argv[i++] = nullptr;\n    CHECK_LE(i, kArgVMax);\n  }\n\n  bool ReachedEndOfOutput(const char *buffer, uptr length) const override;\n\n  bool ReadFromSymbolizer(char *buffer, uptr max_length) override {\n    if (!SymbolizerProcess::ReadFromSymbolizer(buffer, max_length))\n      return false;\n    // The returned buffer is empty when output is valid, but exceeds\n    // max_length.\n    if (*buffer == '\\0')\n      return true;\n    // We should cut out output_terminator_ at the end of given buffer,\n    // appended by addr2line to mark the end of its meaningful output.\n    // We cannot scan buffer from it's beginning, because it is legal for it\n    // to start with output_terminator_ in case given offset is invalid. So,\n    // scanning from second character.\n    char *garbage = internal_strstr(buffer + 1, output_terminator_);\n    // This should never be NULL since buffer must end up with\n    // output_terminator_.\n    CHECK(garbage);\n    // Trim the buffer.\n    garbage[0] = '\\0';\n    return true;\n  }\n\n  const char *module_name_;  // Owned, leaked.\n  static const char output_terminator_[];\n};\n\nconst char Addr2LineProcess::output_terminator_[] = \"??\\n??:0\\n\";\n\nbool Addr2LineProcess::ReachedEndOfOutput(const char *buffer,\n                                          uptr length) const {\n  const size_t kTerminatorLen = sizeof(output_terminator_) - 1;\n  // Skip, if we read just kTerminatorLen bytes, because Addr2Line output\n  // should consist at least of two pairs of lines:\n  // 1. 
First one, corresponding to given offset to be symbolized\n  // (may be equal to output_terminator_, if offset is not valid).\n  // 2. Second one for output_terminator_, itself to mark the end of output.\n  if (length <= kTerminatorLen) return false;\n  // Addr2Line output should end up with output_terminator_.\n  return !internal_memcmp(buffer + length - kTerminatorLen,\n                          output_terminator_, kTerminatorLen);\n}\n\nclass Addr2LinePool final : public SymbolizerTool {\n public:\n  explicit Addr2LinePool(const char *addr2line_path,\n                         LowLevelAllocator *allocator)\n      : addr2line_path_(addr2line_path), allocator_(allocator) {\n    addr2line_pool_.reserve(16);\n  }\n\n  bool SymbolizePC(uptr addr, SymbolizedStack *stack) override {\n    if (const char *buf =\n            SendCommand(stack->info.module, stack->info.module_offset)) {\n      ParseSymbolizePCOutput(buf, stack);\n      return true;\n    }\n    return false;\n  }\n\n  bool SymbolizeData(uptr addr, DataInfo *info) override {\n    return false;\n  }\n\n private:\n  const char *SendCommand(const char *module_name, uptr module_offset) {\n    Addr2LineProcess *addr2line = 0;\n    for (uptr i = 0; i < addr2line_pool_.size(); ++i) {\n      if (0 ==\n          internal_strcmp(module_name, addr2line_pool_[i]->module_name())) {\n        addr2line = addr2line_pool_[i];\n        break;\n      }\n    }\n    if (!addr2line) {\n      addr2line =\n          new(*allocator_) Addr2LineProcess(addr2line_path_, module_name);\n      addr2line_pool_.push_back(addr2line);\n    }\n    CHECK_EQ(0, internal_strcmp(module_name, addr2line->module_name()));\n    char buffer[kBufferSize];\n    internal_snprintf(buffer, kBufferSize, \"0x%zx\\n0x%zx\\n\",\n                      module_offset, dummy_address_);\n    return addr2line->SendCommand(buffer);\n  }\n\n  static const uptr kBufferSize = 64;\n  const char *addr2line_path_;\n  LowLevelAllocator *allocator_;\n  
InternalMmapVector<Addr2LineProcess*> addr2line_pool_;\n  static const uptr dummy_address_ =\n      FIRST_32_SECOND_64(UINT32_MAX, UINT64_MAX);\n};\n\n#  if SANITIZER_SUPPORTS_WEAK_HOOKS\nextern \"C\" {\nSANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE bool\n__sanitizer_symbolize_code(const char *ModuleName, u64 ModuleOffset,\n                           char *Buffer, int MaxLength);\nSANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE bool\n__sanitizer_symbolize_data(const char *ModuleName, u64 ModuleOffset,\n                           char *Buffer, int MaxLength);\nSANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void\n__sanitizer_symbolize_flush();\nSANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE int\n__sanitizer_symbolize_demangle(const char *Name, char *Buffer, int MaxLength);\nSANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE bool\n__sanitizer_symbolize_set_demangle(bool Demangle);\nSANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE bool\n__sanitizer_symbolize_set_inline_frames(bool InlineFrames);\n}  // extern \"C\"\n\nclass InternalSymbolizer final : public SymbolizerTool {\n public:\n  static InternalSymbolizer *get(LowLevelAllocator *alloc) {\n    if (__sanitizer_symbolize_set_demangle)\n      CHECK(__sanitizer_symbolize_set_demangle(common_flags()->demangle));\n    if (__sanitizer_symbolize_set_inline_frames)\n      CHECK(__sanitizer_symbolize_set_inline_frames(\n          common_flags()->symbolize_inline_frames));\n    if (__sanitizer_symbolize_code && __sanitizer_symbolize_data)\n      return new (*alloc) InternalSymbolizer();\n    return 0;\n  }\n\n  bool SymbolizePC(uptr addr, SymbolizedStack *stack) override {\n    bool result = __sanitizer_symbolize_code(\n        stack->info.module, stack->info.module_offset, buffer_, kBufferSize);\n    if (result)\n      ParseSymbolizePCOutput(buffer_, stack);\n    return result;\n  }\n\n  bool SymbolizeData(uptr addr, DataInfo *info) override {\n    bool result = 
__sanitizer_symbolize_data(info->module, info->module_offset,\n                                             buffer_, kBufferSize);\n    if (result) {\n      ParseSymbolizeDataOutput(buffer_, info);\n      info->start += (addr - info->module_offset);  // Add the base address.\n    }\n    return result;\n  }\n\n  void Flush() override {\n    if (__sanitizer_symbolize_flush)\n      __sanitizer_symbolize_flush();\n  }\n\n  const char *Demangle(const char *name) override {\n    if (__sanitizer_symbolize_demangle) {\n      for (uptr res_length = 1024;\n           res_length <= InternalSizeClassMap::kMaxSize;) {\n        char *res_buff = static_cast<char *>(InternalAlloc(res_length));\n        uptr req_length =\n            __sanitizer_symbolize_demangle(name, res_buff, res_length);\n        if (req_length > res_length) {\n          res_length = req_length + 1;\n          InternalFree(res_buff);\n          continue;\n        }\n        return res_buff;\n      }\n    }\n    return name;\n  }\n\n private:\n  InternalSymbolizer() {}\n\n  static const int kBufferSize = 16 * 1024;\n  char buffer_[kBufferSize];\n};\n#  else  // SANITIZER_SUPPORTS_WEAK_HOOKS\n\nclass InternalSymbolizer final : public SymbolizerTool {\n public:\n  static InternalSymbolizer *get(LowLevelAllocator *alloc) { return 0; }\n};\n\n#  endif  // SANITIZER_SUPPORTS_WEAK_HOOKS\n\nconst char *Symbolizer::PlatformDemangle(const char *name) {\n  return DemangleSwiftAndCXX(name);\n}\n\nstatic SymbolizerTool *ChooseExternalSymbolizer(LowLevelAllocator *allocator) {\n  const char *path = common_flags()->external_symbolizer_path;\n\n  if (path && internal_strchr(path, '%')) {\n    char *new_path = (char *)InternalAlloc(kMaxPathLength);\n    SubstituteForFlagValue(path, new_path, kMaxPathLength);\n    path = new_path;\n  }\n\n  const char *binary_name = path ? 
StripModuleName(path) : \"\";\n  static const char kLLVMSymbolizerPrefix[] = \"llvm-symbolizer\";\n  if (path && path[0] == '\\0') {\n    VReport(2, \"External symbolizer is explicitly disabled.\\n\");\n    return nullptr;\n  } else if (!internal_strncmp(binary_name, kLLVMSymbolizerPrefix,\n                               internal_strlen(kLLVMSymbolizerPrefix))) {\n    VReport(2, \"Using llvm-symbolizer at user-specified path: %s\\n\", path);\n    return new(*allocator) LLVMSymbolizer(path, allocator);\n  } else if (!internal_strcmp(binary_name, \"atos\")) {\n#if SANITIZER_MAC\n    VReport(2, \"Using atos at user-specified path: %s\\n\", path);\n    return new(*allocator) AtosSymbolizer(path, allocator);\n#else  // SANITIZER_MAC\n    Report(\"ERROR: Using `atos` is only supported on Darwin.\\n\");\n    Die();\n#endif  // SANITIZER_MAC\n  } else if (!internal_strcmp(binary_name, \"addr2line\")) {\n    VReport(2, \"Using addr2line at user-specified path: %s\\n\", path);\n    return new(*allocator) Addr2LinePool(path, allocator);\n  } else if (path) {\n    Report(\"ERROR: External symbolizer path is set to '%s' which isn't \"\n           \"a known symbolizer. 
Please set the path to the llvm-symbolizer \"\n           \"binary or other known tool.\\n\", path);\n    Die();\n  }\n\n  // Otherwise symbolizer program is unknown, let's search $PATH\n  CHECK(path == nullptr);\n#if SANITIZER_MAC\n  if (const char *found_path = FindPathToBinary(\"atos\")) {\n    VReport(2, \"Using atos found at: %s\\n\", found_path);\n    return new(*allocator) AtosSymbolizer(found_path, allocator);\n  }\n#endif  // SANITIZER_MAC\n  if (const char *found_path = FindPathToBinary(\"llvm-symbolizer\")) {\n    VReport(2, \"Using llvm-symbolizer found at: %s\\n\", found_path);\n    return new(*allocator) LLVMSymbolizer(found_path, allocator);\n  }\n  if (common_flags()->allow_addr2line) {\n    if (const char *found_path = FindPathToBinary(\"addr2line\")) {\n      VReport(2, \"Using addr2line found at: %s\\n\", found_path);\n      return new(*allocator) Addr2LinePool(found_path, allocator);\n    }\n  }\n  return nullptr;\n}\n\nstatic void ChooseSymbolizerTools(IntrusiveList<SymbolizerTool> *list,\n                                  LowLevelAllocator *allocator) {\n  if (!common_flags()->symbolize) {\n    VReport(2, \"Symbolizer is disabled.\\n\");\n    return;\n  }\n  if (IsAllocatorOutOfMemory()) {\n    VReport(2, \"Cannot use internal symbolizer: out of memory\\n\");\n  } else if (SymbolizerTool *tool = InternalSymbolizer::get(allocator)) {\n    VReport(2, \"Using internal symbolizer.\\n\");\n    list->push_back(tool);\n    return;\n  }\n  if (SymbolizerTool *tool = LibbacktraceSymbolizer::get(allocator)) {\n    VReport(2, \"Using libbacktrace symbolizer.\\n\");\n    list->push_back(tool);\n    return;\n  }\n\n  if (SymbolizerTool *tool = ChooseExternalSymbolizer(allocator)) {\n    list->push_back(tool);\n  }\n\n#if SANITIZER_MAC\n  VReport(2, \"Using dladdr symbolizer.\\n\");\n  list->push_back(new(*allocator) DlAddrSymbolizer());\n#endif  // SANITIZER_MAC\n}\n\nSymbolizer *Symbolizer::PlatformInit() {\n  IntrusiveList<SymbolizerTool> list;\n  
list.clear();\n  ChooseSymbolizerTools(&list, &symbolizer_allocator_);\n  return new(symbolizer_allocator_) Symbolizer(list);\n}\n\nvoid Symbolizer::LateInitialize() {\n  Symbolizer::GetOrInit();\n  InitializeSwiftDemangler();\n}\n\n}  // namespace __sanitizer\n\n#endif  // SANITIZER_POSIX\n"
  },
  {
    "path": "runtime/sanitizer_common/sanitizer_symbolizer_report.cpp",
    "content": "//===-- sanitizer_symbolizer_report.cpp -----------------------------------===//\n//\n// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n// See https://llvm.org/LICENSE.txt for license information.\n// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n//\n//===----------------------------------------------------------------------===//\n///\n/// This file is shared between AddressSanitizer and other sanitizer run-time\n/// libraries and implements symbolized reports related functions.\n///\n//===----------------------------------------------------------------------===//\n\n#include \"sanitizer_common.h\"\n#include \"sanitizer_file.h\"\n#include \"sanitizer_flags.h\"\n#include \"sanitizer_procmaps.h\"\n#include \"sanitizer_report_decorator.h\"\n#include \"sanitizer_stacktrace.h\"\n#include \"sanitizer_stacktrace_printer.h\"\n#include \"sanitizer_symbolizer.h\"\n\n#if SANITIZER_POSIX\n# include \"sanitizer_posix.h\"\n# include <sys/mman.h>\n#endif\n\nnamespace __sanitizer {\n\n#if !SANITIZER_GO\nvoid ReportErrorSummary(const char *error_type, const AddressInfo &info,\n                        const char *alt_tool_name) {\n  if (!common_flags()->print_summary) return;\n  InternalScopedString buff;\n  buff.append(\"%s \", error_type);\n  RenderFrame(&buff, \"%L %F\", 0, info.address, &info,\n              common_flags()->symbolize_vs_style,\n              common_flags()->strip_path_prefix);\n  ReportErrorSummary(buff.data(), alt_tool_name);\n}\n#endif\n\n#if !SANITIZER_FUCHSIA\n\nbool ReportFile::SupportsColors() {\n  SpinMutexLock l(mu);\n  ReopenIfNecessary();\n  return SupportsColoredOutput(fd);\n}\n\nstatic inline bool ReportSupportsColors() {\n  return report_file.SupportsColors();\n}\n\n#else  // SANITIZER_FUCHSIA\n\n// Fuchsia's logs always go through post-processing that handles colorization.\nstatic inline bool ReportSupportsColors() { return true; }\n\n#endif  // !SANITIZER_FUCHSIA\n\nbool ColorizeReports() 
{\n  // FIXME: Add proper Windows support to AnsiColorDecorator and re-enable color\n  // printing on Windows.\n  if (SANITIZER_WINDOWS)\n    return false;\n\n  const char *flag = common_flags()->color;\n  return internal_strcmp(flag, \"always\") == 0 ||\n         (internal_strcmp(flag, \"auto\") == 0 && ReportSupportsColors());\n}\n\nvoid ReportErrorSummary(const char *error_type, const StackTrace *stack,\n                        const char *alt_tool_name) {\n#if !SANITIZER_GO\n  if (!common_flags()->print_summary)\n    return;\n  if (stack->size == 0) {\n    ReportErrorSummary(error_type);\n    return;\n  }\n  // Currently, we include the first stack frame into the report summary.\n  // Maybe sometimes we need to choose another frame (e.g. skip memcpy/etc).\n  uptr pc = StackTrace::GetPreviousInstructionPc(stack->trace[0]);\n  SymbolizedStack *frame = Symbolizer::GetOrInit()->SymbolizePC(pc);\n  ReportErrorSummary(error_type, frame->info, alt_tool_name);\n  frame->ClearAll();\n#endif\n}\n\nvoid ReportMmapWriteExec(int prot, int flags) {\n#if SANITIZER_POSIX && (!SANITIZER_GO && !SANITIZER_ANDROID)\n  int pflags = (PROT_WRITE | PROT_EXEC);\n  if ((prot & pflags) != pflags)\n    return;\n\n#  if SANITIZER_MAC && defined(MAP_JIT)\n  if ((flags & MAP_JIT) == MAP_JIT)\n    return;\n#  endif\n\n  ScopedErrorReportLock l;\n  SanitizerCommonDecorator d;\n\n  InternalMmapVector<BufferedStackTrace> stack_buffer(1);\n  BufferedStackTrace *stack = stack_buffer.data();\n  stack->Reset();\n  uptr top = 0;\n  uptr bottom = 0;\n  GET_CALLER_PC_BP_SP;\n  (void)sp;\n  bool fast = common_flags()->fast_unwind_on_fatal;\n  if (StackTrace::WillUseFastUnwind(fast)) {\n    GetThreadStackTopAndBottom(false, &top, &bottom);\n    stack->Unwind(kStackTraceMax, pc, bp, nullptr, top, bottom, true);\n  } else {\n    stack->Unwind(kStackTraceMax, pc, 0, nullptr, 0, 0, false);\n  }\n\n  Printf(\"%s\", d.Warning());\n  Report(\"WARNING: %s: writable-executable page usage\\n\", 
SanitizerToolName);\n  Printf(\"%s\", d.Default());\n\n  stack->Print();\n  ReportErrorSummary(\"w-and-x-usage\", stack);\n#endif\n}\n\n#if !SANITIZER_FUCHSIA && !SANITIZER_GO\nvoid StartReportDeadlySignal() {\n  // Write the first message using fd=2, just in case.\n  // It may actually fail to write in case stderr is closed.\n  CatastrophicErrorWrite(SanitizerToolName, internal_strlen(SanitizerToolName));\n  static const char kDeadlySignal[] = \":DEADLYSIGNAL\\n\";\n  CatastrophicErrorWrite(kDeadlySignal, sizeof(kDeadlySignal) - 1);\n}\n\nstatic void MaybeReportNonExecRegion(uptr pc) {\n#if SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_NETBSD\n  MemoryMappingLayout proc_maps(/*cache_enabled*/ true);\n  MemoryMappedSegment segment;\n  while (proc_maps.Next(&segment)) {\n    if (pc >= segment.start && pc < segment.end && !segment.IsExecutable())\n      Report(\"Hint: PC is at a non-executable region. Maybe a wild jump?\\n\");\n  }\n#endif\n}\n\nstatic void PrintMemoryByte(InternalScopedString *str, const char *before,\n                            u8 byte) {\n  SanitizerCommonDecorator d;\n  str->append(\"%s%s%x%x%s \", before, d.MemoryByte(), byte >> 4, byte & 15,\n              d.Default());\n}\n\nstatic void MaybeDumpInstructionBytes(uptr pc) {\n  if (!common_flags()->dump_instruction_bytes || (pc < GetPageSizeCached()))\n    return;\n  InternalScopedString str;\n  str.append(\"First 16 instruction bytes at pc: \");\n  if (IsAccessibleMemoryRange(pc, 16)) {\n    for (int i = 0; i < 16; ++i) {\n      PrintMemoryByte(&str, \"\", ((u8 *)pc)[i]);\n    }\n    str.append(\"\\n\");\n  } else {\n    str.append(\"unaccessible\\n\");\n  }\n  Report(\"%s\", str.data());\n}\n\nstatic void MaybeDumpRegisters(void *context) {\n  if (!common_flags()->dump_registers) return;\n  SignalContext::DumpAllRegisters(context);\n}\n\nstatic void ReportStackOverflowImpl(const SignalContext &sig, u32 tid,\n                                    UnwindSignalStackCallbackType unwind,\n       
                             const void *unwind_context) {\n  SanitizerCommonDecorator d;\n  Printf(\"%s\", d.Warning());\n  static const char kDescription[] = \"stack-overflow\";\n  Report(\"ERROR: %s: %s on address %p (pc %p bp %p sp %p T%d)\\n\",\n         SanitizerToolName, kDescription, (void *)sig.addr, (void *)sig.pc,\n         (void *)sig.bp, (void *)sig.sp, tid);\n  Printf(\"%s\", d.Default());\n  InternalMmapVector<BufferedStackTrace> stack_buffer(1);\n  BufferedStackTrace *stack = stack_buffer.data();\n  stack->Reset();\n  unwind(sig, unwind_context, stack);\n  stack->Print();\n  ReportErrorSummary(kDescription, stack);\n}\n\nstatic void ReportDeadlySignalImpl(const SignalContext &sig, u32 tid,\n                                   UnwindSignalStackCallbackType unwind,\n                                   const void *unwind_context) {\n  SanitizerCommonDecorator d;\n  Printf(\"%s\", d.Warning());\n  const char *description = sig.Describe();\n  if (sig.is_memory_access && !sig.is_true_faulting_addr)\n    Report(\"ERROR: %s: %s on unknown address (pc %p bp %p sp %p T%d)\\n\",\n           SanitizerToolName, description, (void *)sig.pc, (void *)sig.bp,\n           (void *)sig.sp, tid);\n  else\n    Report(\"ERROR: %s: %s on unknown address %p (pc %p bp %p sp %p T%d)\\n\",\n           SanitizerToolName, description, (void *)sig.addr, (void *)sig.pc,\n           (void *)sig.bp, (void *)sig.sp, tid);\n  Printf(\"%s\", d.Default());\n  if (sig.pc < GetPageSizeCached())\n    Report(\"Hint: pc points to the zero page.\\n\");\n  if (sig.is_memory_access) {\n    const char *access_type =\n        sig.write_flag == SignalContext::Write\n            ? \"WRITE\"\n            : (sig.write_flag == SignalContext::Read ? 
\"READ\" : \"UNKNOWN\");\n    Report(\"The signal is caused by a %s memory access.\\n\", access_type);\n    if (!sig.is_true_faulting_addr)\n      Report(\"Hint: this fault was caused by a dereference of a high value \"\n             \"address (see register values below).  Disassemble the provided \"\n             \"pc to learn which register was used.\\n\");\n    else if (sig.addr < GetPageSizeCached())\n      Report(\"Hint: address points to the zero page.\\n\");\n  }\n  MaybeReportNonExecRegion(sig.pc);\n  InternalMmapVector<BufferedStackTrace> stack_buffer(1);\n  BufferedStackTrace *stack = stack_buffer.data();\n  stack->Reset();\n  unwind(sig, unwind_context, stack);\n  stack->Print();\n  MaybeDumpInstructionBytes(sig.pc);\n  MaybeDumpRegisters(sig.context);\n  Printf(\"%s can not provide additional info.\\n\", SanitizerToolName);\n  ReportErrorSummary(description, stack);\n}\n\nvoid ReportDeadlySignal(const SignalContext &sig, u32 tid,\n                        UnwindSignalStackCallbackType unwind,\n                        const void *unwind_context) {\n  if (sig.IsStackOverflow())\n    ReportStackOverflowImpl(sig, tid, unwind, unwind_context);\n  else\n    ReportDeadlySignalImpl(sig, tid, unwind, unwind_context);\n}\n\nvoid HandleDeadlySignal(void *siginfo, void *context, u32 tid,\n                        UnwindSignalStackCallbackType unwind,\n                        const void *unwind_context) {\n  StartReportDeadlySignal();\n  ScopedErrorReportLock rl;\n  SignalContext sig(siginfo, context);\n  ReportDeadlySignal(sig, tid, unwind, unwind_context);\n  Report(\"ABORTING\\n\");\n  Die();\n}\n\n#endif  // !SANITIZER_FUCHSIA && !SANITIZER_GO\n\natomic_uintptr_t ScopedErrorReportLock::reporting_thread_ = {0};\nStaticSpinMutex ScopedErrorReportLock::mutex_;\n\nvoid ScopedErrorReportLock::Lock() {\n  uptr current = GetThreadSelf();\n  for (;;) {\n    uptr expected = 0;\n    if (atomic_compare_exchange_strong(&reporting_thread_, &expected, current,\n                 
                      memory_order_relaxed)) {\n      // We've claimed reporting_thread so proceed.\n      mutex_.Lock();\n      return;\n    }\n\n    if (expected == current) {\n      // This is either asynch signal or nested error during error reporting.\n      // Fail simple to avoid deadlocks in Report().\n\n      // Can't use Report() here because of potential deadlocks in nested\n      // signal handlers.\n      CatastrophicErrorWrite(SanitizerToolName,\n                             internal_strlen(SanitizerToolName));\n      static const char msg[] = \": nested bug in the same thread, aborting.\\n\";\n      CatastrophicErrorWrite(msg, sizeof(msg) - 1);\n\n      internal__exit(common_flags()->exitcode);\n    }\n\n    internal_sched_yield();\n  }\n}\n\nvoid ScopedErrorReportLock::Unlock() {\n  mutex_.Unlock();\n  atomic_store_relaxed(&reporting_thread_, 0);\n}\n\nvoid ScopedErrorReportLock::CheckLocked() { mutex_.CheckLocked(); }\n\n}  // namespace __sanitizer\n"
  },
  {
    "path": "runtime/sanitizer_common/sanitizer_symbolizer_rtems.h",
    "content": "//===-- sanitizer_symbolizer_rtems.h -----------------------------------===//\n//\n// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n// See https://llvm.org/LICENSE.txt for license information.\n// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n//\n//===----------------------------------------------------------------------===//\n//\n// This file is shared between various sanitizers' runtime libraries.\n//\n// Define RTEMS's string formats and limits for the markup symbolizer.\n//===----------------------------------------------------------------------===//\n#ifndef SANITIZER_SYMBOLIZER_RTEMS_H\n#define SANITIZER_SYMBOLIZER_RTEMS_H\n\n#include \"sanitizer_internal_defs.h\"\n\nnamespace __sanitizer {\n\n// The Myriad RTEMS symbolizer currently only parses backtrace lines,\n// so use a format that the symbolizer understands.  For other\n// markups, keep them the same as the Fuchsia's.\n\n// This is used by UBSan for type names, and by ASan for global variable names.\nconstexpr const char *kFormatDemangle = \"{{{symbol:%s}}}\";\nconstexpr uptr kFormatDemangleMax = 1024;  // Arbitrary.\n\n// Function name or equivalent from PC location.\nconstexpr const char *kFormatFunction = \"{{{pc:%p}}}\";\nconstexpr uptr kFormatFunctionMax = 64;  // More than big enough for 64-bit hex.\n\n// Global variable name or equivalent from data memory address.\nconstexpr const char *kFormatData = \"{{{data:%p}}}\";\n\n// One frame in a backtrace (printed on a line by itself).\nconstexpr const char *kFormatFrame = \"    [%u] IP: %p\";\n\n}  // namespace __sanitizer\n\n#endif  // SANITIZER_SYMBOLIZER_RTEMS_H\n"
  },
  {
    "path": "runtime/sanitizer_common/sanitizer_symbolizer_win.cpp",
    "content": "//===-- sanitizer_symbolizer_win.cpp --------------------------------------===//\n//\n// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n// See https://llvm.org/LICENSE.txt for license information.\n// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n//\n//===----------------------------------------------------------------------===//\n//\n// This file is shared between AddressSanitizer and ThreadSanitizer\n// run-time libraries.\n// Windows-specific implementation of symbolizer parts.\n//===----------------------------------------------------------------------===//\n\n#include \"sanitizer_platform.h\"\n#if SANITIZER_WINDOWS\n\n#include \"sanitizer_dbghelp.h\"\n#include \"sanitizer_symbolizer_internal.h\"\n\nnamespace __sanitizer {\n\ndecltype(::StackWalk64) *StackWalk64;\ndecltype(::SymCleanup) *SymCleanup;\ndecltype(::SymFromAddr) *SymFromAddr;\ndecltype(::SymFunctionTableAccess64) *SymFunctionTableAccess64;\ndecltype(::SymGetLineFromAddr64) *SymGetLineFromAddr64;\ndecltype(::SymGetModuleBase64) *SymGetModuleBase64;\ndecltype(::SymGetSearchPathW) *SymGetSearchPathW;\ndecltype(::SymInitialize) *SymInitialize;\ndecltype(::SymSetOptions) *SymSetOptions;\ndecltype(::SymSetSearchPathW) *SymSetSearchPathW;\ndecltype(::UnDecorateSymbolName) *UnDecorateSymbolName;\n\nnamespace {\n\nclass WinSymbolizerTool final : public SymbolizerTool {\n public:\n  // The constructor is provided to avoid synthesized memsets.\n  WinSymbolizerTool() {}\n\n  bool SymbolizePC(uptr addr, SymbolizedStack *stack) override;\n  bool SymbolizeData(uptr addr, DataInfo *info) override {\n    return false;\n  }\n  const char *Demangle(const char *name) override;\n};\n\nbool is_dbghelp_initialized = false;\n\nbool TrySymInitialize() {\n  SymSetOptions(SYMOPT_DEFERRED_LOADS | SYMOPT_UNDNAME | SYMOPT_LOAD_LINES);\n  return SymInitialize(GetCurrentProcess(), 0, TRUE);\n  // FIXME: We don't call SymCleanup() on exit yet - should we?\n}\n\n}  // 
namespace\n\n// Initializes DbgHelp library, if it's not yet initialized. Calls to this\n// function should be synchronized with respect to other calls to DbgHelp API\n// (e.g. from WinSymbolizerTool).\nvoid InitializeDbgHelpIfNeeded() {\n  if (is_dbghelp_initialized)\n    return;\n\n  HMODULE dbghelp = LoadLibraryA(\"dbghelp.dll\");\n  CHECK(dbghelp && \"failed to load dbghelp.dll\");\n\n#define DBGHELP_IMPORT(name)                                                  \\\n  do {                                                                        \\\n    name =                                                                    \\\n        reinterpret_cast<decltype(::name) *>(GetProcAddress(dbghelp, #name)); \\\n    CHECK(name != nullptr);                                                   \\\n  } while (0)\n  DBGHELP_IMPORT(StackWalk64);\n  DBGHELP_IMPORT(SymCleanup);\n  DBGHELP_IMPORT(SymFromAddr);\n  DBGHELP_IMPORT(SymFunctionTableAccess64);\n  DBGHELP_IMPORT(SymGetLineFromAddr64);\n  DBGHELP_IMPORT(SymGetModuleBase64);\n  DBGHELP_IMPORT(SymGetSearchPathW);\n  DBGHELP_IMPORT(SymInitialize);\n  DBGHELP_IMPORT(SymSetOptions);\n  DBGHELP_IMPORT(SymSetSearchPathW);\n  DBGHELP_IMPORT(UnDecorateSymbolName);\n#undef DBGHELP_IMPORT\n\n  if (!TrySymInitialize()) {\n    // OK, maybe the client app has called SymInitialize already.\n    // That's a bit unfortunate for us as all the DbgHelp functions are\n    // single-threaded and we can't coordinate with the app.\n    // FIXME: Can we stop the other threads at this point?\n    // Anyways, we have to reconfigure stuff to make sure that SymInitialize\n    // has all the appropriate options set.\n    // Cross our fingers and reinitialize DbgHelp.\n    Report(\"*** WARNING: Failed to initialize DbgHelp!              ***\\n\");\n    Report(\"*** Most likely this means that the app is already      ***\\n\");\n    Report(\"*** using DbgHelp, possibly with incompatible flags.    
***\\n\");\n    Report(\"*** Due to technical reasons, symbolization might crash ***\\n\");\n    Report(\"*** or produce wrong results.                           ***\\n\");\n    SymCleanup(GetCurrentProcess());\n    TrySymInitialize();\n  }\n  is_dbghelp_initialized = true;\n\n  // When an executable is run from a location different from the one where it\n  // was originally built, we may not see the nearby PDB files.\n  // To work around this, let's append the directory of the main module\n  // to the symbol search path.  All the failures below are not fatal.\n  const size_t kSymPathSize = 2048;\n  static wchar_t path_buffer[kSymPathSize + 1 + MAX_PATH];\n  if (!SymGetSearchPathW(GetCurrentProcess(), path_buffer, kSymPathSize)) {\n    Report(\"*** WARNING: Failed to SymGetSearchPathW ***\\n\");\n    return;\n  }\n  size_t sz = wcslen(path_buffer);\n  if (sz) {\n    CHECK_EQ(0, wcscat_s(path_buffer, L\";\"));\n    sz++;\n  }\n  DWORD res = GetModuleFileNameW(NULL, path_buffer + sz, MAX_PATH);\n  if (res == 0 || res == MAX_PATH) {\n    Report(\"*** WARNING: Failed to getting the EXE directory ***\\n\");\n    return;\n  }\n  // Write the zero character in place of the last backslash to get the\n  // directory of the main module at the end of path_buffer.\n  wchar_t *last_bslash = wcsrchr(path_buffer + sz, L'\\\\');\n  CHECK_NE(last_bslash, 0);\n  *last_bslash = L'\\0';\n  if (!SymSetSearchPathW(GetCurrentProcess(), path_buffer)) {\n    Report(\"*** WARNING: Failed to SymSetSearchPathW\\n\");\n    return;\n  }\n}\n\nbool WinSymbolizerTool::SymbolizePC(uptr addr, SymbolizedStack *frame) {\n  InitializeDbgHelpIfNeeded();\n\n  // See https://docs.microsoft.com/en-us/windows/win32/debug/retrieving-symbol-information-by-address\n  InternalMmapVector<char> buffer(sizeof(SYMBOL_INFO) +\n                                  MAX_SYM_NAME * sizeof(CHAR));\n  PSYMBOL_INFO symbol = (PSYMBOL_INFO)&buffer[0];\n  symbol->SizeOfStruct = sizeof(SYMBOL_INFO);\n  symbol->MaxNameLen = 
MAX_SYM_NAME;\n  DWORD64 offset = 0;\n  BOOL got_objname = SymFromAddr(GetCurrentProcess(),\n                                 (DWORD64)addr, &offset, symbol);\n  if (!got_objname)\n    return false;\n\n  DWORD unused;\n  IMAGEHLP_LINE64 line_info;\n  line_info.SizeOfStruct = sizeof(IMAGEHLP_LINE64);\n  BOOL got_fileline = SymGetLineFromAddr64(GetCurrentProcess(), (DWORD64)addr,\n                                           &unused, &line_info);\n  frame->info.function = internal_strdup(symbol->Name);\n  frame->info.function_offset = (uptr)offset;\n  if (got_fileline) {\n    frame->info.file = internal_strdup(line_info.FileName);\n    frame->info.line = line_info.LineNumber;\n  }\n  // Only consider this a successful symbolization attempt if we got file info.\n  // Otherwise, try llvm-symbolizer.\n  return got_fileline;\n}\n\nconst char *WinSymbolizerTool::Demangle(const char *name) {\n  CHECK(is_dbghelp_initialized);\n  static char demangle_buffer[1000];\n  if (name[0] == '\\01' &&\n      UnDecorateSymbolName(name + 1, demangle_buffer, sizeof(demangle_buffer),\n                           UNDNAME_NAME_ONLY))\n    return demangle_buffer;\n  else\n    return name;\n}\n\nconst char *Symbolizer::PlatformDemangle(const char *name) {\n  return name;\n}\n\nnamespace {\nstruct ScopedHandle {\n  ScopedHandle() : h_(nullptr) {}\n  explicit ScopedHandle(HANDLE h) : h_(h) {}\n  ~ScopedHandle() {\n    if (h_)\n      ::CloseHandle(h_);\n  }\n  HANDLE get() { return h_; }\n  HANDLE *receive() { return &h_; }\n  HANDLE release() {\n    HANDLE h = h_;\n    h_ = nullptr;\n    return h;\n  }\n  HANDLE h_;\n};\n} // namespace\n\nbool SymbolizerProcess::StartSymbolizerSubprocess() {\n  // Create inherited pipes for stdin and stdout.\n  ScopedHandle stdin_read, stdin_write;\n  ScopedHandle stdout_read, stdout_write;\n  SECURITY_ATTRIBUTES attrs;\n  attrs.nLength = sizeof(SECURITY_ATTRIBUTES);\n  attrs.bInheritHandle = TRUE;\n  attrs.lpSecurityDescriptor = nullptr;\n  if 
(!::CreatePipe(stdin_read.receive(), stdin_write.receive(), &attrs, 0) ||\n      !::CreatePipe(stdout_read.receive(), stdout_write.receive(), &attrs, 0)) {\n    VReport(2, \"WARNING: %s CreatePipe failed (error code: %d)\\n\",\n            SanitizerToolName, path_, GetLastError());\n    return false;\n  }\n\n  // Don't inherit the writing end of stdin or the reading end of stdout.\n  if (!SetHandleInformation(stdin_write.get(), HANDLE_FLAG_INHERIT, 0) ||\n      !SetHandleInformation(stdout_read.get(), HANDLE_FLAG_INHERIT, 0)) {\n    VReport(2, \"WARNING: %s SetHandleInformation failed (error code: %d)\\n\",\n            SanitizerToolName, path_, GetLastError());\n    return false;\n  }\n\n  // Compute the command line. Wrap double quotes around everything.\n  const char *argv[kArgVMax];\n  GetArgV(path_, argv);\n  InternalScopedString command_line;\n  for (int i = 0; argv[i]; i++) {\n    const char *arg = argv[i];\n    int arglen = internal_strlen(arg);\n    // Check that tool command lines are simple and that complete escaping is\n    // unnecessary.\n    CHECK(!internal_strchr(arg, '\"') && \"quotes in args unsupported\");\n    CHECK(!internal_strstr(arg, \"\\\\\\\\\") &&\n          \"double backslashes in args unsupported\");\n    CHECK(arglen > 0 && arg[arglen - 1] != '\\\\' &&\n          \"args ending in backslash and empty args unsupported\");\n    command_line.append(\"\\\"%s\\\" \", arg);\n  }\n  VReport(3, \"Launching symbolizer command: %s\\n\", command_line.data());\n\n  // Launch llvm-symbolizer with stdin and stdout redirected.\n  STARTUPINFOA si;\n  memset(&si, 0, sizeof(si));\n  si.cb = sizeof(si);\n  si.dwFlags |= STARTF_USESTDHANDLES;\n  si.hStdInput = stdin_read.get();\n  si.hStdOutput = stdout_write.get();\n  PROCESS_INFORMATION pi;\n  memset(&pi, 0, sizeof(pi));\n  if (!CreateProcessA(path_,               // Executable\n                      command_line.data(), // Command line\n                      nullptr,             // Process handle not 
inheritable\n                      nullptr,             // Thread handle not inheritable\n                      TRUE,                // Set handle inheritance to TRUE\n                      0,                   // Creation flags\n                      nullptr,             // Use parent's environment block\n                      nullptr,             // Use parent's starting directory\n                      &si, &pi)) {\n    VReport(2, \"WARNING: %s failed to create process for %s (error code: %d)\\n\",\n            SanitizerToolName, path_, GetLastError());\n    return false;\n  }\n\n  // Process creation succeeded, so transfer handle ownership into the fields.\n  input_fd_ = stdout_read.release();\n  output_fd_ = stdin_write.release();\n\n  // The llvm-symbolizer process is responsible for quitting itself when the\n  // stdin pipe is closed, so we don't need these handles. Close them to prevent\n  // leaks. If we ever want to try to kill the symbolizer process from the\n  // parent, we'll want to hang on to these handles.\n  CloseHandle(pi.hProcess);\n  CloseHandle(pi.hThread);\n  return true;\n}\n\nstatic void ChooseSymbolizerTools(IntrusiveList<SymbolizerTool> *list,\n                                  LowLevelAllocator *allocator) {\n  if (!common_flags()->symbolize) {\n    VReport(2, \"Symbolizer is disabled.\\n\");\n    return;\n  }\n\n  // Add llvm-symbolizer.\n  const char *user_path = common_flags()->external_symbolizer_path;\n\n  if (user_path && internal_strchr(user_path, '%')) {\n    char *new_path = (char *)InternalAlloc(kMaxPathLength);\n    SubstituteForFlagValue(user_path, new_path, kMaxPathLength);\n    user_path = new_path;\n  }\n\n  const char *path =\n      user_path ? user_path : FindPathToBinary(\"llvm-symbolizer.exe\");\n  if (path) {\n    VReport(2, \"Using llvm-symbolizer at %spath: %s\\n\",\n            user_path ? 
\"user-specified \" : \"\", path);\n    list->push_back(new(*allocator) LLVMSymbolizer(path, allocator));\n  } else {\n    if (user_path && user_path[0] == '\\0') {\n      VReport(2, \"External symbolizer is explicitly disabled.\\n\");\n    } else {\n      VReport(2, \"External symbolizer is not present.\\n\");\n    }\n  }\n\n  // Add the dbghelp based symbolizer.\n  list->push_back(new(*allocator) WinSymbolizerTool());\n}\n\nSymbolizer *Symbolizer::PlatformInit() {\n  IntrusiveList<SymbolizerTool> list;\n  list.clear();\n  ChooseSymbolizerTools(&list, &symbolizer_allocator_);\n\n  return new(symbolizer_allocator_) Symbolizer(list);\n}\n\nvoid Symbolizer::LateInitialize() {\n  Symbolizer::GetOrInit();\n}\n\n}  // namespace __sanitizer\n\n#endif  // SANITIZER_WINDOWS\n"
  },
  {
    "path": "runtime/sanitizer_common/sanitizer_syscall_generic.inc",
    "content": "//===-- sanitizer_syscall_generic.inc ---------------------------*- C++ -*-===//\n//\n// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n// See https://llvm.org/LICENSE.txt for license information.\n// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n//\n//===----------------------------------------------------------------------===//\n//\n// Generic implementations of internal_syscall* and internal_iserror.\n//\n//===----------------------------------------------------------------------===//\n\n// NetBSD uses libc calls directly\n#if !SANITIZER_NETBSD\n\n#if SANITIZER_FREEBSD || SANITIZER_MAC || SANITIZER_SOLARIS\n# define SYSCALL(name) SYS_ ## name\n#else\n# define SYSCALL(name) __NR_ ## name\n#endif\n\n#if defined(__x86_64__) && (SANITIZER_FREEBSD || SANITIZER_MAC)\n# define internal_syscall __syscall\n# else\n# define internal_syscall syscall\n#endif\n\n#endif\n\nbool internal_iserror(uptr retval, int *rverrno) {\n  if (retval == (uptr)-1) {\n    if (rverrno)\n      *rverrno = errno;\n    return true;\n  } else {\n    return false;\n  }\n}\n"
  },
  {
    "path": "runtime/sanitizer_common/sanitizer_syscall_linux_aarch64.inc",
    "content": "//===-- sanitizer_syscall_linux_aarch64.inc --------------------*- C++ -*-===//\n//\n// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n// See https://llvm.org/LICENSE.txt for license information.\n// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n//\n//===----------------------------------------------------------------------===//\n//\n// Implementations of internal_syscall and internal_iserror for Linux/aarch64.\n//\n//===----------------------------------------------------------------------===//\n\n#define SYSCALL(name) __NR_ ## name\n\nstatic uptr __internal_syscall(u64 nr) {\n  register u64 x8 asm(\"x8\") = nr;\n  register u64 x0 asm(\"x0\");\n  asm volatile(\"svc 0\"\n               : \"=r\"(x0)\n               : \"r\"(x8)\n               : \"memory\", \"cc\");\n  return x0;\n}\n#define __internal_syscall0(n) \\\n  (__internal_syscall)(n)\n\nstatic uptr __internal_syscall(u64 nr, u64 arg1) {\n  register u64 x8 asm(\"x8\") = nr;\n  register u64 x0 asm(\"x0\") = arg1;\n  asm volatile(\"svc 0\"\n               : \"=r\"(x0)\n               : \"r\"(x8), \"0\"(x0)\n               : \"memory\", \"cc\");\n  return x0;\n}\n#define __internal_syscall1(n, a1) \\\n  (__internal_syscall)(n, (u64)(a1))\n\nstatic uptr __internal_syscall(u64 nr, u64 arg1, long arg2) {\n  register u64 x8 asm(\"x8\") = nr;\n  register u64 x0 asm(\"x0\") = arg1;\n  register u64 x1 asm(\"x1\") = arg2;\n  asm volatile(\"svc 0\"\n               : \"=r\"(x0)\n               : \"r\"(x8), \"0\"(x0), \"r\"(x1)\n               : \"memory\", \"cc\");\n  return x0;\n}\n#define __internal_syscall2(n, a1, a2) \\\n  (__internal_syscall)(n, (u64)(a1), (long)(a2))\n\nstatic uptr __internal_syscall(u64 nr, u64 arg1, long arg2, long arg3) {\n  register u64 x8 asm(\"x8\") = nr;\n  register u64 x0 asm(\"x0\") = arg1;\n  register u64 x1 asm(\"x1\") = arg2;\n  register u64 x2 asm(\"x2\") = arg3;\n  asm volatile(\"svc 0\"\n               : \"=r\"(x0)\n          
     : \"r\"(x8), \"0\"(x0), \"r\"(x1), \"r\"(x2)\n               : \"memory\", \"cc\");\n  return x0;\n}\n#define __internal_syscall3(n, a1, a2, a3) \\\n  (__internal_syscall)(n, (u64)(a1), (long)(a2), (long)(a3))\n\nstatic uptr __internal_syscall(u64 nr, u64 arg1, long arg2, long arg3,\n                               u64 arg4) {\n  register u64 x8 asm(\"x8\") = nr;\n  register u64 x0 asm(\"x0\") = arg1;\n  register u64 x1 asm(\"x1\") = arg2;\n  register u64 x2 asm(\"x2\") = arg3;\n  register u64 x3 asm(\"x3\") = arg4;\n  asm volatile(\"svc 0\"\n               : \"=r\"(x0)\n               : \"r\"(x8), \"0\"(x0), \"r\"(x1), \"r\"(x2), \"r\"(x3)\n               : \"memory\", \"cc\");\n  return x0;\n}\n#define __internal_syscall4(n, a1, a2, a3, a4) \\\n  (__internal_syscall)(n, (u64)(a1), (long)(a2), (long)(a3), (long)(a4))\n\nstatic uptr __internal_syscall(u64 nr, u64 arg1, long arg2, long arg3,\n                               u64 arg4, long arg5) {\n  register u64 x8 asm(\"x8\") = nr;\n  register u64 x0 asm(\"x0\") = arg1;\n  register u64 x1 asm(\"x1\") = arg2;\n  register u64 x2 asm(\"x2\") = arg3;\n  register u64 x3 asm(\"x3\") = arg4;\n  register u64 x4 asm(\"x4\") = arg5;\n  asm volatile(\"svc 0\"\n               : \"=r\"(x0)\n               : \"r\"(x8), \"0\"(x0), \"r\"(x1), \"r\"(x2), \"r\"(x3), \"r\"(x4)\n               : \"memory\", \"cc\");\n  return x0;\n}\n#define __internal_syscall5(n, a1, a2, a3, a4, a5) \\\n  (__internal_syscall)(n, (u64)(a1), (long)(a2), (long)(a3), (long)(a4), \\\n                       (u64)(a5))\n\nstatic uptr __internal_syscall(u64 nr, u64 arg1, long arg2, long arg3,\n                               u64 arg4, long arg5, long arg6) {\n  register u64 x8 asm(\"x8\") = nr;\n  register u64 x0 asm(\"x0\") = arg1;\n  register u64 x1 asm(\"x1\") = arg2;\n  register u64 x2 asm(\"x2\") = arg3;\n  register u64 x3 asm(\"x3\") = arg4;\n  register u64 x4 asm(\"x4\") = arg5;\n  register u64 x5 asm(\"x5\") = arg6;\n  asm volatile(\"svc 0\"\n      
         : \"=r\"(x0)\n               : \"r\"(x8), \"0\"(x0), \"r\"(x1), \"r\"(x2), \"r\"(x3), \"r\"(x4), \"r\"(x5)\n               : \"memory\", \"cc\");\n  return x0;\n}\n#define __internal_syscall6(n, a1, a2, a3, a4, a5, a6) \\\n  (__internal_syscall)(n, (u64)(a1), (long)(a2), (long)(a3), (long)(a4), \\\n                       (u64)(a5), (long)(a6))\n\n#define __SYSCALL_NARGS_X(a1, a2, a3, a4, a5, a6, a7, a8, n, ...) n\n#define __SYSCALL_NARGS(...) \\\n  __SYSCALL_NARGS_X(__VA_ARGS__, 7, 6, 5, 4, 3, 2, 1, 0, )\n#define __SYSCALL_CONCAT_X(a, b) a##b\n#define __SYSCALL_CONCAT(a, b) __SYSCALL_CONCAT_X(a, b)\n#define __SYSCALL_DISP(b, ...) \\\n  __SYSCALL_CONCAT(b, __SYSCALL_NARGS(__VA_ARGS__))(__VA_ARGS__)\n\n#define internal_syscall(...) __SYSCALL_DISP(__internal_syscall, __VA_ARGS__)\n\n// Helper function used to avoid clobbering of errno.\nbool internal_iserror(uptr retval, int *rverrno) {\n  if (retval >= (uptr)-4095) {\n    if (rverrno)\n      *rverrno = -retval;\n    return true;\n  }\n  return false;\n}\n"
  },
  {
    "path": "runtime/sanitizer_common/sanitizer_syscall_linux_arm.inc",
    "content": "//===-- sanitizer_syscall_linux_arm.inc -------------------------*- C++ -*-===//\n//\n// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n// See https://llvm.org/LICENSE.txt for license information.\n// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n//\n//===----------------------------------------------------------------------===//\n//\n// Implementations of internal_syscall and internal_iserror for Linux/arm.\n//\n//===----------------------------------------------------------------------===//\n\n#define SYSCALL(name) __NR_ ## name\n\nstatic uptr __internal_syscall(u32 nr) {\n  register u32 r8 asm(\"r7\") = nr;\n  register u32 r0 asm(\"r0\");\n  asm volatile(\"swi #0\"\n               : \"=r\"(r0)\n               : \"r\"(r8)\n               : \"memory\", \"cc\");\n  return r0;\n}\n#define __internal_syscall0(n) \\\n  (__internal_syscall)(n)\n\nstatic uptr __internal_syscall(u32 nr, u32 arg1) {\n  register u32 r8 asm(\"r7\") = nr;\n  register u32 r0 asm(\"r0\") = arg1;\n  asm volatile(\"swi #0\"\n               : \"=r\"(r0)\n               : \"r\"(r8), \"0\"(r0)\n               : \"memory\", \"cc\");\n  return r0;\n}\n#define __internal_syscall1(n, a1) \\\n  (__internal_syscall)(n, (u32)(a1))\n\nstatic uptr __internal_syscall(u32 nr, u32 arg1, long arg2) {\n  register u32 r8 asm(\"r7\") = nr;\n  register u32 r0 asm(\"r0\") = arg1;\n  register u32 r1 asm(\"r1\") = arg2;\n  asm volatile(\"swi #0\"\n               : \"=r\"(r0)\n               : \"r\"(r8), \"0\"(r0), \"r\"(r1)\n               : \"memory\", \"cc\");\n  return r0;\n}\n#define __internal_syscall2(n, a1, a2) \\\n  (__internal_syscall)(n, (u32)(a1), (long)(a2))\n\nstatic uptr __internal_syscall(u32 nr, u32 arg1, long arg2, long arg3) {\n  register u32 r8 asm(\"r7\") = nr;\n  register u32 r0 asm(\"r0\") = arg1;\n  register u32 r1 asm(\"r1\") = arg2;\n  register u32 r2 asm(\"r2\") = arg3;\n  asm volatile(\"swi #0\"\n               : \"=r\"(r0)\n         
      : \"r\"(r8), \"0\"(r0), \"r\"(r1), \"r\"(r2)\n               : \"memory\", \"cc\");\n  return r0;\n}\n#define __internal_syscall3(n, a1, a2, a3) \\\n  (__internal_syscall)(n, (u32)(a1), (long)(a2), (long)(a3))\n\nstatic uptr __internal_syscall(u32 nr, u32 arg1, long arg2, long arg3,\n                               u32 arg4) {\n  register u32 r8 asm(\"r7\") = nr;\n  register u32 r0 asm(\"r0\") = arg1;\n  register u32 r1 asm(\"r1\") = arg2;\n  register u32 r2 asm(\"r2\") = arg3;\n  register u32 r3 asm(\"r3\") = arg4;\n  asm volatile(\"swi #0\"\n               : \"=r\"(r0)\n               : \"r\"(r8), \"0\"(r0), \"r\"(r1), \"r\"(r2), \"r\"(r3)\n               : \"memory\", \"cc\");\n  return r0;\n}\n#define __internal_syscall4(n, a1, a2, a3, a4) \\\n  (__internal_syscall)(n, (u32)(a1), (long)(a2), (long)(a3), (long)(a4))\n\nstatic uptr __internal_syscall(u32 nr, u32 arg1, long arg2, long arg3,\n                               u32 arg4, long arg5) {\n  register u32 r8 asm(\"r7\") = nr;\n  register u32 r0 asm(\"r0\") = arg1;\n  register u32 r1 asm(\"r1\") = arg2;\n  register u32 r2 asm(\"r2\") = arg3;\n  register u32 r3 asm(\"r3\") = arg4;\n  register u32 r4 asm(\"r4\") = arg5;\n  asm volatile(\"swi #0\"\n               : \"=r\"(r0)\n               : \"r\"(r8), \"0\"(r0), \"r\"(r1), \"r\"(r2), \"r\"(r3), \"r\"(r4)\n               : \"memory\", \"cc\");\n  return r0;\n}\n#define __internal_syscall5(n, a1, a2, a3, a4, a5) \\\n  (__internal_syscall)(n, (u32)(a1), (long)(a2), (long)(a3), (long)(a4), \\\n                       (u32)(a5))\n\nstatic uptr __internal_syscall(u32 nr, u32 arg1, long arg2, long arg3,\n                               u32 arg4, long arg5, long arg6) {\n  register u32 r8 asm(\"r7\") = nr;\n  register u32 r0 asm(\"r0\") = arg1;\n  register u32 r1 asm(\"r1\") = arg2;\n  register u32 r2 asm(\"r2\") = arg3;\n  register u32 r3 asm(\"r3\") = arg4;\n  register u32 r4 asm(\"r4\") = arg5;\n  register u32 r5 asm(\"r5\") = arg6;\n  asm volatile(\"swi #0\"\n  
             : \"=r\"(r0)\n               : \"r\"(r8), \"0\"(r0), \"r\"(r1), \"r\"(r2), \"r\"(r3), \"r\"(r4), \"r\"(r5)\n               : \"memory\", \"cc\");\n  return r0;\n}\n#define __internal_syscall6(n, a1, a2, a3, a4, a5, a6) \\\n  (__internal_syscall)(n, (u32)(a1), (long)(a2), (long)(a3), (long)(a4), \\\n                       (u32)(a5), (long)(a6))\n\n#define __SYSCALL_NARGS_X(a1, a2, a3, a4, a5, a6, a7, a8, n, ...) n\n#define __SYSCALL_NARGS(...) \\\n  __SYSCALL_NARGS_X(__VA_ARGS__, 7, 6, 5, 4, 3, 2, 1, 0, )\n#define __SYSCALL_CONCAT_X(a, b) a##b\n#define __SYSCALL_CONCAT(a, b) __SYSCALL_CONCAT_X(a, b)\n#define __SYSCALL_DISP(b, ...) \\\n  __SYSCALL_CONCAT(b, __SYSCALL_NARGS(__VA_ARGS__))(__VA_ARGS__)\n\n#define internal_syscall(...) __SYSCALL_DISP(__internal_syscall, __VA_ARGS__)\n\n// Helper function used to avoid clobbering of errno.\nbool internal_iserror(uptr retval, int *rverrno) {\n  if (retval >= (uptr)-4095) {\n    if (rverrno)\n      *rverrno = -retval;\n    return true;\n  }\n  return false;\n}\n"
  },
  {
    "path": "runtime/sanitizer_common/sanitizer_syscall_linux_hexagon.inc",
    "content": "//===-- sanitizer_syscall_linux_hexagon.inc ---------------------*- C++ -*-===//\n//\n// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n// See https://llvm.org/LICENSE.txt for license information.\n// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n//\n//===----------------------------------------------------------------------===//\n//\n// Implementations of internal_syscall and internal_iserror for Linux/hexagon.\n//\n//===----------------------------------------------------------------------===//\n\n#define SYSCALL(name) __NR_##name\n\n#define __internal_syscall_LL_E(x) \\\n  ((union {                        \\\n    long long ll;                  \\\n    long l[2];                     \\\n  }){.ll = x})                     \\\n      .l[0],                       \\\n      ((union {                    \\\n        long long ll;              \\\n        long l[2];                 \\\n      }){.ll = x})                 \\\n          .l[1]\n#define __internal_syscall_LL_O(x) 0, __SYSCALL_LL_E((x))\n\n#define __asm_syscall(...)                                                 
\\\n  do {                                                                     \\\n    __asm__ __volatile__(\"trap0(#1)\" : \"=r\"(r0) : __VA_ARGS__ : \"memory\"); \\\n    return r0;                                                             \\\n  } while (0)\n\n#define __internal_syscall0(n) (__internal_syscall)(n)\n\nstatic uptr __internal_syscall(long n) {\n  register u32 r6 __asm__(\"r6\") = n;\n  register u32 r0 __asm__(\"r0\");\n  __asm_syscall(\"r\"(r6));\n}\n\n#define __internal_syscall1(n, a1) (__internal_syscall)(n, (long)(a1))\n\nstatic uptr __internal_syscall(long n, long a) {\n  register u32 r6 __asm__(\"r6\") = n;\n  register u32 r0 __asm__(\"r0\") = a;\n  __asm_syscall(\"r\"(r6), \"0\"(r0));\n}\n\n#define __internal_syscall2(n, a1, a2) \\\n  (__internal_syscall)(n, (long)(a1), (long)(a2))\n\nstatic uptr __internal_syscall(long n, long a, long b) {\n  register u32 r6 __asm__(\"r6\") = n;\n  register u32 r0 __asm__(\"r0\") = a;\n  register u32 r1 __asm__(\"r1\") = b;\n  __asm_syscall(\"r\"(r6), \"0\"(r0), \"r\"(r1));\n}\n\n#define __internal_syscall3(n, a1, a2, a3) \\\n  (__internal_syscall)(n, (long)(a1), (long)(a2), (long)(a3))\n\nstatic uptr __internal_syscall(long n, long a, long b, long c) {\n  register u32 r6 __asm__(\"r6\") = n;\n  register u32 r0 __asm__(\"r0\") = a;\n  register u32 r1 __asm__(\"r1\") = b;\n  register u32 r2 __asm__(\"r2\") = c;\n  __asm_syscall(\"r\"(r6), \"0\"(r0), \"r\"(r1), \"r\"(r2));\n}\n\n#define __internal_syscall4(n, a1, a2, a3, a4) \\\n  (__internal_syscall)(n, (long)(a1), (long)(a2), (long)(a3), (long)(a4))\n\nstatic uptr __internal_syscall(long n, long a, long b, long c, long d) {\n  register u32 r6 __asm__(\"r6\") = n;\n  register u32 r0 __asm__(\"r0\") = a;\n  register u32 r1 __asm__(\"r1\") = b;\n  register u32 r2 __asm__(\"r2\") = c;\n  register u32 r3 __asm__(\"r3\") = d;\n  __asm_syscall(\"r\"(r6), \"0\"(r0), \"r\"(r1), \"r\"(r2), \"r\"(r3));\n}\n\n#define __internal_syscall5(n, a1, a2, a3, a4, a5)            
            \\\n  (__internal_syscall)(n, (long)(a1), (long)(a2), (long)(a3), (long)(a4), \\\n                       (long)(a5))\n\nstatic uptr __internal_syscall(long n, long a, long b, long c, long d, long e) {\n  register u32 r6 __asm__(\"r6\") = n;\n  register u32 r0 __asm__(\"r0\") = a;\n  register u32 r1 __asm__(\"r1\") = b;\n  register u32 r2 __asm__(\"r2\") = c;\n  register u32 r3 __asm__(\"r3\") = d;\n  register u32 r4 __asm__(\"r4\") = e;\n  __asm_syscall(\"r\"(r6), \"0\"(r0), \"r\"(r1), \"r\"(r2), \"r\"(r3), \"r\"(r4));\n}\n\n#define __internal_syscall6(n, a1, a2, a3, a4, a5, a6)                    \\\n  (__internal_syscall)(n, (long)(a1), (long)(a2), (long)(a3), (long)(a4), \\\n                       (long)(a5), (long)(a6))\n\nstatic uptr __internal_syscall(long n, long a, long b, long c, long d, long e,\n                               long f) {\n  register u32 r6 __asm__(\"r6\") = n;\n  register u32 r0 __asm__(\"r0\") = a;\n  register u32 r1 __asm__(\"r1\") = b;\n  register u32 r2 __asm__(\"r2\") = c;\n  register u32 r3 __asm__(\"r3\") = d;\n  register u32 r4 __asm__(\"r4\") = e;\n  register u32 r5 __asm__(\"r5\") = f;\n  __asm_syscall(\"r\"(r6), \"0\"(r0), \"r\"(r1), \"r\"(r2), \"r\"(r3), \"r\"(r4), \"r\"(r5));\n}\n\n#define __SYSCALL_NARGS_X(a1, a2, a3, a4, a5, a6, a7, a8, n, ...) n\n#define __SYSCALL_NARGS(...) \\\n  __SYSCALL_NARGS_X(__VA_ARGS__, 7, 6, 5, 4, 3, 2, 1, 0, )\n#define __SYSCALL_CONCAT_X(a, b) a##b\n#define __SYSCALL_CONCAT(a, b) __SYSCALL_CONCAT_X(a, b)\n#define __SYSCALL_DISP(b, ...) \\\n  __SYSCALL_CONCAT(b, __SYSCALL_NARGS(__VA_ARGS__))(__VA_ARGS__)\n\n#define internal_syscall(...) __SYSCALL_DISP(__internal_syscall, __VA_ARGS__)\n\n// Helper function used to avoid clobbering of errno.\nbool internal_iserror(uptr retval, int *rverrno) {\n  if (retval >= (uptr)-4095) {\n    if (rverrno)\n      *rverrno = -retval;\n    return true;\n  }\n  return false;\n}\n"
  },
  {
    "path": "runtime/sanitizer_common/sanitizer_syscall_linux_riscv64.inc",
    "content": "//===-- sanitizer_syscall_linux_riscv64.inc ---------------------*- C++ -*-===//\n//\n// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n// See https://llvm.org/LICENSE.txt for license information.\n// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n//\n//===----------------------------------------------------------------------===//\n//\n// Implementations of internal_syscall and internal_iserror for Linux/riscv64.\n//\n//===----------------------------------------------------------------------===//\n\n// About local register variables:\n// https://gcc.gnu.org/onlinedocs/gcc/Local-Register-Variables.html#Local-Register-Variables\n//\n// Kernel ABI...\n// To my surprise I haven't found much information regarding it.\n// Kernel source and internet browsing shows that:\n//  syscall number is passed in a7\n//  (http://man7.org/linux/man-pages/man2/syscall.2.html) results are return in\n//  a0 and a1 (http://man7.org/linux/man-pages/man2/syscall.2.html) arguments\n//  are passed in: a0-a7 (see below)\n//\n//  Regarding the arguments. The only \"documentation\" I could find is\n//  this comment (!!!) 
by Bruce Hold on google forums (!!!):\n//    https://groups.google.com/a/groups.riscv.org/forum/#!topic/sw-dev/exbrzM3GZDQ\n//    Confirmed by inspecting glibc sources.\n//  Great way to document things.\n#define SYSCALL(name) __NR_##name\n\n#define INTERNAL_SYSCALL_CLOBBERS \"memory\"\n\nstatic uptr __internal_syscall(u64 nr) {\n  register u64 a7 asm(\"a7\") = nr;\n  register u64 a0 asm(\"a0\");\n  __asm__ volatile(\"ecall\\n\\t\"\n                   : \"=r\"(a0)\n                   : \"r\"(a7)\n                   : INTERNAL_SYSCALL_CLOBBERS);\n  return a0;\n}\n#define __internal_syscall0(n) (__internal_syscall)(n)\n\nstatic uptr __internal_syscall(u64 nr, u64 arg1) {\n  register u64 a7 asm(\"a7\") = nr;\n  register u64 a0 asm(\"a0\") = arg1;\n  __asm__ volatile(\"ecall\\n\\t\"\n                   : \"+r\"(a0)\n                   : \"r\"(a7)\n                   : INTERNAL_SYSCALL_CLOBBERS);\n  return a0;\n}\n#define __internal_syscall1(n, a1) (__internal_syscall)(n, (u64)(a1))\n\nstatic uptr __internal_syscall(u64 nr, u64 arg1, long arg2) {\n  register u64 a7 asm(\"a7\") = nr;\n  register u64 a0 asm(\"a0\") = arg1;\n  register u64 a1 asm(\"a1\") = arg2;\n  __asm__ volatile(\"ecall\\n\\t\"\n                   : \"+r\"(a0)\n                   : \"r\"(a7), \"r\"(a1)\n                   : INTERNAL_SYSCALL_CLOBBERS);\n  return a0;\n}\n#define __internal_syscall2(n, a1, a2) \\\n  (__internal_syscall)(n, (u64)(a1), (long)(a2))\n\nstatic uptr __internal_syscall(u64 nr, u64 arg1, long arg2, long arg3) {\n  register u64 a7 asm(\"a7\") = nr;\n  register u64 a0 asm(\"a0\") = arg1;\n  register u64 a1 asm(\"a1\") = arg2;\n  register u64 a2 asm(\"a2\") = arg3;\n  __asm__ volatile(\"ecall\\n\\t\"\n                   : \"+r\"(a0)\n                   : \"r\"(a7), \"r\"(a1), \"r\"(a2)\n                   : INTERNAL_SYSCALL_CLOBBERS);\n  return a0;\n}\n#define __internal_syscall3(n, a1, a2, a3) \\\n  (__internal_syscall)(n, (u64)(a1), (long)(a2), (long)(a3))\n\nstatic uptr 
__internal_syscall(u64 nr, u64 arg1, long arg2, long arg3,\n                               u64 arg4) {\n  register u64 a7 asm(\"a7\") = nr;\n  register u64 a0 asm(\"a0\") = arg1;\n  register u64 a1 asm(\"a1\") = arg2;\n  register u64 a2 asm(\"a2\") = arg3;\n  register u64 a3 asm(\"a3\") = arg4;\n  __asm__ volatile(\"ecall\\n\\t\"\n                   : \"+r\"(a0)\n                   : \"r\"(a7), \"r\"(a1), \"r\"(a2), \"r\"(a3)\n                   : INTERNAL_SYSCALL_CLOBBERS);\n  return a0;\n}\n#define __internal_syscall4(n, a1, a2, a3, a4) \\\n  (__internal_syscall)(n, (u64)(a1), (long)(a2), (long)(a3), (long)(a4))\n\nstatic uptr __internal_syscall(u64 nr, u64 arg1, long arg2, long arg3, u64 arg4,\n                               long arg5) {\n  register u64 a7 asm(\"a7\") = nr;\n  register u64 a0 asm(\"a0\") = arg1;\n  register u64 a1 asm(\"a1\") = arg2;\n  register u64 a2 asm(\"a2\") = arg3;\n  register u64 a3 asm(\"a3\") = arg4;\n  register u64 a4 asm(\"a4\") = arg5;\n  __asm__ volatile(\"ecall\\n\\t\"\n                   : \"+r\"(a0)\n                   : \"r\"(a7), \"r\"(a1), \"r\"(a2), \"r\"(a3), \"r\"(a4)\n                   : INTERNAL_SYSCALL_CLOBBERS);\n  return a0;\n}\n#define __internal_syscall5(n, a1, a2, a3, a4, a5)                       \\\n  (__internal_syscall)(n, (u64)(a1), (long)(a2), (long)(a3), (long)(a4), \\\n                       (u64)(a5))\n\nstatic uptr __internal_syscall(u64 nr, u64 arg1, long arg2, long arg3, u64 arg4,\n                               long arg5, long arg6) {\n  register u64 a7 asm(\"a7\") = nr;\n  register u64 a0 asm(\"a0\") = arg1;\n  register u64 a1 asm(\"a1\") = arg2;\n  register u64 a2 asm(\"a2\") = arg3;\n  register u64 a3 asm(\"a3\") = arg4;\n  register u64 a4 asm(\"a4\") = arg5;\n  register u64 a5 asm(\"a5\") = arg6;\n  __asm__ volatile(\"ecall\\n\\t\"\n                   : \"+r\"(a0)\n                   : \"r\"(a7), \"r\"(a1), \"r\"(a2), \"r\"(a3), \"r\"(a4), \"r\"(a5)\n                   : 
INTERNAL_SYSCALL_CLOBBERS);\n  return a0;\n}\n#define __internal_syscall6(n, a1, a2, a3, a4, a5, a6)                   \\\n  (__internal_syscall)(n, (u64)(a1), (long)(a2), (long)(a3), (long)(a4), \\\n                       (u64)(a5), (long)(a6))\n\nstatic uptr __internal_syscall(u64 nr, u64 arg1, long arg2, long arg3, u64 arg4,\n                               long arg5, long arg6, long arg7) {\n  register u64 a7 asm(\"a7\") = nr;\n  register u64 a0 asm(\"a0\") = arg1;\n  register u64 a1 asm(\"a1\") = arg2;\n  register u64 a2 asm(\"a2\") = arg3;\n  register u64 a3 asm(\"a3\") = arg4;\n  register u64 a4 asm(\"a4\") = arg5;\n  register u64 a5 asm(\"a5\") = arg6;\n  register u64 a6 asm(\"a6\") = arg7;\n  __asm__ volatile(\"ecall\\n\\t\"\n                   : \"+r\"(a0)\n                   : \"r\"(a7), \"r\"(a1), \"r\"(a2), \"r\"(a3), \"r\"(a4), \"r\"(a5),\n                     \"r\"(a6)\n                   : INTERNAL_SYSCALL_CLOBBERS);\n  return a0;\n}\n#define __internal_syscall7(n, a1, a2, a3, a4, a5, a6, a7)               \\\n  (__internal_syscall)(n, (u64)(a1), (long)(a2), (long)(a3), (long)(a4), \\\n                       (u64)(a5), (long)(a6), (long)(a7))\n\n#define __SYSCALL_NARGS_X(a1, a2, a3, a4, a5, a6, a7, a8, n, ...) n\n#define __SYSCALL_NARGS(...) \\\n  __SYSCALL_NARGS_X(__VA_ARGS__, 7, 6, 5, 4, 3, 2, 1, 0, )\n#define __SYSCALL_CONCAT_X(a, b) a##b\n#define __SYSCALL_CONCAT(a, b) __SYSCALL_CONCAT_X(a, b)\n#define __SYSCALL_DISP(b, ...) \\\n  __SYSCALL_CONCAT(b, __SYSCALL_NARGS(__VA_ARGS__))(__VA_ARGS__)\n\n#define internal_syscall(...) __SYSCALL_DISP(__internal_syscall, __VA_ARGS__)\n\n// Helper function used to avoid clobbering of errno.\nbool internal_iserror(uptr retval, int *rverrno) {\n  if (retval >= (uptr)-4095) {\n    if (rverrno)\n      *rverrno = -retval;\n    return true;\n  }\n  return false;\n}\n"
  },
  {
    "path": "runtime/sanitizer_common/sanitizer_syscall_linux_x86_64.inc",
    "content": "//===-- sanitizer_syscall_linux_x86_64.inc ----------------------*- C++ -*-===//\n//\n// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n// See https://llvm.org/LICENSE.txt for license information.\n// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n//\n//===----------------------------------------------------------------------===//\n//\n// Implementations of internal_syscall and internal_iserror for Linux/x86_64.\n//\n//===----------------------------------------------------------------------===//\n\n#define SYSCALL(name) __NR_ ## name\n\nstatic uptr internal_syscall(u64 nr) {\n  u64 retval;\n  asm volatile(\"syscall\" : \"=a\"(retval) : \"a\"(nr) : \"rcx\", \"r11\",\n               \"memory\", \"cc\");\n  return retval;\n}\n\ntemplate <typename T1>\nstatic uptr internal_syscall(u64 nr, T1 arg1) {\n  u64 retval;\n  asm volatile(\"syscall\" : \"=a\"(retval) : \"a\"(nr), \"D\"((u64)arg1) :\n               \"rcx\", \"r11\", \"memory\", \"cc\");\n  return retval;\n}\n\ntemplate <typename T1, typename T2>\nstatic uptr internal_syscall(u64 nr, T1 arg1, T2 arg2) {\n  u64 retval;\n  asm volatile(\"syscall\" : \"=a\"(retval) : \"a\"(nr), \"D\"((u64)arg1),\n               \"S\"((u64)arg2) : \"rcx\", \"r11\", \"memory\", \"cc\");\n  return retval;\n}\n\ntemplate <typename T1, typename T2, typename T3>\nstatic uptr internal_syscall(u64 nr, T1 arg1, T2 arg2, T3 arg3) {\n  u64 retval;\n  asm volatile(\"syscall\" : \"=a\"(retval) : \"a\"(nr), \"D\"((u64)arg1),\n               \"S\"((u64)arg2), \"d\"((u64)arg3) : \"rcx\", \"r11\", \"memory\", \"cc\");\n  return retval;\n}\n\ntemplate <typename T1, typename T2, typename T3, typename T4>\nstatic uptr internal_syscall(u64 nr, T1 arg1, T2 arg2, T3 arg3, T4 arg4) {\n  u64 retval;\n  asm volatile(\"mov %5, %%r10;\"\n               \"syscall\" : \"=a\"(retval) : \"a\"(nr), \"D\"((u64)arg1),\n               \"S\"((u64)arg2), \"d\"((u64)arg3), \"r\"((u64)arg4) :\n               
\"rcx\", \"r11\", \"r10\", \"memory\", \"cc\");\n  return retval;\n}\n\ntemplate <typename T1, typename T2, typename T3, typename T4, typename T5>\nstatic uptr internal_syscall(u64 nr, T1 arg1, T2 arg2, T3 arg3, T4 arg4,\n                             T5 arg5) {\n  u64 retval;\n  asm volatile(\"mov %5, %%r10;\"\n               \"mov %6, %%r8;\"\n               \"syscall\" : \"=a\"(retval) : \"a\"(nr), \"D\"((u64)arg1),\n               \"S\"((u64)arg2), \"d\"((u64)arg3), \"r\"((u64)arg4), \"r\"((u64)arg5) :\n               \"rcx\", \"r11\", \"r10\", \"r8\", \"memory\", \"cc\");\n  return retval;\n}\n\ntemplate <typename T1, typename T2, typename T3, typename T4, typename T5,\n          typename T6>\nstatic uptr internal_syscall(u64 nr, T1 arg1, T2 arg2, T3 arg3, T4 arg4,\n                             T5 arg5, T6 arg6) {\n  u64 retval;\n  asm volatile(\"mov %5, %%r10;\"\n               \"mov %6, %%r8;\"\n               \"mov %7, %%r9;\"\n               \"syscall\" : \"=a\"(retval) : \"a\"(nr), \"D\"((u64)arg1),\n               \"S\"((u64)arg2), \"d\"((u64)arg3), \"r\"((u64)arg4), \"r\"((u64)arg5),\n               \"r\"((u64)arg6) : \"rcx\", \"r11\", \"r10\", \"r8\", \"r9\",\n               \"memory\", \"cc\");\n  return retval;\n}\n\nbool internal_iserror(uptr retval, int *rverrno) {\n  if (retval >= (uptr)-4095) {\n    if (rverrno)\n      *rverrno = -retval;\n    return true;\n  }\n  return false;\n}\n"
  },
  {
    "path": "runtime/sanitizer_common/sanitizer_syscalls_netbsd.inc",
    "content": "//===-- sanitizer_syscalls_netbsd.inc ---------------------------*- C++ -*-===//\n//\n// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n// See https://llvm.org/LICENSE.txt for license information.\n// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n//\n//===----------------------------------------------------------------------===//\n//\n// Common syscalls handlers for tools like AddressSanitizer,\n// ThreadSanitizer, MemorySanitizer, etc.\n//\n// This file should be included into the tool's interceptor file,\n// which has to define it's own macros:\n//   COMMON_SYSCALL_PRE_READ_RANGE\n//          Called in prehook for regions that will be read by the kernel and\n//          must be initialized.\n//   COMMON_SYSCALL_PRE_WRITE_RANGE\n//          Called in prehook for regions that will be written to by the kernel\n//          and must be addressable. The actual write range may be smaller than\n//          reported in the prehook. See POST_WRITE_RANGE.\n//   COMMON_SYSCALL_POST_READ_RANGE\n//          Called in posthook for regions that were read by the kernel. Does\n//          not make much sense.\n//   COMMON_SYSCALL_POST_WRITE_RANGE\n//          Called in posthook for regions that were written to by the kernel\n//          and are now initialized.\n//   COMMON_SYSCALL_ACQUIRE(addr)\n//          Acquire memory visibility from addr.\n//   COMMON_SYSCALL_RELEASE(addr)\n//          Release memory visibility to addr.\n//   COMMON_SYSCALL_FD_CLOSE(fd)\n//          Called before closing file descriptor fd.\n//   COMMON_SYSCALL_FD_ACQUIRE(fd)\n//          Acquire memory visibility from fd.\n//   COMMON_SYSCALL_FD_RELEASE(fd)\n//          Release memory visibility to fd.\n//   COMMON_SYSCALL_PRE_FORK()\n//          Called before fork syscall.\n//   COMMON_SYSCALL_POST_FORK(long long res)\n//          Called after fork syscall.\n//\n// DO NOT EDIT! 
THIS FILE HAS BEEN GENERATED!\n//\n// Generated with: generate_netbsd_syscalls.awk\n// Generated date: 2020-09-10\n// Generated from: syscalls.master,v 1.306 2020/08/14 00:53:16 riastradh Exp\n//\n//===----------------------------------------------------------------------===//\n\n#include \"sanitizer_platform.h\"\n#if SANITIZER_NETBSD\n\n#include \"sanitizer_libc.h\"\n\n#define PRE_SYSCALL(name)                                                      \\\n  SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_syscall_pre_impl_##name\n#define PRE_READ(p, s) COMMON_SYSCALL_PRE_READ_RANGE(p, s)\n#define PRE_WRITE(p, s) COMMON_SYSCALL_PRE_WRITE_RANGE(p, s)\n\n#define POST_SYSCALL(name)                                                     \\\n  SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_syscall_post_impl_##name\n#define POST_READ(p, s) COMMON_SYSCALL_POST_READ_RANGE(p, s)\n#define POST_WRITE(p, s) COMMON_SYSCALL_POST_WRITE_RANGE(p, s)\n\n#ifndef COMMON_SYSCALL_ACQUIRE\n#define COMMON_SYSCALL_ACQUIRE(addr) ((void)(addr))\n#endif\n\n#ifndef COMMON_SYSCALL_RELEASE\n#define COMMON_SYSCALL_RELEASE(addr) ((void)(addr))\n#endif\n\n#ifndef COMMON_SYSCALL_FD_CLOSE\n#define COMMON_SYSCALL_FD_CLOSE(fd) ((void)(fd))\n#endif\n\n#ifndef COMMON_SYSCALL_FD_ACQUIRE\n#define COMMON_SYSCALL_FD_ACQUIRE(fd) ((void)(fd))\n#endif\n\n#ifndef COMMON_SYSCALL_FD_RELEASE\n#define COMMON_SYSCALL_FD_RELEASE(fd) ((void)(fd))\n#endif\n\n#ifndef COMMON_SYSCALL_PRE_FORK\n#define COMMON_SYSCALL_PRE_FORK()                                              \\\n  {}\n#endif\n\n#ifndef COMMON_SYSCALL_POST_FORK\n#define COMMON_SYSCALL_POST_FORK(res)                                          \\\n  {}\n#endif\n\n// FIXME: do some kind of PRE_READ for all syscall arguments (int(s) and such).\n\nextern \"C\" {\n#define SYS_MAXSYSARGS 8\nPRE_SYSCALL(syscall)(long long code_, long long args_[SYS_MAXSYSARGS]) {\n  /* Nothing to do */\n}\nPOST_SYSCALL(syscall)\n(long long res, long long code_, long long args_[SYS_MAXSYSARGS]) {\n 
 /* Nothing to do */\n}\nPRE_SYSCALL(exit)(long long rval_) { /* Nothing to do */ }\nPOST_SYSCALL(exit)(long long res, long long rval_) { /* Nothing to do */ }\nPRE_SYSCALL(fork)(void) { COMMON_SYSCALL_PRE_FORK(); }\nPOST_SYSCALL(fork)(long long res) { COMMON_SYSCALL_POST_FORK(res); }\nPRE_SYSCALL(read)(long long fd_, void *buf_, long long nbyte_) {\n  if (buf_) {\n    PRE_WRITE(buf_, nbyte_);\n  }\n}\nPOST_SYSCALL(read)(long long res, long long fd_, void *buf_, long long nbyte_) {\n  if (res > 0) {\n    POST_WRITE(buf_, res);\n  }\n}\nPRE_SYSCALL(write)(long long fd_, void *buf_, long long nbyte_) {\n  if (buf_) {\n    PRE_READ(buf_, nbyte_);\n  }\n}\nPOST_SYSCALL(write)\n(long long res, long long fd_, void *buf_, long long nbyte_) {\n  if (res > 0) {\n    POST_READ(buf_, res);\n  }\n}\nPRE_SYSCALL(open)(void *path_, long long flags_, long long mode_) {\n  const char *path = (const char *)path_;\n  if (path) {\n    PRE_READ(path, __sanitizer::internal_strlen(path) + 1);\n  }\n}\nPOST_SYSCALL(open)\n(long long res, void *path_, long long flags_, long long mode_) {\n  if (res > 0) {\n    const char *path = (const char *)path_;\n    if (path) {\n      POST_READ(path, __sanitizer::internal_strlen(path) + 1);\n    }\n  }\n}\nPRE_SYSCALL(close)(long long fd_) { COMMON_SYSCALL_FD_CLOSE((int)fd_); }\nPOST_SYSCALL(close)(long long res, long long fd_) { /* Nothing to do */ }\nPRE_SYSCALL(compat_50_wait4)\n(long long pid_, void *status_, long long options_, void *rusage_) {\n  /* TODO */\n}\nPOST_SYSCALL(compat_50_wait4)\n(long long res, long long pid_, void *status_, long long options_,\n  void *rusage_) {\n  /* TODO */\n}\nPRE_SYSCALL(compat_43_ocreat)(void *path_, long long mode_) { /* TODO */ }\nPOST_SYSCALL(compat_43_ocreat)(long long res, void *path_, long long mode_) {\n  /* TODO */\n}\nPRE_SYSCALL(link)(void *path_, void *link_) {\n  const char *path = (const char *)path_;\n  const char *link = (const char *)link_;\n  if (path) {\n    PRE_READ(path, 
__sanitizer::internal_strlen(path) + 1);\n  }\n  if (link) {\n    PRE_READ(link, __sanitizer::internal_strlen(link) + 1);\n  }\n}\nPOST_SYSCALL(link)(long long res, void *path_, void *link_) {\n  if (res == 0) {\n    const char *path = (const char *)path_;\n    const char *link = (const char *)link_;\n    if (path) {\n      POST_READ(path, __sanitizer::internal_strlen(path) + 1);\n    }\n    if (link) {\n      POST_READ(link, __sanitizer::internal_strlen(link) + 1);\n    }\n  }\n}\nPRE_SYSCALL(unlink)(void *path_) {\n  const char *path = (const char *)path_;\n  if (path) {\n    PRE_READ(path, __sanitizer::internal_strlen(path) + 1);\n  }\n}\nPOST_SYSCALL(unlink)(long long res, void *path_) {\n  if (res == 0) {\n    const char *path = (const char *)path_;\n    if (path) {\n      POST_READ(path, __sanitizer::internal_strlen(path) + 1);\n    }\n  }\n}\n/* syscall 11 has been skipped */\nPRE_SYSCALL(chdir)(void *path_) {\n  const char *path = (const char *)path_;\n  if (path) {\n    PRE_READ(path, __sanitizer::internal_strlen(path) + 1);\n  }\n}\nPOST_SYSCALL(chdir)(long long res, void *path_) {\n  if (res == 0) {\n    const char *path = (const char *)path_;\n    if (path) {\n      POST_READ(path, __sanitizer::internal_strlen(path) + 1);\n    }\n  }\n}\nPRE_SYSCALL(fchdir)(long long fd_) { /* Nothing to do */ }\nPOST_SYSCALL(fchdir)(long long res, long long fd_) { /* Nothing to do */ }\nPRE_SYSCALL(compat_50_mknod)(void *path_, long long mode_, long long dev_) {\n  /* TODO */\n}\nPOST_SYSCALL(compat_50_mknod)\n(long long res, void *path_, long long mode_, long long dev_) {\n  /* TODO */\n}\nPRE_SYSCALL(chmod)(void *path_, long long mode_) {\n  const char *path = (const char *)path_;\n  if (path) {\n    PRE_READ(path, __sanitizer::internal_strlen(path) + 1);\n  }\n}\nPOST_SYSCALL(chmod)(long long res, void *path_, long long mode_) {\n  if (res == 0) {\n    const char *path = (const char *)path_;\n    if (path) {\n      POST_READ(path, __sanitizer::internal_strlen(path) 
+ 1);\n    }\n  }\n}\nPRE_SYSCALL(chown)(void *path_, long long uid_, long long gid_) {\n  const char *path = (const char *)path_;\n  if (path) {\n    PRE_READ(path, __sanitizer::internal_strlen(path) + 1);\n  }\n}\nPOST_SYSCALL(chown)\n(long long res, void *path_, long long uid_, long long gid_) {\n  if (res == 0) {\n    const char *path = (const char *)path_;\n    if (path) {\n      POST_READ(path, __sanitizer::internal_strlen(path) + 1);\n    }\n  }\n}\nPRE_SYSCALL(break)(void *nsize_) { /* Nothing to do */ }\nPOST_SYSCALL(break)(long long res, void *nsize_) { /* Nothing to do */ }\nPRE_SYSCALL(compat_20_getfsstat)\n(void *buf_, long long bufsize_, long long flags_) {\n  /* TODO */\n}\nPOST_SYSCALL(compat_20_getfsstat)\n(long long res, void *buf_, long long bufsize_, long long flags_) {\n  /* TODO */\n}\nPRE_SYSCALL(compat_43_olseek)\n(long long fd_, long long offset_, long long whence_) {\n  /* TODO */\n}\nPOST_SYSCALL(compat_43_olseek)\n(long long res, long long fd_, long long offset_, long long whence_) {\n  /* TODO */\n}\nPRE_SYSCALL(getpid)(void) { /* Nothing to do */ }\nPOST_SYSCALL(getpid)(long long res) { /* Nothing to do */ }\nPRE_SYSCALL(compat_40_mount)\n(void *type_, void *path_, long long flags_, void *data_) {\n  /* TODO */\n}\nPOST_SYSCALL(compat_40_mount)\n(long long res, void *type_, void *path_, long long flags_, void *data_) {\n  /* TODO */\n}\nPRE_SYSCALL(unmount)(void *path_, long long flags_) {\n  const char *path = (const char *)path_;\n  if (path) {\n    PRE_READ(path, __sanitizer::internal_strlen(path) + 1);\n  }\n}\nPOST_SYSCALL(unmount)(long long res, void *path_, long long flags_) {\n  if (res == 0) {\n    const char *path = (const char *)path_;\n    if (path) {\n      POST_READ(path, __sanitizer::internal_strlen(path) + 1);\n    }\n  }\n}\nPRE_SYSCALL(setuid)(long long uid_) { /* Nothing to do */ }\nPOST_SYSCALL(setuid)(long long res, long long uid_) { /* Nothing to do */ }\nPRE_SYSCALL(getuid)(void) { /* Nothing to do */ 
}\nPOST_SYSCALL(getuid)(long long res) { /* Nothing to do */ }\nPRE_SYSCALL(geteuid)(void) { /* Nothing to do */ }\nPOST_SYSCALL(geteuid)(long long res) { /* Nothing to do */ }\nPRE_SYSCALL(ptrace)\n(long long req_, long long pid_, void *addr_, long long data_) {\n  if (req_ == ptrace_pt_io) {\n    struct __sanitizer_ptrace_io_desc *addr =\n        (struct __sanitizer_ptrace_io_desc *)addr_;\n    PRE_READ(addr, struct_ptrace_ptrace_io_desc_struct_sz);\n    if (addr->piod_op == ptrace_piod_write_d ||\n        addr->piod_op == ptrace_piod_write_i) {\n      PRE_READ(addr->piod_addr, addr->piod_len);\n    }\n    if (addr->piod_op == ptrace_piod_read_d ||\n        addr->piod_op == ptrace_piod_read_i ||\n        addr->piod_op == ptrace_piod_read_auxv) {\n      PRE_WRITE(addr->piod_addr, addr->piod_len);\n    }\n  } else if (req_ == ptrace_pt_lwpinfo) {\n    struct __sanitizer_ptrace_lwpinfo *addr =\n        (struct __sanitizer_ptrace_lwpinfo *)addr_;\n    PRE_READ(&addr->pl_lwpid, sizeof(__sanitizer_lwpid_t));\n    PRE_WRITE(addr, struct_ptrace_ptrace_lwpinfo_struct_sz);\n  } else if (req_ == ptrace_pt_set_event_mask) {\n    PRE_READ(addr_, struct_ptrace_ptrace_event_struct_sz);\n  } else if (req_ == ptrace_pt_get_event_mask) {\n    PRE_WRITE(addr_, struct_ptrace_ptrace_event_struct_sz);\n  } else if (req_ == ptrace_pt_set_siginfo) {\n    PRE_READ(addr_, struct_ptrace_ptrace_siginfo_struct_sz);\n  } else if (req_ == ptrace_pt_get_siginfo) {\n    PRE_WRITE(addr_, struct_ptrace_ptrace_siginfo_struct_sz);\n  } else if (req_ == ptrace_pt_lwpstatus) {\n    struct __sanitizer_ptrace_lwpstatus *addr =\n        (struct __sanitizer_ptrace_lwpstatus *)addr_;\n    PRE_READ(&addr->pl_lwpid, sizeof(__sanitizer_lwpid_t));\n    PRE_WRITE(addr, struct_ptrace_ptrace_lwpstatus_struct_sz);\n  } else if (req_ == ptrace_pt_lwpnext) {\n    struct __sanitizer_ptrace_lwpstatus *addr =\n        (struct __sanitizer_ptrace_lwpstatus *)addr_;\n    PRE_READ(&addr->pl_lwpid, 
sizeof(__sanitizer_lwpid_t));\n    PRE_WRITE(addr, struct_ptrace_ptrace_lwpstatus_struct_sz);\n  } else if (req_ == ptrace_pt_setregs) {\n    PRE_READ(addr_, struct_ptrace_reg_struct_sz);\n  } else if (req_ == ptrace_pt_getregs) {\n    PRE_WRITE(addr_, struct_ptrace_reg_struct_sz);\n  } else if (req_ == ptrace_pt_setfpregs) {\n    PRE_READ(addr_, struct_ptrace_fpreg_struct_sz);\n  } else if (req_ == ptrace_pt_getfpregs) {\n    PRE_WRITE(addr_, struct_ptrace_fpreg_struct_sz);\n  } else if (req_ == ptrace_pt_setdbregs) {\n    PRE_READ(addr_, struct_ptrace_dbreg_struct_sz);\n  } else if (req_ == ptrace_pt_getdbregs) {\n    PRE_WRITE(addr_, struct_ptrace_dbreg_struct_sz);\n  }\n}\nPOST_SYSCALL(ptrace)\n(long long res, long long req_, long long pid_, void *addr_, long long data_) {\n  if (res == 0) {\n    if (req_ == ptrace_pt_io) {\n      struct __sanitizer_ptrace_io_desc *addr =\n          (struct __sanitizer_ptrace_io_desc *)addr_;\n      POST_READ(addr, struct_ptrace_ptrace_io_desc_struct_sz);\n      if (addr->piod_op == ptrace_piod_write_d ||\n          addr->piod_op == ptrace_piod_write_i) {\n        POST_READ(addr->piod_addr, addr->piod_len);\n      }\n      if (addr->piod_op == ptrace_piod_read_d ||\n          addr->piod_op == ptrace_piod_read_i ||\n          addr->piod_op == ptrace_piod_read_auxv) {\n        POST_WRITE(addr->piod_addr, addr->piod_len);\n      }\n    } else if (req_ == ptrace_pt_lwpinfo) {\n      struct __sanitizer_ptrace_lwpinfo *addr =\n          (struct __sanitizer_ptrace_lwpinfo *)addr_;\n      POST_READ(&addr->pl_lwpid, sizeof(__sanitizer_lwpid_t));\n      POST_WRITE(addr, struct_ptrace_ptrace_lwpinfo_struct_sz);\n    } else if (req_ == ptrace_pt_set_event_mask) {\n      POST_READ(addr_, struct_ptrace_ptrace_event_struct_sz);\n    } else if (req_ == ptrace_pt_get_event_mask) {\n      POST_WRITE(addr_, struct_ptrace_ptrace_event_struct_sz);\n    } else if (req_ == ptrace_pt_set_siginfo) {\n      POST_READ(addr_, 
struct_ptrace_ptrace_siginfo_struct_sz);\n    } else if (req_ == ptrace_pt_get_siginfo) {\n      POST_WRITE(addr_, struct_ptrace_ptrace_siginfo_struct_sz);\n    } else if (req_ == ptrace_pt_lwpstatus) {\n      struct __sanitizer_ptrace_lwpstatus *addr =\n          (struct __sanitizer_ptrace_lwpstatus *)addr_;\n      POST_READ(&addr->pl_lwpid, sizeof(__sanitizer_lwpid_t));\n      POST_WRITE(addr, struct_ptrace_ptrace_lwpstatus_struct_sz);\n    } else if (req_ == ptrace_pt_lwpnext) {\n      struct __sanitizer_ptrace_lwpstatus *addr =\n          (struct __sanitizer_ptrace_lwpstatus *)addr_;\n      POST_READ(&addr->pl_lwpid, sizeof(__sanitizer_lwpid_t));\n      POST_WRITE(addr, struct_ptrace_ptrace_lwpstatus_struct_sz);\n    } else if (req_ == ptrace_pt_setregs) {\n      POST_READ(addr_, struct_ptrace_reg_struct_sz);\n    } else if (req_ == ptrace_pt_getregs) {\n      POST_WRITE(addr_, struct_ptrace_reg_struct_sz);\n    } else if (req_ == ptrace_pt_setfpregs) {\n      POST_READ(addr_, struct_ptrace_fpreg_struct_sz);\n    } else if (req_ == ptrace_pt_getfpregs) {\n      POST_WRITE(addr_, struct_ptrace_fpreg_struct_sz);\n    } else if (req_ == ptrace_pt_setdbregs) {\n      POST_READ(addr_, struct_ptrace_dbreg_struct_sz);\n    } else if (req_ == ptrace_pt_getdbregs) {\n      POST_WRITE(addr_, struct_ptrace_dbreg_struct_sz);\n    }\n  }\n}\nPRE_SYSCALL(recvmsg)(long long s_, void *msg_, long long flags_) {\n  PRE_WRITE(msg_, sizeof(__sanitizer_msghdr));\n}\nPOST_SYSCALL(recvmsg)\n(long long res, long long s_, void *msg_, long long flags_) {\n  if (res > 0) {\n    POST_WRITE(msg_, sizeof(__sanitizer_msghdr));\n  }\n}\nPRE_SYSCALL(sendmsg)(long long s_, void *msg_, long long flags_) {\n  PRE_READ(msg_, sizeof(__sanitizer_msghdr));\n}\nPOST_SYSCALL(sendmsg)\n(long long res, long long s_, void *msg_, long long flags_) {\n  if (res > 0) {\n    POST_READ(msg_, sizeof(__sanitizer_msghdr));\n  }\n}\nPRE_SYSCALL(recvfrom)\n(long long s_, void *buf_, long long len_, long long 
flags_, void *from_,\n  void *fromlenaddr_) {\n  PRE_WRITE(buf_, len_);\n  PRE_WRITE(from_, struct_sockaddr_sz);\n  PRE_WRITE(fromlenaddr_, sizeof(__sanitizer_socklen_t));\n}\nPOST_SYSCALL(recvfrom)\n(long long res, long long s_, void *buf_, long long len_, long long flags_,\n  void *from_, void *fromlenaddr_) {\n  if (res >= 0) {\n    POST_WRITE(buf_, res);\n    POST_WRITE(from_, struct_sockaddr_sz);\n    POST_WRITE(fromlenaddr_, sizeof(__sanitizer_socklen_t));\n  }\n}\nPRE_SYSCALL(accept)(long long s_, void *name_, void *anamelen_) {\n  PRE_WRITE(name_, struct_sockaddr_sz);\n  PRE_WRITE(anamelen_, sizeof(__sanitizer_socklen_t));\n}\nPOST_SYSCALL(accept)\n(long long res, long long s_, void *name_, void *anamelen_) {\n  if (res == 0) {\n    POST_WRITE(name_, struct_sockaddr_sz);\n    POST_WRITE(anamelen_, sizeof(__sanitizer_socklen_t));\n  }\n}\nPRE_SYSCALL(getpeername)(long long fdes_, void *asa_, void *alen_) {\n  PRE_WRITE(asa_, struct_sockaddr_sz);\n  PRE_WRITE(alen_, sizeof(__sanitizer_socklen_t));\n}\nPOST_SYSCALL(getpeername)\n(long long res, long long fdes_, void *asa_, void *alen_) {\n  if (res == 0) {\n    POST_WRITE(asa_, struct_sockaddr_sz);\n    POST_WRITE(alen_, sizeof(__sanitizer_socklen_t));\n  }\n}\nPRE_SYSCALL(getsockname)(long long fdes_, void *asa_, void *alen_) {\n  PRE_WRITE(asa_, struct_sockaddr_sz);\n  PRE_WRITE(alen_, sizeof(__sanitizer_socklen_t));\n}\nPOST_SYSCALL(getsockname)\n(long long res, long long fdes_, void *asa_, void *alen_) {\n  if (res == 0) {\n    POST_WRITE(asa_, struct_sockaddr_sz);\n    POST_WRITE(alen_, sizeof(__sanitizer_socklen_t));\n  }\n}\nPRE_SYSCALL(access)(void *path_, long long flags_) {\n  const char *path = (const char *)path_;\n  if (path) {\n    PRE_READ(path, __sanitizer::internal_strlen(path) + 1);\n  }\n}\nPOST_SYSCALL(access)(long long res, void *path_, long long flags_) {\n  if (res == 0) {\n    const char *path = (const char *)path_;\n    if (path) {\n      POST_READ(path, 
__sanitizer::internal_strlen(path) + 1);\n    }\n  }\n}\nPRE_SYSCALL(chflags)(void *path_, long long flags_) {\n  const char *path = (const char *)path_;\n  if (path) {\n    PRE_READ(path, __sanitizer::internal_strlen(path) + 1);\n  }\n}\nPOST_SYSCALL(chflags)(long long res, void *path_, long long flags_) {\n  if (res == 0) {\n    const char *path = (const char *)path_;\n    if (path) {\n      POST_READ(path, __sanitizer::internal_strlen(path) + 1);\n    }\n  }\n}\nPRE_SYSCALL(fchflags)(long long fd_, long long flags_) { /* Nothing to do */ }\nPOST_SYSCALL(fchflags)(long long res, long long fd_, long long flags_) {\n  /* Nothing to do */\n}\nPRE_SYSCALL(sync)(void) { /* Nothing to do */ }\nPOST_SYSCALL(sync)(long long res) { /* Nothing to do */ }\nPRE_SYSCALL(kill)(long long pid_, long long signum_) { /* Nothing to do */ }\nPOST_SYSCALL(kill)(long long res, long long pid_, long long signum_) {\n  /* Nothing to do */\n}\nPRE_SYSCALL(compat_43_stat43)(void *path_, void *ub_) { /* TODO */ }\nPOST_SYSCALL(compat_43_stat43)(long long res, void *path_, void *ub_) {\n  /* TODO */\n}\nPRE_SYSCALL(getppid)(void) { /* Nothing to do */ }\nPOST_SYSCALL(getppid)(long long res) { /* Nothing to do */ }\nPRE_SYSCALL(compat_43_lstat43)(void *path_, void *ub_) { /* TODO */ }\nPOST_SYSCALL(compat_43_lstat43)(long long res, void *path_, void *ub_) {\n  /* TODO */\n}\nPRE_SYSCALL(dup)(long long fd_) { /* Nothing to do */ }\nPOST_SYSCALL(dup)(long long res, long long fd_) { /* Nothing to do */ }\nPRE_SYSCALL(pipe)(void) {\n  /* pipe returns two descriptors through two returned values */\n}\nPOST_SYSCALL(pipe)(long long res) {\n  /* pipe returns two descriptors through two returned values */\n}\nPRE_SYSCALL(getegid)(void) { /* Nothing to do */ }\nPOST_SYSCALL(getegid)(long long res) { /* Nothing to do */ }\nPRE_SYSCALL(profil)\n(void *samples_, long long size_, long long offset_, long long scale_) {\n  if (samples_) {\n    PRE_WRITE(samples_, size_);\n  }\n}\nPOST_SYSCALL(profil)\n(long 
long res, void *samples_, long long size_, long long offset_,\n  long long scale_) {\n  if (res == 0) {\n    if (samples_) {\n      POST_WRITE(samples_, size_);\n    }\n  }\n}\nPRE_SYSCALL(ktrace)\n(void *fname_, long long ops_, long long facs_, long long pid_) {\n  const char *fname = (const char *)fname_;\n  if (fname) {\n    PRE_READ(fname, __sanitizer::internal_strlen(fname) + 1);\n  }\n}\nPOST_SYSCALL(ktrace)\n(long long res, void *fname_, long long ops_, long long facs_, long long pid_) {\n  const char *fname = (const char *)fname_;\n  if (res == 0) {\n    if (fname) {\n      POST_READ(fname, __sanitizer::internal_strlen(fname) + 1);\n    }\n  }\n}\nPRE_SYSCALL(compat_13_sigaction13)(long long signum_, void *nsa_, void *osa_) {\n  /* TODO */\n}\nPOST_SYSCALL(compat_13_sigaction13)\n(long long res, long long signum_, void *nsa_, void *osa_) {\n  /* TODO */\n}\nPRE_SYSCALL(getgid)(void) { /* Nothing to do */ }\nPOST_SYSCALL(getgid)(long long res) { /* Nothing to do */ }\nPRE_SYSCALL(compat_13_sigprocmask13)(long long how_, long long mask_) {\n  /* TODO */\n}\nPOST_SYSCALL(compat_13_sigprocmask13)\n(long long res, long long how_, long long mask_) {\n  /* TODO */\n}\nPRE_SYSCALL(__getlogin)(void *namebuf_, long long namelen_) {\n  if (namebuf_) {\n    PRE_WRITE(namebuf_, namelen_);\n  }\n}\nPOST_SYSCALL(__getlogin)(long long res, void *namebuf_, long long namelen_) {\n  if (res == 0) {\n    if (namebuf_) {\n      POST_WRITE(namebuf_, namelen_);\n    }\n  }\n}\nPRE_SYSCALL(__setlogin)(void *namebuf_) {\n  const char *namebuf = (const char *)namebuf_;\n  if (namebuf) {\n    PRE_READ(namebuf, __sanitizer::internal_strlen(namebuf) + 1);\n  }\n}\nPOST_SYSCALL(__setlogin)(long long res, void *namebuf_) {\n  if (res == 0) {\n    const char *namebuf = (const char *)namebuf_;\n    if (namebuf) {\n      POST_READ(namebuf, __sanitizer::internal_strlen(namebuf) + 1);\n    }\n  }\n}\nPRE_SYSCALL(acct)(void *path_) {\n  const char *path = (const char *)path_;\n  if (path) {\n  
  PRE_READ(path, __sanitizer::internal_strlen(path) + 1);\n  }\n}\nPOST_SYSCALL(acct)(long long res, void *path_) {\n  if (res == 0) {\n    const char *path = (const char *)path_;\n    if (path) {\n      POST_READ(path, __sanitizer::internal_strlen(path) + 1);\n    }\n  }\n}\nPRE_SYSCALL(compat_13_sigpending13)(void) { /* TODO */ }\nPOST_SYSCALL(compat_13_sigpending13)(long long res) { /* TODO */ }\nPRE_SYSCALL(compat_13_sigaltstack13)(void *nss_, void *oss_) { /* TODO */ }\nPOST_SYSCALL(compat_13_sigaltstack13)(long long res, void *nss_, void *oss_) {\n  /* TODO */\n}\nPRE_SYSCALL(ioctl)(long long fd_, long long com_, void *data_) {\n  /* Nothing to do */\n}\nPOST_SYSCALL(ioctl)(long long res, long long fd_, long long com_, void *data_) {\n  /* Nothing to do */\n}\nPRE_SYSCALL(compat_12_oreboot)(long long opt_) { /* TODO */ }\nPOST_SYSCALL(compat_12_oreboot)(long long res, long long opt_) { /* TODO */ }\nPRE_SYSCALL(revoke)(void *path_) {\n  const char *path = (const char *)path_;\n  if (path) {\n    PRE_READ(path, __sanitizer::internal_strlen(path) + 1);\n  }\n}\nPOST_SYSCALL(revoke)(long long res, void *path_) {\n  if (res == 0) {\n    const char *path = (const char *)path_;\n    if (path) {\n      POST_READ(path, __sanitizer::internal_strlen(path) + 1);\n    }\n  }\n}\nPRE_SYSCALL(symlink)(void *path_, void *link_) {\n  const char *path = (const char *)path_;\n  const char *link = (const char *)link_;\n  if (path) {\n    PRE_READ(path, __sanitizer::internal_strlen(path) + 1);\n  }\n  if (link) {\n    PRE_READ(link, __sanitizer::internal_strlen(link) + 1);\n  }\n}\nPOST_SYSCALL(symlink)(long long res, void *path_, void *link_) {\n  if (res == 0) {\n    const char *path = (const char *)path_;\n    const char *link = (const char *)link_;\n    if (path) {\n      POST_READ(path, __sanitizer::internal_strlen(path) + 1);\n    }\n    if (link) {\n      POST_READ(link, __sanitizer::internal_strlen(link) + 1);\n    }\n  }\n}\nPRE_SYSCALL(readlink)(void *path_, void 
*buf_, long long count_) {\n  const char *path = (const char *)path_;\n  if (path) {\n    PRE_READ(path, __sanitizer::internal_strlen(path) + 1);\n  }\n  if (buf_) {\n    PRE_WRITE(buf_, count_);\n  }\n}\nPOST_SYSCALL(readlink)\n(long long res, void *path_, void *buf_, long long count_) {\n  if (res > 0) {\n    const char *path = (const char *)path_;\n    if (path) {\n      POST_READ(path, __sanitizer::internal_strlen(path) + 1);\n    }\n    if (buf_) {\n      POST_WRITE(buf_, res);\n    }\n  }\n}\nPRE_SYSCALL(execve)(void *path_, void *argp_, void *envp_) {\n  const char *path = (const char *)path_;\n  char **argp = (char **)argp_;\n  char **envp = (char **)envp_;\n  if (path) {\n    PRE_READ(path, __sanitizer::internal_strlen(path) + 1);\n  }\n  if (argp && argp[0]) {\n    char *a = argp[0];\n    while (a++) {\n      PRE_READ(a, __sanitizer::internal_strlen(a) + 1);\n    }\n  }\n  if (envp && envp[0]) {\n    char *e = envp[0];\n    while (e++) {\n      PRE_READ(e, __sanitizer::internal_strlen(e) + 1);\n    }\n  }\n}\nPOST_SYSCALL(execve)(long long res, void *path_, void *argp_, void *envp_) {\n  /* If we are here, something went wrong */\n  const char *path = (const char *)path_;\n  char **argp = (char **)argp_;\n  char **envp = (char **)envp_;\n  if (path) {\n    POST_READ(path, __sanitizer::internal_strlen(path) + 1);\n  }\n  if (argp && argp[0]) {\n    char *a = argp[0];\n    while (a++) {\n      POST_READ(a, __sanitizer::internal_strlen(a) + 1);\n    }\n  }\n  if (envp && envp[0]) {\n    char *e = envp[0];\n    while (e++) {\n      POST_READ(e, __sanitizer::internal_strlen(e) + 1);\n    }\n  }\n}\nPRE_SYSCALL(umask)(long long newmask_) { /* Nothing to do */ }\nPOST_SYSCALL(umask)(long long res, long long newmask_) { /* Nothing to do */ }\nPRE_SYSCALL(chroot)(void *path_) {\n  const char *path = (const char *)path_;\n  if (path) {\n    PRE_READ(path, __sanitizer::internal_strlen(path) + 1);\n  }\n}\nPOST_SYSCALL(chroot)(long long res, void *path_) {\n  if (res 
== 0) {\n    const char *path = (const char *)path_;\n    if (path) {\n      POST_READ(path, __sanitizer::internal_strlen(path) + 1);\n    }\n  }\n}\nPRE_SYSCALL(compat_43_fstat43)(long long fd_, void *sb_) { /* TODO */ }\nPOST_SYSCALL(compat_43_fstat43)(long long res, long long fd_, void *sb_) {\n  /* TODO */\n}\nPRE_SYSCALL(compat_43_ogetkerninfo)\n(long long op_, void *where_, void *size_, long long arg_) {\n  /* TODO */\n}\nPOST_SYSCALL(compat_43_ogetkerninfo)\n(long long res, long long op_, void *where_, void *size_, long long arg_) {\n  /* TODO */\n}\nPRE_SYSCALL(compat_43_ogetpagesize)(void) { /* TODO */ }\nPOST_SYSCALL(compat_43_ogetpagesize)(long long res) { /* TODO */ }\nPRE_SYSCALL(compat_12_msync)(void *addr_, long long len_) { /* TODO */ }\nPOST_SYSCALL(compat_12_msync)(long long res, void *addr_, long long len_) {\n  /* TODO */\n}\nPRE_SYSCALL(vfork)(void) { /* Nothing to do */ }\nPOST_SYSCALL(vfork)(long long res) { /* Nothing to do */ }\n/* syscall 67 has been skipped */\n/* syscall 68 has been skipped */\n/* syscall 69 has been skipped */\n/* syscall 70 has been skipped */\nPRE_SYSCALL(compat_43_ommap)\n(void *addr_, long long len_, long long prot_, long long flags_, long long fd_,\n  long long pos_) {\n  /* TODO */\n}\nPOST_SYSCALL(compat_43_ommap)\n(long long res, void *addr_, long long len_, long long prot_, long long flags_,\n  long long fd_, long long pos_) {\n  /* TODO */\n}\nPRE_SYSCALL(vadvise)(long long anom_) { /* Nothing to do */ }\nPOST_SYSCALL(vadvise)(long long res, long long anom_) { /* Nothing to do */ }\nPRE_SYSCALL(munmap)(void *addr_, long long len_) { /* Nothing to do */ }\nPOST_SYSCALL(munmap)(long long res, void *addr_, long long len_) {\n  /* Nothing to do */\n}\nPRE_SYSCALL(mprotect)(void *addr_, long long len_, long long prot_) {\n  /* Nothing to do */\n}\nPOST_SYSCALL(mprotect)\n(long long res, void *addr_, long long len_, long long prot_) {\n  /* Nothing to do */\n}\nPRE_SYSCALL(madvise)(void *addr_, long long len_, long 
long behav_) {\n  /* Nothing to do */\n}\nPOST_SYSCALL(madvise)\n(long long res, void *addr_, long long len_, long long behav_) {\n  /* Nothing to do */\n}\n/* syscall 76 has been skipped */\n/* syscall 77 has been skipped */\nPRE_SYSCALL(mincore)(void *addr_, long long len_, void *vec_) {\n  /* Nothing to do */\n}\nPOST_SYSCALL(mincore)(long long res, void *addr_, long long len_, void *vec_) {\n  /* Nothing to do */\n}\nPRE_SYSCALL(getgroups)(long long gidsetsize_, void *gidset_) {\n  unsigned int *gidset = (unsigned int *)gidset_;\n  if (gidset) {\n    PRE_WRITE(gidset, sizeof(*gidset) * gidsetsize_);\n  }\n}\nPOST_SYSCALL(getgroups)(long long res, long long gidsetsize_, void *gidset_) {\n  if (res == 0) {\n    unsigned int *gidset = (unsigned int *)gidset_;\n    if (gidset) {\n      POST_WRITE(gidset, sizeof(*gidset) * gidsetsize_);\n    }\n  }\n}\nPRE_SYSCALL(setgroups)(long long gidsetsize_, void *gidset_) {\n  unsigned int *gidset = (unsigned int *)gidset_;\n  if (gidset) {\n    PRE_READ(gidset, sizeof(*gidset) * gidsetsize_);\n  }\n}\nPOST_SYSCALL(setgroups)(long long res, long long gidsetsize_, void *gidset_) {\n  if (res == 0) {\n    unsigned int *gidset = (unsigned int *)gidset_;\n    if (gidset) {\n      POST_READ(gidset, sizeof(*gidset) * gidsetsize_);\n    }\n  }\n}\nPRE_SYSCALL(getpgrp)(void) { /* Nothing to do */ }\nPOST_SYSCALL(getpgrp)(long long res) { /* Nothing to do */ }\nPRE_SYSCALL(setpgid)(long long pid_, long long pgid_) { /* Nothing to do */ }\nPOST_SYSCALL(setpgid)(long long res, long long pid_, long long pgid_) {\n  /* Nothing to do */\n}\nPRE_SYSCALL(compat_50_setitimer)(long long which_, void *itv_, void *oitv_) {\n  /* TODO */\n}\nPOST_SYSCALL(compat_50_setitimer)\n(long long res, long long which_, void *itv_, void *oitv_) {\n  /* TODO */\n}\nPRE_SYSCALL(compat_43_owait)(void) { /* TODO */ }\nPOST_SYSCALL(compat_43_owait)(long long res) { /* TODO */ }\nPRE_SYSCALL(compat_12_oswapon)(void *name_) { /* TODO */ 
}\nPOST_SYSCALL(compat_12_oswapon)(long long res, void *name_) { /* TODO */ }\nPRE_SYSCALL(compat_50_getitimer)(long long which_, void *itv_) { /* TODO */ }\nPOST_SYSCALL(compat_50_getitimer)(long long res, long long which_, void *itv_) {\n  /* TODO */\n}\nPRE_SYSCALL(compat_43_ogethostname)(void *hostname_, long long len_) {\n  /* TODO */\n}\nPOST_SYSCALL(compat_43_ogethostname)\n(long long res, void *hostname_, long long len_) {\n  /* TODO */\n}\nPRE_SYSCALL(compat_43_osethostname)(void *hostname_, long long len_) {\n  /* TODO */\n}\nPOST_SYSCALL(compat_43_osethostname)\n(long long res, void *hostname_, long long len_) {\n  /* TODO */\n}\nPRE_SYSCALL(compat_43_ogetdtablesize)(void) { /* TODO */ }\nPOST_SYSCALL(compat_43_ogetdtablesize)(long long res) { /* TODO */ }\nPRE_SYSCALL(dup2)(long long from_, long long to_) { /* Nothing to do */ }\nPOST_SYSCALL(dup2)(long long res, long long from_, long long to_) {\n  /* Nothing to do */\n}\nPRE_SYSCALL(getrandom)(void *buf_, long long buflen_, long long flags_) {\n  /* TODO */\n}\nPOST_SYSCALL(getrandom)\n(long long res, void *buf_, long long buflen_, long long flags_) {\n  /* TODO */\n}\nPRE_SYSCALL(fcntl)(long long fd_, long long cmd_, void *arg_) {\n  /* Nothing to do */\n}\nPOST_SYSCALL(fcntl)(long long res, long long fd_, long long cmd_, void *arg_) {\n  /* Nothing to do */\n}\nPRE_SYSCALL(compat_50_select)\n(long long nd_, void *in_, void *ou_, void *ex_, void *tv_) {\n  /* TODO */\n}\nPOST_SYSCALL(compat_50_select)\n(long long res, long long nd_, void *in_, void *ou_, void *ex_, void *tv_) {\n  /* TODO */\n}\n/* syscall 94 has been skipped */\nPRE_SYSCALL(fsync)(long long fd_) { /* Nothing to do */ }\nPOST_SYSCALL(fsync)(long long res, long long fd_) { /* Nothing to do */ }\nPRE_SYSCALL(setpriority)(long long which_, long long who_, long long prio_) {\n  /* Nothing to do */\n}\nPOST_SYSCALL(setpriority)\n(long long res, long long which_, long long who_, long long prio_) {\n  /* Nothing to do 
*/\n}\nPRE_SYSCALL(compat_30_socket)\n(long long domain_, long long type_, long long protocol_) {\n  /* TODO */\n}\nPOST_SYSCALL(compat_30_socket)\n(long long res, long long domain_, long long type_, long long protocol_) {\n  /* TODO */\n}\nPRE_SYSCALL(connect)(long long s_, void *name_, long long namelen_) {\n  PRE_READ(name_, namelen_);\n}\nPOST_SYSCALL(connect)\n(long long res, long long s_, void *name_, long long namelen_) {\n  if (res == 0) {\n    POST_READ(name_, namelen_);\n  }\n}\nPRE_SYSCALL(compat_43_oaccept)(long long s_, void *name_, void *anamelen_) {\n  /* TODO */\n}\nPOST_SYSCALL(compat_43_oaccept)\n(long long res, long long s_, void *name_, void *anamelen_) {\n  /* TODO */\n}\nPRE_SYSCALL(getpriority)(long long which_, long long who_) {\n  /* Nothing to do */\n}\nPOST_SYSCALL(getpriority)(long long res, long long which_, long long who_) {\n  /* Nothing to do */\n}\nPRE_SYSCALL(compat_43_osend)\n(long long s_, void *buf_, long long len_, long long flags_) {\n  /* TODO */\n}\nPOST_SYSCALL(compat_43_osend)\n(long long res, long long s_, void *buf_, long long len_, long long flags_) {\n  /* TODO */\n}\nPRE_SYSCALL(compat_43_orecv)\n(long long s_, void *buf_, long long len_, long long flags_) {\n  /* TODO */\n}\nPOST_SYSCALL(compat_43_orecv)\n(long long res, long long s_, void *buf_, long long len_, long long flags_) {\n  /* TODO */\n}\nPRE_SYSCALL(compat_13_sigreturn13)(void *sigcntxp_) { /* TODO */ }\nPOST_SYSCALL(compat_13_sigreturn13)(long long res, void *sigcntxp_) {\n  /* TODO */\n}\nPRE_SYSCALL(bind)(long long s_, void *name_, long long namelen_) {\n  PRE_READ(name_, namelen_);\n}\nPOST_SYSCALL(bind)\n(long long res, long long s_, void *name_, long long namelen_) {\n  if (res == 0) {\n    PRE_READ(name_, namelen_);\n  }\n}\nPRE_SYSCALL(setsockopt)\n(long long s_, long long level_, long long name_, void *val_,\n  long long valsize_) {\n  if (val_) {\n    PRE_READ(val_, valsize_);\n  }\n}\nPOST_SYSCALL(setsockopt)\n(long long res, long long s_, long 
long level_, long long name_, void *val_,\n  long long valsize_) {\n  if (res == 0) {\n    if (val_) {\n      POST_READ(val_, valsize_);\n    }\n  }\n}\nPRE_SYSCALL(listen)(long long s_, long long backlog_) { /* Nothing to do */ }\nPOST_SYSCALL(listen)(long long res, long long s_, long long backlog_) {\n  /* Nothing to do */\n}\n/* syscall 107 has been skipped */\nPRE_SYSCALL(compat_43_osigvec)(long long signum_, void *nsv_, void *osv_) {\n  /* TODO */\n}\nPOST_SYSCALL(compat_43_osigvec)\n(long long res, long long signum_, void *nsv_, void *osv_) {\n  /* TODO */\n}\nPRE_SYSCALL(compat_43_osigblock)(long long mask_) { /* TODO */ }\nPOST_SYSCALL(compat_43_osigblock)(long long res, long long mask_) { /* TODO */ }\nPRE_SYSCALL(compat_43_osigsetmask)(long long mask_) { /* TODO */ }\nPOST_SYSCALL(compat_43_osigsetmask)(long long res, long long mask_) {\n  /* TODO */\n}\nPRE_SYSCALL(compat_13_sigsuspend13)(long long mask_) { /* TODO */ }\nPOST_SYSCALL(compat_13_sigsuspend13)(long long res, long long mask_) {\n  /* TODO */\n}\nPRE_SYSCALL(compat_43_osigstack)(void *nss_, void *oss_) { /* TODO */ }\nPOST_SYSCALL(compat_43_osigstack)(long long res, void *nss_, void *oss_) {\n  /* TODO */\n}\nPRE_SYSCALL(compat_43_orecvmsg)(long long s_, void *msg_, long long flags_) {\n  /* TODO */\n}\nPOST_SYSCALL(compat_43_orecvmsg)\n(long long res, long long s_, void *msg_, long long flags_) {\n  /* TODO */\n}\nPRE_SYSCALL(compat_43_osendmsg)(long long s_, void *msg_, long long flags_) {\n  /* TODO */\n}\nPOST_SYSCALL(compat_43_osendmsg)\n(long long res, long long s_, void *msg_, long long flags_) {\n  /* TODO */\n}\n/* syscall 115 has been skipped */\nPRE_SYSCALL(compat_50_gettimeofday)(void *tp_, void *tzp_) { /* TODO */ }\nPOST_SYSCALL(compat_50_gettimeofday)(long long res, void *tp_, void *tzp_) {\n  /* TODO */\n}\nPRE_SYSCALL(compat_50_getrusage)(long long who_, void *rusage_) { /* TODO */ }\nPOST_SYSCALL(compat_50_getrusage)\n(long long res, long long who_, void *rusage_) {\n  /* 
TODO */\n}\nPRE_SYSCALL(getsockopt)\n(long long s_, long long level_, long long name_, void *val_, void *avalsize_) {\n  /* TODO */\n}\nPOST_SYSCALL(getsockopt)\n(long long res, long long s_, long long level_, long long name_, void *val_,\n  void *avalsize_) {\n  /* TODO */\n}\n/* syscall 119 has been skipped */\nPRE_SYSCALL(readv)(long long fd_, void *iovp_, long long iovcnt_) {\n  struct __sanitizer_iovec *iovp = (struct __sanitizer_iovec *)iovp_;\n  int i;\n  if (iovp) {\n    PRE_READ(iovp, sizeof(struct __sanitizer_iovec) * iovcnt_);\n    for (i = 0; i < iovcnt_; i++) {\n      PRE_WRITE(iovp[i].iov_base, iovp[i].iov_len);\n    }\n  }\n}\nPOST_SYSCALL(readv)\n(long long res, long long fd_, void *iovp_, long long iovcnt_) {\n  struct __sanitizer_iovec *iovp = (struct __sanitizer_iovec *)iovp_;\n  int i;\n  uptr m, n = res;\n  if (res > 0) {\n    if (iovp) {\n      POST_READ(iovp, sizeof(struct __sanitizer_iovec) * iovcnt_);\n      for (i = 0; i < iovcnt_ && n > 0; i++) {\n        m = n > iovp[i].iov_len ? iovp[i].iov_len : n;\n        POST_WRITE(iovp[i].iov_base, m);\n        n -= m;\n      }\n    }\n  }\n}\nPRE_SYSCALL(writev)(long long fd_, void *iovp_, long long iovcnt_) {\n  struct __sanitizer_iovec *iovp = (struct __sanitizer_iovec *)iovp_;\n  int i;\n  if (iovp) {\n    PRE_READ(iovp, sizeof(struct __sanitizer_iovec) * iovcnt_);\n    for (i = 0; i < iovcnt_; i++) {\n      PRE_READ(iovp[i].iov_base, iovp[i].iov_len);\n    }\n  }\n}\nPOST_SYSCALL(writev)\n(long long res, long long fd_, void *iovp_, long long iovcnt_) {\n  struct __sanitizer_iovec *iovp = (struct __sanitizer_iovec *)iovp_;\n  int i;\n  uptr m, n = res;\n  if (res > 0) {\n    if (iovp) {\n      POST_READ(iovp, sizeof(struct __sanitizer_iovec) * iovcnt_);\n      for (i = 0; i < iovcnt_ && n > 0; i++) {\n        m = n > iovp[i].iov_len ? 
iovp[i].iov_len : n;\n        POST_READ(iovp[i].iov_base, m);\n        n -= m;\n      }\n    }\n  }\n}\nPRE_SYSCALL(compat_50_settimeofday)(void *tv_, void *tzp_) { /* TODO */ }\nPOST_SYSCALL(compat_50_settimeofday)(long long res, void *tv_, void *tzp_) {\n  /* TODO */\n}\nPRE_SYSCALL(fchown)(long long fd_, long long uid_, long long gid_) {\n  /* Nothing to do */\n}\nPOST_SYSCALL(fchown)\n(long long res, long long fd_, long long uid_, long long gid_) {\n  /* Nothing to do */\n}\nPRE_SYSCALL(fchmod)(long long fd_, long long mode_) { /* Nothing to do */ }\nPOST_SYSCALL(fchmod)(long long res, long long fd_, long long mode_) {\n  /* Nothing to do */\n}\nPRE_SYSCALL(compat_43_orecvfrom)\n(long long s_, void *buf_, long long len_, long long flags_, void *from_,\n  void *fromlenaddr_) {\n  /* TODO */\n}\nPOST_SYSCALL(compat_43_orecvfrom)\n(long long res, long long s_, void *buf_, long long len_, long long flags_,\n  void *from_, void *fromlenaddr_) {\n  /* TODO */\n}\nPRE_SYSCALL(setreuid)(long long ruid_, long long euid_) { /* Nothing to do */ }\nPOST_SYSCALL(setreuid)(long long res, long long ruid_, long long euid_) {\n  /* Nothing to do */\n}\nPRE_SYSCALL(setregid)(long long rgid_, long long egid_) { /* Nothing to do */ }\nPOST_SYSCALL(setregid)(long long res, long long rgid_, long long egid_) {\n  /* Nothing to do */\n}\nPRE_SYSCALL(rename)(void *from_, void *to_) {\n  const char *from = (const char *)from_;\n  const char *to = (const char *)to_;\n  if (from) {\n    PRE_READ(from, __sanitizer::internal_strlen(from) + 1);\n  }\n  if (to) {\n    PRE_READ(to, __sanitizer::internal_strlen(to) + 1);\n  }\n}\nPOST_SYSCALL(rename)(long long res, void *from_, void *to_) {\n  if (res == 0) {\n    const char *from = (const char *)from_;\n    const char *to = (const char *)to_;\n    if (from) {\n      POST_READ(from, __sanitizer::internal_strlen(from) + 1);\n    }\n    if (to) {\n      POST_READ(to, __sanitizer::internal_strlen(to) + 1);\n    }\n  
}\n}\nPRE_SYSCALL(compat_43_otruncate)(void *path_, long long length_) { /* TODO */ }\nPOST_SYSCALL(compat_43_otruncate)\n(long long res, void *path_, long long length_) {\n  /* TODO */\n}\nPRE_SYSCALL(compat_43_oftruncate)(long long fd_, long long length_) {\n  /* TODO */\n}\nPOST_SYSCALL(compat_43_oftruncate)\n(long long res, long long fd_, long long length_) {\n  /* TODO */\n}\nPRE_SYSCALL(flock)(long long fd_, long long how_) { /* Nothing to do */ }\nPOST_SYSCALL(flock)(long long res, long long fd_, long long how_) {\n  /* Nothing to do */\n}\nPRE_SYSCALL(mkfifo)(void *path_, long long mode_) {\n  const char *path = (const char *)path_;\n  if (path) {\n    PRE_READ(path, __sanitizer::internal_strlen(path) + 1);\n  }\n}\nPOST_SYSCALL(mkfifo)(long long res, void *path_, long long mode_) {\n  if (res == 0) {\n    const char *path = (const char *)path_;\n    if (path) {\n      POST_READ(path, __sanitizer::internal_strlen(path) + 1);\n    }\n  }\n}\nPRE_SYSCALL(sendto)\n(long long s_, void *buf_, long long len_, long long flags_, void *to_,\n  long long tolen_) {\n  PRE_READ(buf_, len_);\n  PRE_READ(to_, tolen_);\n}\nPOST_SYSCALL(sendto)\n(long long res, long long s_, void *buf_, long long len_, long long flags_,\n  void *to_, long long tolen_) {\n  if (res >= 0) {\n    POST_READ(buf_, len_);\n    POST_READ(to_, tolen_);\n  }\n}\nPRE_SYSCALL(shutdown)(long long s_, long long how_) { /* Nothing to do */ }\nPOST_SYSCALL(shutdown)(long long res, long long s_, long long how_) {\n  /* Nothing to do */\n}\nPRE_SYSCALL(socketpair)\n(long long domain_, long long type_, long long protocol_, void *rsv_) {\n  PRE_WRITE(rsv_, 2 * sizeof(int));\n}\nPOST_SYSCALL(socketpair)\n(long long res, long long domain_, long long type_, long long protocol_,\n  void *rsv_) {\n  if (res == 0) {\n    POST_WRITE(rsv_, 2 * sizeof(int));\n  }\n}\nPRE_SYSCALL(mkdir)(void *path_, long long mode_) {\n  const char *path = (const char *)path_;\n  if (path) {\n    PRE_READ(path, 
__sanitizer::internal_strlen(path) + 1);\n  }\n}\nPOST_SYSCALL(mkdir)(long long res, void *path_, long long mode_) {\n  if (res == 0) {\n    const char *path = (const char *)path_;\n    if (path) {\n      POST_READ(path, __sanitizer::internal_strlen(path) + 1);\n    }\n  }\n}\nPRE_SYSCALL(rmdir)(void *path_) {\n  const char *path = (const char *)path_;\n  if (path) {\n    PRE_READ(path, __sanitizer::internal_strlen(path) + 1);\n  }\n}\nPOST_SYSCALL(rmdir)(long long res, void *path_) {\n  if (res == 0) {\n    const char *path = (const char *)path_;\n    if (path) {\n      POST_READ(path, __sanitizer::internal_strlen(path) + 1);\n    }\n  }\n}\nPRE_SYSCALL(compat_50_utimes)(void *path_, void *tptr_) { /* TODO */ }\nPOST_SYSCALL(compat_50_utimes)(long long res, void *path_, void *tptr_) {\n  /* TODO */\n}\n/* syscall 139 has been skipped */\nPRE_SYSCALL(compat_50_adjtime)(void *delta_, void *olddelta_) { /* TODO */ }\nPOST_SYSCALL(compat_50_adjtime)(long long res, void *delta_, void *olddelta_) {\n  /* TODO */\n}\nPRE_SYSCALL(compat_43_ogetpeername)(long long fdes_, void *asa_, void *alen_) {\n  /* TODO */\n}\nPOST_SYSCALL(compat_43_ogetpeername)\n(long long res, long long fdes_, void *asa_, void *alen_) {\n  /* TODO */\n}\nPRE_SYSCALL(compat_43_ogethostid)(void) { /* TODO */ }\nPOST_SYSCALL(compat_43_ogethostid)(long long res) { /* TODO */ }\nPRE_SYSCALL(compat_43_osethostid)(long long hostid_) { /* TODO */ }\nPOST_SYSCALL(compat_43_osethostid)(long long res, long long hostid_) {\n  /* TODO */\n}\nPRE_SYSCALL(compat_43_ogetrlimit)(long long which_, void *rlp_) { /* TODO */ }\nPOST_SYSCALL(compat_43_ogetrlimit)\n(long long res, long long which_, void *rlp_) {\n  /* TODO */\n}\nPRE_SYSCALL(compat_43_osetrlimit)(long long which_, void *rlp_) { /* TODO */ }\nPOST_SYSCALL(compat_43_osetrlimit)\n(long long res, long long which_, void *rlp_) {\n  /* TODO */\n}\nPRE_SYSCALL(compat_43_okillpg)(long long pgid_, long long signum_) {\n  /* TODO 
*/\n}\nPOST_SYSCALL(compat_43_okillpg)\n(long long res, long long pgid_, long long signum_) {\n  /* TODO */\n}\nPRE_SYSCALL(setsid)(void) { /* Nothing to do */ }\nPOST_SYSCALL(setsid)(long long res) { /* Nothing to do */ }\nPRE_SYSCALL(compat_50_quotactl)\n(void *path_, long long cmd_, long long uid_, void *arg_) {\n  /* TODO */\n}\nPOST_SYSCALL(compat_50_quotactl)\n(long long res, void *path_, long long cmd_, long long uid_, void *arg_) {\n  /* TODO */\n}\nPRE_SYSCALL(compat_43_oquota)(void) { /* TODO */ }\nPOST_SYSCALL(compat_43_oquota)(long long res) { /* TODO */ }\nPRE_SYSCALL(compat_43_ogetsockname)(long long fdec_, void *asa_, void *alen_) {\n  /* TODO */\n}\nPOST_SYSCALL(compat_43_ogetsockname)\n(long long res, long long fdec_, void *asa_, void *alen_) {\n  /* TODO */\n}\n/* syscall 151 has been skipped */\n/* syscall 152 has been skipped */\n/* syscall 153 has been skipped */\n/* syscall 154 has been skipped */\nPRE_SYSCALL(nfssvc)(long long flag_, void *argp_) { /* Nothing to do */ }\nPOST_SYSCALL(nfssvc)(long long res, long long flag_, void *argp_) {\n  /* Nothing to do */\n}\nPRE_SYSCALL(compat_43_ogetdirentries)\n(long long fd_, void *buf_, long long count_, void *basep_) {\n  /* TODO */\n}\nPOST_SYSCALL(compat_43_ogetdirentries)\n(long long res, long long fd_, void *buf_, long long count_, void *basep_) {\n  /* TODO */\n}\nPRE_SYSCALL(compat_20_statfs)(void *path_, void *buf_) { /* TODO */ }\nPOST_SYSCALL(compat_20_statfs)(long long res, void *path_, void *buf_) {\n  /* TODO */\n}\nPRE_SYSCALL(compat_20_fstatfs)(long long fd_, void *buf_) { /* TODO */ }\nPOST_SYSCALL(compat_20_fstatfs)(long long res, long long fd_, void *buf_) {\n  /* TODO */\n}\n/* syscall 159 has been skipped */\n/* syscall 160 has been skipped */\nPRE_SYSCALL(compat_30_getfh)(void *fname_, void *fhp_) { /* TODO */ }\nPOST_SYSCALL(compat_30_getfh)(long long res, void *fname_, void *fhp_) {\n  /* TODO */\n}\nPRE_SYSCALL(compat_09_ogetdomainname)(void *domainname_, long long len_) {\n  
/* TODO */\n}\nPOST_SYSCALL(compat_09_ogetdomainname)\n(long long res, void *domainname_, long long len_) {\n  /* TODO */\n}\nPRE_SYSCALL(compat_09_osetdomainname)(void *domainname_, long long len_) {\n  /* TODO */\n}\nPOST_SYSCALL(compat_09_osetdomainname)\n(long long res, void *domainname_, long long len_) {\n  /* TODO */\n}\nPRE_SYSCALL(compat_09_ouname)(void *name_) { /* TODO */ }\nPOST_SYSCALL(compat_09_ouname)(long long res, void *name_) { /* TODO */ }\nPRE_SYSCALL(sysarch)(long long op_, void *parms_) { /* TODO */ }\nPOST_SYSCALL(sysarch)(long long res, long long op_, void *parms_) { /* TODO */ }\nPRE_SYSCALL(__futex)\n(void *uaddr_, long long op_, long long val_, void *timeout_, void *uaddr2_,\n  long long val2_, long long val3_) {\n  /* TODO */\n}\nPOST_SYSCALL(__futex)\n(long long res, void *uaddr_, long long op_, long long val_, void *timeout_,\n  void *uaddr2_, long long val2_, long long val3_) {\n  /* TODO */\n}\nPRE_SYSCALL(__futex_set_robust_list)(void *head_, long long len_) { /* TODO */ }\nPOST_SYSCALL(__futex_set_robust_list)\n(long long res, void *head_, long long len_) {\n  /* TODO */\n}\nPRE_SYSCALL(__futex_get_robust_list)\n(long long lwpid_, void **headp_, void *lenp_) {\n  /* TODO */\n}\nPOST_SYSCALL(__futex_get_robust_list)\n(long long res, long long lwpid_, void **headp_, void *lenp_) {\n  /* TODO */\n}\n#if !defined(_LP64)\nPRE_SYSCALL(compat_10_osemsys)\n(long long which_, long long a2_, long long a3_, long long a4_, long long a5_) {\n  /* TODO */\n}\nPOST_SYSCALL(compat_10_osemsys)\n(long long res, long long which_, long long a2_, long long a3_, long long a4_,\n  long long a5_) {\n  /* TODO */\n}\n#else\n/* syscall 169 has been skipped */\n#endif\n#if !defined(_LP64)\nPRE_SYSCALL(compat_10_omsgsys)\n(long long which_, long long a2_, long long a3_, long long a4_, long long a5_,\n  long long a6_) {\n  /* TODO */\n}\nPOST_SYSCALL(compat_10_omsgsys)\n(long long res, long long which_, long long a2_, long long a3_, long long a4_,\n  long long 
a5_, long long a6_) {\n  /* TODO */\n}\n#else\n/* syscall 170 has been skipped */\n#endif\n#if !defined(_LP64)\nPRE_SYSCALL(compat_10_oshmsys)\n(long long which_, long long a2_, long long a3_, long long a4_) {\n  /* TODO */\n}\nPOST_SYSCALL(compat_10_oshmsys)\n(long long res, long long which_, long long a2_, long long a3_, long long a4_) {\n  /* TODO */\n}\n#else\n/* syscall 171 has been skipped */\n#endif\n/* syscall 172 has been skipped */\nPRE_SYSCALL(pread)\n(long long fd_, void *buf_, long long nbyte_, long long PAD_,\n  long long offset_) {\n  if (buf_) {\n    PRE_WRITE(buf_, nbyte_);\n  }\n}\nPOST_SYSCALL(pread)\n(long long res, long long fd_, void *buf_, long long nbyte_, long long PAD_,\n  long long offset_) {\n  if (res > 0) {\n    POST_WRITE(buf_, res);\n  }\n}\nPRE_SYSCALL(pwrite)\n(long long fd_, void *buf_, long long nbyte_, long long PAD_,\n  long long offset_) {\n  if (buf_) {\n    PRE_READ(buf_, nbyte_);\n  }\n}\nPOST_SYSCALL(pwrite)\n(long long res, long long fd_, void *buf_, long long nbyte_, long long PAD_,\n  long long offset_) {\n  if (res > 0) {\n    POST_READ(buf_, res);\n  }\n}\nPRE_SYSCALL(compat_30_ntp_gettime)(void *ntvp_) { /* TODO */ }\nPOST_SYSCALL(compat_30_ntp_gettime)(long long res, void *ntvp_) { /* TODO */ }\n#if defined(NTP) || !defined(_KERNEL_OPT)\nPRE_SYSCALL(ntp_adjtime)(void *tp_) { /* Nothing to do */ }\nPOST_SYSCALL(ntp_adjtime)(long long res, void *tp_) { /* Nothing to do */ }\n#else\n/* syscall 176 has been skipped */\n#endif\n/* syscall 177 has been skipped */\n/* syscall 178 has been skipped */\n/* syscall 179 has been skipped */\n/* syscall 180 has been skipped */\nPRE_SYSCALL(setgid)(long long gid_) { /* Nothing to do */ }\nPOST_SYSCALL(setgid)(long long res, long long gid_) { /* Nothing to do */ }\nPRE_SYSCALL(setegid)(long long egid_) { /* Nothing to do */ }\nPOST_SYSCALL(setegid)(long long res, long long egid_) { /* Nothing to do */ }\nPRE_SYSCALL(seteuid)(long long euid_) { /* Nothing to do */ 
}\nPOST_SYSCALL(seteuid)(long long res, long long euid_) { /* Nothing to do */ }\nPRE_SYSCALL(lfs_bmapv)(void *fsidp_, void *blkiov_, long long blkcnt_) {\n  /* TODO */\n}\nPOST_SYSCALL(lfs_bmapv)\n(long long res, void *fsidp_, void *blkiov_, long long blkcnt_) {\n  /* TODO */\n}\nPRE_SYSCALL(lfs_markv)(void *fsidp_, void *blkiov_, long long blkcnt_) {\n  /* TODO */\n}\nPOST_SYSCALL(lfs_markv)\n(long long res, void *fsidp_, void *blkiov_, long long blkcnt_) {\n  /* TODO */\n}\nPRE_SYSCALL(lfs_segclean)(void *fsidp_, long long segment_) { /* TODO */ }\nPOST_SYSCALL(lfs_segclean)(long long res, void *fsidp_, long long segment_) {\n  /* TODO */\n}\nPRE_SYSCALL(compat_50_lfs_segwait)(void *fsidp_, void *tv_) { /* TODO */ }\nPOST_SYSCALL(compat_50_lfs_segwait)(long long res, void *fsidp_, void *tv_) {\n  /* TODO */\n}\nPRE_SYSCALL(compat_12_stat12)(void *path_, void *ub_) { /* TODO */ }\nPOST_SYSCALL(compat_12_stat12)(long long res, void *path_, void *ub_) {\n  /* TODO */\n}\nPRE_SYSCALL(compat_12_fstat12)(long long fd_, void *sb_) { /* TODO */ }\nPOST_SYSCALL(compat_12_fstat12)(long long res, long long fd_, void *sb_) {\n  /* TODO */\n}\nPRE_SYSCALL(compat_12_lstat12)(void *path_, void *ub_) { /* TODO */ }\nPOST_SYSCALL(compat_12_lstat12)(long long res, void *path_, void *ub_) {\n  /* TODO */\n}\nPRE_SYSCALL(pathconf)(void *path_, long long name_) {\n  const char *path = (const char *)path_;\n  if (path) {\n    PRE_READ(path, __sanitizer::internal_strlen(path) + 1);\n  }\n}\nPOST_SYSCALL(pathconf)(long long res, void *path_, long long name_) {\n  if (res != -1) {\n    const char *path = (const char *)path_;\n    if (path) {\n      POST_READ(path, __sanitizer::internal_strlen(path) + 1);\n    }\n  }\n}\nPRE_SYSCALL(fpathconf)(long long fd_, long long name_) { /* Nothing to do */ }\nPOST_SYSCALL(fpathconf)(long long res, long long fd_, long long name_) {\n  /* Nothing to do */\n}\nPRE_SYSCALL(getsockopt2)\n(long long s_, long long level_, long long name_, void *val_, 
void *avalsize_) {\n  /* TODO */\n}\nPOST_SYSCALL(getsockopt2)\n(long long res, long long s_, long long level_, long long name_, void *val_,\n  void *avalsize_) {\n  /* TODO */\n}\nPRE_SYSCALL(getrlimit)(long long which_, void *rlp_) {\n  PRE_WRITE(rlp_, struct_rlimit_sz);\n}\nPOST_SYSCALL(getrlimit)(long long res, long long which_, void *rlp_) {\n  if (res == 0) {\n    POST_WRITE(rlp_, struct_rlimit_sz);\n  }\n}\nPRE_SYSCALL(setrlimit)(long long which_, void *rlp_) {\n  PRE_READ(rlp_, struct_rlimit_sz);\n}\nPOST_SYSCALL(setrlimit)(long long res, long long which_, void *rlp_) {\n  if (res == 0) {\n    POST_READ(rlp_, struct_rlimit_sz);\n  }\n}\nPRE_SYSCALL(compat_12_getdirentries)\n(long long fd_, void *buf_, long long count_, void *basep_) {\n  /* TODO */\n}\nPOST_SYSCALL(compat_12_getdirentries)\n(long long res, long long fd_, void *buf_, long long count_, void *basep_) {\n  /* TODO */\n}\nPRE_SYSCALL(mmap)\n(void *addr_, long long len_, long long prot_, long long flags_, long long fd_,\n  long long PAD_, long long pos_) {\n  /* Nothing to do */\n}\nPOST_SYSCALL(mmap)\n(long long res, void *addr_, long long len_, long long prot_, long long flags_,\n  long long fd_, long long PAD_, long long pos_) {\n  /* Nothing to do */\n}\nPRE_SYSCALL(__syscall)(long long code_, long long args_[SYS_MAXSYSARGS]) {\n  /* Nothing to do */\n}\nPOST_SYSCALL(__syscall)\n(long long res, long long code_, long long args_[SYS_MAXSYSARGS]) {\n  /* Nothing to do */\n}\nPRE_SYSCALL(lseek)\n(long long fd_, long long PAD_, long long offset_, long long whence_) {\n  /* Nothing to do */\n}\nPOST_SYSCALL(lseek)\n(long long res, long long fd_, long long PAD_, long long offset_,\n  long long whence_) {\n  /* Nothing to do */\n}\nPRE_SYSCALL(truncate)(void *path_, long long PAD_, long long length_) {\n  const char *path = (const char *)path_;\n  if (path) {\n    PRE_READ(path, __sanitizer::internal_strlen(path) + 1);\n  }\n}\nPOST_SYSCALL(truncate)\n(long long res, void *path_, long long PAD_, long 
long length_) {\n  if (res == 0) {\n    const char *path = (const char *)path_;\n    if (path) {\n      POST_READ(path, __sanitizer::internal_strlen(path) + 1);\n    }\n  }\n}\nPRE_SYSCALL(ftruncate)(long long fd_, long long PAD_, long long length_) {\n  /* Nothing to do */\n}\nPOST_SYSCALL(ftruncate)\n(long long res, long long fd_, long long PAD_, long long length_) {\n  /* Nothing to do */\n}\nPRE_SYSCALL(__sysctl)\n(void *name_, long long namelen_, void *oldv_, void *oldlenp_, void *newv_,\n  long long newlen_) {\n  const int *name = (const int *)name_;\n  if (name) {\n    PRE_READ(name, namelen_ * sizeof(*name));\n  }\n  if (newv_) {\n    PRE_READ(name, newlen_);\n  }\n}\nPOST_SYSCALL(__sysctl)\n(long long res, void *name_, long long namelen_, void *oldv_, void *oldlenp_,\n  void *newv_, long long newlen_) {\n  if (res == 0) {\n    const int *name = (const int *)name_;\n    if (name) {\n      POST_READ(name, namelen_ * sizeof(*name));\n    }\n    if (newv_) {\n      POST_READ(name, newlen_);\n    }\n  }\n}\nPRE_SYSCALL(mlock)(void *addr_, long long len_) { /* Nothing to do */ }\nPOST_SYSCALL(mlock)(long long res, void *addr_, long long len_) {\n  /* Nothing to do */\n}\nPRE_SYSCALL(munlock)(void *addr_, long long len_) { /* Nothing to do */ }\nPOST_SYSCALL(munlock)(long long res, void *addr_, long long len_) {\n  /* Nothing to do */\n}\nPRE_SYSCALL(undelete)(void *path_) {\n  const char *path = (const char *)path_;\n  if (path) {\n    PRE_READ(path, __sanitizer::internal_strlen(path) + 1);\n  }\n}\nPOST_SYSCALL(undelete)(long long res, void *path_) {\n  if (res == 0) {\n    const char *path = (const char *)path_;\n    if (path) {\n      POST_READ(path, __sanitizer::internal_strlen(path) + 1);\n    }\n  }\n}\nPRE_SYSCALL(compat_50_futimes)(long long fd_, void *tptr_) { /* TODO */ }\nPOST_SYSCALL(compat_50_futimes)(long long res, long long fd_, void *tptr_) {\n  /* TODO */\n}\nPRE_SYSCALL(getpgid)(long long pid_) { /* Nothing to do */ 
}\nPOST_SYSCALL(getpgid)(long long res, long long pid_) { /* Nothing to do */ }\nPRE_SYSCALL(reboot)(long long opt_, void *bootstr_) {\n  const char *bootstr = (const char *)bootstr_;\n  if (bootstr) {\n    PRE_READ(bootstr, __sanitizer::internal_strlen(bootstr) + 1);\n  }\n}\nPOST_SYSCALL(reboot)(long long res, long long opt_, void *bootstr_) {\n  /* This call should never return */\n  const char *bootstr = (const char *)bootstr_;\n  if (bootstr) {\n    POST_READ(bootstr, __sanitizer::internal_strlen(bootstr) + 1);\n  }\n}\nPRE_SYSCALL(poll)(void *fds_, long long nfds_, long long timeout_) {\n  /* Nothing to do */\n}\nPOST_SYSCALL(poll)\n(long long res, void *fds_, long long nfds_, long long timeout_) {\n  /* Nothing to do */\n}\nPRE_SYSCALL(afssys)\n(long long id_, long long a1_, long long a2_, long long a3_, long long a4_,\n  long long a5_, long long a6_) {\n  /* TODO */\n}\nPOST_SYSCALL(afssys)\n(long long res, long long id_, long long a1_, long long a2_, long long a3_,\n  long long a4_, long long a5_, long long a6_) {\n  /* TODO */\n}\n/* syscall 211 has been skipped */\n/* syscall 212 has been skipped */\n/* syscall 213 has been skipped */\n/* syscall 214 has been skipped */\n/* syscall 215 has been skipped */\n/* syscall 216 has been skipped */\n/* syscall 217 has been skipped */\n/* syscall 218 has been skipped */\n/* syscall 219 has been skipped */\nPRE_SYSCALL(compat_14___semctl)\n(long long semid_, long long semnum_, long long cmd_, void *arg_) {\n  /* TODO */\n}\nPOST_SYSCALL(compat_14___semctl)\n(long long res, long long semid_, long long semnum_, long long cmd_,\n  void *arg_) {\n  /* TODO */\n}\nPRE_SYSCALL(semget)(long long key_, long long nsems_, long long semflg_) {\n  /* Nothing to do */\n}\nPOST_SYSCALL(semget)\n(long long res, long long key_, long long nsems_, long long semflg_) {\n  /* Nothing to do */\n}\nPRE_SYSCALL(semop)(long long semid_, void *sops_, long long nsops_) {\n  if (sops_) {\n    PRE_READ(sops_, nsops_ * struct_sembuf_sz);\n  
}\n}\nPOST_SYSCALL(semop)\n(long long res, long long semid_, void *sops_, long long nsops_) {\n  if (res == 0) {\n    if (sops_) {\n      POST_READ(sops_, nsops_ * struct_sembuf_sz);\n    }\n  }\n}\nPRE_SYSCALL(semconfig)(long long flag_) { /* Nothing to do */ }\nPOST_SYSCALL(semconfig)(long long res, long long flag_) { /* Nothing to do */ }\nPRE_SYSCALL(compat_14_msgctl)(long long msqid_, long long cmd_, void *buf_) {\n  /* TODO */\n}\nPOST_SYSCALL(compat_14_msgctl)\n(long long res, long long msqid_, long long cmd_, void *buf_) {\n  /* TODO */\n}\nPRE_SYSCALL(msgget)(long long key_, long long msgflg_) { /* Nothing to do */ }\nPOST_SYSCALL(msgget)(long long res, long long key_, long long msgflg_) {\n  /* Nothing to do */\n}\nPRE_SYSCALL(msgsnd)\n(long long msqid_, void *msgp_, long long msgsz_, long long msgflg_) {\n  if (msgp_) {\n    PRE_READ(msgp_, msgsz_);\n  }\n}\nPOST_SYSCALL(msgsnd)\n(long long res, long long msqid_, void *msgp_, long long msgsz_,\n  long long msgflg_) {\n  if (res == 0) {\n    if (msgp_) {\n      POST_READ(msgp_, msgsz_);\n    }\n  }\n}\nPRE_SYSCALL(msgrcv)\n(long long msqid_, void *msgp_, long long msgsz_, long long msgtyp_,\n  long long msgflg_) {\n  /* Nothing to do */\n}\nPOST_SYSCALL(msgrcv)\n(long long res, long long msqid_, void *msgp_, long long msgsz_,\n  long long msgtyp_, long long msgflg_) {\n  /* Nothing to do */\n}\nPRE_SYSCALL(shmat)(long long shmid_, void *shmaddr_, long long shmflg_) {\n  /* Nothing to do */\n}\nPOST_SYSCALL(shmat)\n(long long res, long long shmid_, void *shmaddr_, long long shmflg_) {\n  /* Nothing to do */\n}\nPRE_SYSCALL(compat_14_shmctl)(long long shmid_, long long cmd_, void *buf_) {\n  /* TODO */\n}\nPOST_SYSCALL(compat_14_shmctl)\n(long long res, long long shmid_, long long cmd_, void *buf_) {\n  /* TODO */\n}\nPRE_SYSCALL(shmdt)(void *shmaddr_) { /* Nothing to do */ }\nPOST_SYSCALL(shmdt)(long long res, void *shmaddr_) { /* Nothing to do */ }\nPRE_SYSCALL(shmget)(long long key_, long long size_, 
long long shmflg_) {\n  /* Nothing to do */\n}\nPOST_SYSCALL(shmget)\n(long long res, long long key_, long long size_, long long shmflg_) {\n  /* Nothing to do */\n}\nPRE_SYSCALL(compat_50_clock_gettime)(long long clock_id_, void *tp_) {\n  /* TODO */\n}\nPOST_SYSCALL(compat_50_clock_gettime)\n(long long res, long long clock_id_, void *tp_) {\n  /* TODO */\n}\nPRE_SYSCALL(compat_50_clock_settime)(long long clock_id_, void *tp_) {\n  /* TODO */\n}\nPOST_SYSCALL(compat_50_clock_settime)\n(long long res, long long clock_id_, void *tp_) {\n  /* TODO */\n}\nPRE_SYSCALL(compat_50_clock_getres)(long long clock_id_, void *tp_) {\n  /* TODO */\n}\nPOST_SYSCALL(compat_50_clock_getres)\n(long long res, long long clock_id_, void *tp_) {\n  /* TODO */\n}\nPRE_SYSCALL(timer_create)(long long clock_id_, void *evp_, void *timerid_) {\n  /* Nothing to do */\n}\nPOST_SYSCALL(timer_create)\n(long long res, long long clock_id_, void *evp_, void *timerid_) {\n  /* Nothing to do */\n}\nPRE_SYSCALL(timer_delete)(long long timerid_) { /* Nothing to do */ }\nPOST_SYSCALL(timer_delete)(long long res, long long timerid_) {\n  /* Nothing to do */\n}\nPRE_SYSCALL(compat_50_timer_settime)\n(long long timerid_, long long flags_, void *value_, void *ovalue_) {\n  /* TODO */\n}\nPOST_SYSCALL(compat_50_timer_settime)\n(long long res, long long timerid_, long long flags_, void *value_,\n  void *ovalue_) {\n  /* TODO */\n}\nPRE_SYSCALL(compat_50_timer_gettime)(long long timerid_, void *value_) {\n  /* TODO */\n}\nPOST_SYSCALL(compat_50_timer_gettime)\n(long long res, long long timerid_, void *value_) {\n  /* TODO */\n}\nPRE_SYSCALL(timer_getoverrun)(long long timerid_) { /* Nothing to do */ }\nPOST_SYSCALL(timer_getoverrun)(long long res, long long timerid_) {\n  /* Nothing to do */\n}\nPRE_SYSCALL(compat_50_nanosleep)(void *rqtp_, void *rmtp_) { /* TODO */ }\nPOST_SYSCALL(compat_50_nanosleep)(long long res, void *rqtp_, void *rmtp_) {\n  /* TODO */\n}\nPRE_SYSCALL(fdatasync)(long long fd_) { /* 
Nothing to do */ }\nPOST_SYSCALL(fdatasync)(long long res, long long fd_) { /* Nothing to do */ }\nPRE_SYSCALL(mlockall)(long long flags_) { /* Nothing to do */ }\nPOST_SYSCALL(mlockall)(long long res, long long flags_) { /* Nothing to do */ }\nPRE_SYSCALL(munlockall)(void) { /* Nothing to do */ }\nPOST_SYSCALL(munlockall)(long long res) { /* Nothing to do */ }\nPRE_SYSCALL(compat_50___sigtimedwait)(void *set_, void *info_, void *timeout_) {\n  /* TODO */\n}\nPOST_SYSCALL(compat_50___sigtimedwait)\n(long long res, void *set_, void *info_, void *timeout_) {\n  /* TODO */\n}\nPRE_SYSCALL(sigqueueinfo)(long long pid_, void *info_) {\n  if (info_) {\n    PRE_READ(info_, siginfo_t_sz);\n  }\n}\nPOST_SYSCALL(sigqueueinfo)(long long res, long long pid_, void *info_) {}\nPRE_SYSCALL(modctl)(long long cmd_, void *arg_) { /* TODO */ }\nPOST_SYSCALL(modctl)(long long res, long long cmd_, void *arg_) { /* TODO */ }\nPRE_SYSCALL(_ksem_init)(long long value_, void *idp_) { /* Nothing to do */ }\nPOST_SYSCALL(_ksem_init)(long long res, long long value_, void *idp_) {\n  /* Nothing to do */\n}\nPRE_SYSCALL(_ksem_open)\n(void *name_, long long oflag_, long long mode_, long long value_, void *idp_) {\n  const char *name = (const char *)name_;\n  if (name) {\n    PRE_READ(name, __sanitizer::internal_strlen(name) + 1);\n  }\n}\nPOST_SYSCALL(_ksem_open)\n(long long res, void *name_, long long oflag_, long long mode_,\n  long long value_, void *idp_) {\n  const char *name = (const char *)name_;\n  if (name) {\n    POST_READ(name, __sanitizer::internal_strlen(name) + 1);\n  }\n}\nPRE_SYSCALL(_ksem_unlink)(void *name_) {\n  const char *name = (const char *)name_;\n  if (name) {\n    PRE_READ(name, __sanitizer::internal_strlen(name) + 1);\n  }\n}\nPOST_SYSCALL(_ksem_unlink)(long long res, void *name_) {\n  const char *name = (const char *)name_;\n  if (name) {\n    POST_READ(name, __sanitizer::internal_strlen(name) + 1);\n  }\n}\nPRE_SYSCALL(_ksem_close)(long long id_) { /* Nothing to do 
*/ }\nPOST_SYSCALL(_ksem_close)(long long res, long long id_) { /* Nothing to do */ }\nPRE_SYSCALL(_ksem_post)(long long id_) { /* Nothing to do */ }\nPOST_SYSCALL(_ksem_post)(long long res, long long id_) { /* Nothing to do */ }\nPRE_SYSCALL(_ksem_wait)(long long id_) { /* Nothing to do */ }\nPOST_SYSCALL(_ksem_wait)(long long res, long long id_) { /* Nothing to do */ }\nPRE_SYSCALL(_ksem_trywait)(long long id_) { /* Nothing to do */ }\nPOST_SYSCALL(_ksem_trywait)(long long res, long long id_) {\n  /* Nothing to do */\n}\nPRE_SYSCALL(_ksem_getvalue)(long long id_, void *value_) { /* Nothing to do */ }\nPOST_SYSCALL(_ksem_getvalue)(long long res, long long id_, void *value_) {\n  /* Nothing to do */\n}\nPRE_SYSCALL(_ksem_destroy)(long long id_) { /* Nothing to do */ }\nPOST_SYSCALL(_ksem_destroy)(long long res, long long id_) {\n  /* Nothing to do */\n}\nPRE_SYSCALL(_ksem_timedwait)(long long id_, void *abstime_) {\n  if (abstime_) {\n    PRE_READ(abstime_, struct_timespec_sz);\n  }\n}\nPOST_SYSCALL(_ksem_timedwait)(long long res, long long id_, void *abstime_) {}\nPRE_SYSCALL(mq_open)\n(void *name_, long long oflag_, long long mode_, void *attr_) {\n  const char *name = (const char *)name_;\n  if (name) {\n    PRE_READ(name, __sanitizer::internal_strlen(name) + 1);\n  }\n}\nPOST_SYSCALL(mq_open)\n(long long res, void *name_, long long oflag_, long long mode_, void *attr_) {\n  const char *name = (const char *)name_;\n  if (name) {\n    POST_READ(name, __sanitizer::internal_strlen(name) + 1);\n  }\n}\nPRE_SYSCALL(mq_close)(long long mqdes_) { /* Nothing to do */ }\nPOST_SYSCALL(mq_close)(long long res, long long mqdes_) { /* Nothing to do */ }\nPRE_SYSCALL(mq_unlink)(void *name_) {\n  const char *name = (const char *)name_;\n  if (name) {\n    PRE_READ(name, __sanitizer::internal_strlen(name) + 1);\n  }\n}\nPOST_SYSCALL(mq_unlink)(long long res, void *name_) {\n  const char *name = (const char *)name_;\n  if (name) {\n    POST_READ(name, 
__sanitizer::internal_strlen(name) + 1);\n  }\n}\nPRE_SYSCALL(mq_getattr)(long long mqdes_, void *mqstat_) { /* Nothing to do */ }\nPOST_SYSCALL(mq_getattr)(long long res, long long mqdes_, void *mqstat_) {\n  /* Nothing to do */\n}\nPRE_SYSCALL(mq_setattr)(long long mqdes_, void *mqstat_, void *omqstat_) {\n  if (mqstat_) {\n    PRE_READ(mqstat_, struct_mq_attr_sz);\n  }\n}\nPOST_SYSCALL(mq_setattr)\n(long long res, long long mqdes_, void *mqstat_, void *omqstat_) {}\nPRE_SYSCALL(mq_notify)(long long mqdes_, void *notification_) {\n  if (notification_) {\n    PRE_READ(notification_, struct_sigevent_sz);\n  }\n}\nPOST_SYSCALL(mq_notify)(long long res, long long mqdes_, void *notification_) {}\nPRE_SYSCALL(mq_send)\n(long long mqdes_, void *msg_ptr_, long long msg_len_, long long msg_prio_) {\n  if (msg_ptr_) {\n    PRE_READ(msg_ptr_, msg_len_);\n  }\n}\nPOST_SYSCALL(mq_send)\n(long long res, long long mqdes_, void *msg_ptr_, long long msg_len_,\n  long long msg_prio_) {}\nPRE_SYSCALL(mq_receive)\n(long long mqdes_, void *msg_ptr_, long long msg_len_, void *msg_prio_) {\n  /* Nothing to do */\n}\nPOST_SYSCALL(mq_receive)\n(long long res, long long mqdes_, void *msg_ptr_, long long msg_len_,\n  void *msg_prio_) {\n  /* Nothing to do */\n}\nPRE_SYSCALL(compat_50_mq_timedsend)\n(long long mqdes_, void *msg_ptr_, long long msg_len_, long long msg_prio_,\n  void *abs_timeout_) {\n  /* TODO */\n}\nPOST_SYSCALL(compat_50_mq_timedsend)\n(long long res, long long mqdes_, void *msg_ptr_, long long msg_len_,\n  long long msg_prio_, void *abs_timeout_) {\n  /* TODO */\n}\nPRE_SYSCALL(compat_50_mq_timedreceive)\n(long long mqdes_, void *msg_ptr_, long long msg_len_, void *msg_prio_,\n  void *abs_timeout_) {\n  /* TODO */\n}\nPOST_SYSCALL(compat_50_mq_timedreceive)\n(long long res, long long mqdes_, void *msg_ptr_, long long msg_len_,\n  void *msg_prio_, void *abs_timeout_) {\n  /* TODO */\n}\n/* syscall 267 has been skipped */\n/* syscall 268 has been skipped */\n/* syscall 269 
has been skipped */\nPRE_SYSCALL(__posix_rename)(void *from_, void *to_) {\n  const char *from = (const char *)from_;\n  const char *to = (const char *)to_;\n  if (from_) {\n    PRE_READ(from, __sanitizer::internal_strlen(from) + 1);\n  }\n  if (to) {\n    PRE_READ(to, __sanitizer::internal_strlen(to) + 1);\n  }\n}\nPOST_SYSCALL(__posix_rename)(long long res, void *from_, void *to_) {\n  const char *from = (const char *)from_;\n  const char *to = (const char *)to_;\n  if (from) {\n    POST_READ(from, __sanitizer::internal_strlen(from) + 1);\n  }\n  if (to) {\n    POST_READ(to, __sanitizer::internal_strlen(to) + 1);\n  }\n}\nPRE_SYSCALL(swapctl)(long long cmd_, void *arg_, long long misc_) { /* TODO */ }\nPOST_SYSCALL(swapctl)\n(long long res, long long cmd_, void *arg_, long long misc_) {\n  /* TODO */\n}\nPRE_SYSCALL(compat_30_getdents)(long long fd_, void *buf_, long long count_) {\n  /* TODO */\n}\nPOST_SYSCALL(compat_30_getdents)\n(long long res, long long fd_, void *buf_, long long count_) {\n  /* TODO */\n}\nPRE_SYSCALL(minherit)(void *addr_, long long len_, long long inherit_) {\n  /* Nothing to do */\n}\nPOST_SYSCALL(minherit)\n(long long res, void *addr_, long long len_, long long inherit_) {\n  /* Nothing to do */\n}\nPRE_SYSCALL(lchmod)(void *path_, long long mode_) {\n  const char *path = (const char *)path_;\n  if (path) {\n    PRE_READ(path, __sanitizer::internal_strlen(path) + 1);\n  }\n}\nPOST_SYSCALL(lchmod)(long long res, void *path_, long long mode_) {\n  const char *path = (const char *)path_;\n  if (path) {\n    POST_READ(path, __sanitizer::internal_strlen(path) + 1);\n  }\n}\nPRE_SYSCALL(lchown)(void *path_, long long uid_, long long gid_) {\n  const char *path = (const char *)path_;\n  if (path) {\n    PRE_READ(path, __sanitizer::internal_strlen(path) + 1);\n  }\n}\nPOST_SYSCALL(lchown)\n(long long res, void *path_, long long uid_, long long gid_) {\n  const char *path = (const char *)path_;\n  if (path) {\n    POST_READ(path, 
__sanitizer::internal_strlen(path) + 1);\n  }\n}\nPRE_SYSCALL(compat_50_lutimes)(void *path_, void *tptr_) { /* TODO */ }\nPOST_SYSCALL(compat_50_lutimes)(long long res, void *path_, void *tptr_) {\n  /* TODO */\n}\nPRE_SYSCALL(__msync13)(void *addr_, long long len_, long long flags_) {\n  /* Nothing to do */\n}\nPOST_SYSCALL(__msync13)\n(long long res, void *addr_, long long len_, long long flags_) {\n  /* Nothing to do */\n}\nPRE_SYSCALL(compat_30___stat13)(void *path_, void *ub_) { /* TODO */ }\nPOST_SYSCALL(compat_30___stat13)(long long res, void *path_, void *ub_) {\n  /* TODO */\n}\nPRE_SYSCALL(compat_30___fstat13)(long long fd_, void *sb_) { /* TODO */ }\nPOST_SYSCALL(compat_30___fstat13)(long long res, long long fd_, void *sb_) {\n  /* TODO */\n}\nPRE_SYSCALL(compat_30___lstat13)(void *path_, void *ub_) { /* TODO */ }\nPOST_SYSCALL(compat_30___lstat13)(long long res, void *path_, void *ub_) {\n  /* TODO */\n}\nPRE_SYSCALL(__sigaltstack14)(void *nss_, void *oss_) {\n  if (nss_) {\n    PRE_READ(nss_, struct_sigaltstack_sz);\n  }\n  if (oss_) {\n    PRE_READ(oss_, struct_sigaltstack_sz);\n  }\n}\nPOST_SYSCALL(__sigaltstack14)(long long res, void *nss_, void *oss_) {}\nPRE_SYSCALL(__vfork14)(void) { /* Nothing to do */ }\nPOST_SYSCALL(__vfork14)(long long res) { /* Nothing to do */ }\nPRE_SYSCALL(__posix_chown)(void *path_, long long uid_, long long gid_) {\n  const char *path = (const char *)path_;\n  if (path) {\n    PRE_READ(path, __sanitizer::internal_strlen(path) + 1);\n  }\n}\nPOST_SYSCALL(__posix_chown)\n(long long res, void *path_, long long uid_, long long gid_) {\n  const char *path = (const char *)path_;\n  if (path) {\n    POST_READ(path, __sanitizer::internal_strlen(path) + 1);\n  }\n}\nPRE_SYSCALL(__posix_fchown)(long long fd_, long long uid_, long long gid_) {\n  /* Nothing to do */\n}\nPOST_SYSCALL(__posix_fchown)\n(long long res, long long fd_, long long uid_, long long gid_) {\n  /* Nothing to do */\n}\nPRE_SYSCALL(__posix_lchown)(void *path_, 
long long uid_, long long gid_) {\n  const char *path = (const char *)path_;\n  if (path) {\n    PRE_READ(path, __sanitizer::internal_strlen(path) + 1);\n  }\n}\nPOST_SYSCALL(__posix_lchown)\n(long long res, void *path_, long long uid_, long long gid_) {\n  const char *path = (const char *)path_;\n  if (path) {\n    POST_READ(path, __sanitizer::internal_strlen(path) + 1);\n  }\n}\nPRE_SYSCALL(getsid)(long long pid_) { /* Nothing to do */ }\nPOST_SYSCALL(getsid)(long long res, long long pid_) { /* Nothing to do */ }\nPRE_SYSCALL(__clone)(long long flags_, void *stack_) { /* Nothing to do */ }\nPOST_SYSCALL(__clone)(long long res, long long flags_, void *stack_) {\n  /* Nothing to do */\n}\nPRE_SYSCALL(fktrace)\n(long long fd_, long long ops_, long long facs_, long long pid_) {\n  /* Nothing to do */\n}\nPOST_SYSCALL(fktrace)\n(long long res, long long fd_, long long ops_, long long facs_,\n  long long pid_) {\n  /* Nothing to do */\n}\nPRE_SYSCALL(preadv)\n(long long fd_, void *iovp_, long long iovcnt_, long long PAD_,\n  long long offset_) {\n  /* Nothing to do */\n}\nPOST_SYSCALL(preadv)\n(long long res, long long fd_, void *iovp_, long long iovcnt_, long long PAD_,\n  long long offset_) {\n  /* Nothing to do */\n}\nPRE_SYSCALL(pwritev)\n(long long fd_, void *iovp_, long long iovcnt_, long long PAD_,\n  long long offset_) {\n  /* Nothing to do */\n}\nPOST_SYSCALL(pwritev)\n(long long res, long long fd_, void *iovp_, long long iovcnt_, long long PAD_,\n  long long offset_) {\n  /* Nothing to do */\n}\nPRE_SYSCALL(compat_16___sigaction14)\n(long long signum_, void *nsa_, void *osa_) {\n  /* TODO */\n}\nPOST_SYSCALL(compat_16___sigaction14)\n(long long res, long long signum_, void *nsa_, void *osa_) {\n  /* TODO */\n}\nPRE_SYSCALL(__sigpending14)(void *set_) { /* Nothing to do */ }\nPOST_SYSCALL(__sigpending14)(long long res, void *set_) { /* Nothing to do */ }\nPRE_SYSCALL(__sigprocmask14)(long long how_, void *set_, void *oset_) {\n  /* Nothing to do 
*/\n}\nPOST_SYSCALL(__sigprocmask14)\n(long long res, long long how_, void *set_, void *oset_) {\n  /* Nothing to do */\n}\nPRE_SYSCALL(__sigsuspend14)(void *set_) {\n  if (set_) {\n    PRE_READ(set_, sizeof(__sanitizer_sigset_t));\n  }\n}\nPOST_SYSCALL(__sigsuspend14)(long long res, void *set_) {\n  if (set_) {\n    PRE_READ(set_, sizeof(__sanitizer_sigset_t));\n  }\n}\nPRE_SYSCALL(compat_16___sigreturn14)(void *sigcntxp_) { /* TODO */ }\nPOST_SYSCALL(compat_16___sigreturn14)(long long res, void *sigcntxp_) {\n  /* TODO */\n}\nPRE_SYSCALL(__getcwd)(void *bufp_, long long length_) { /* Nothing to do */ }\nPOST_SYSCALL(__getcwd)(long long res, void *bufp_, long long length_) {\n  /* Nothing to do */\n}\nPRE_SYSCALL(fchroot)(long long fd_) { /* Nothing to do */ }\nPOST_SYSCALL(fchroot)(long long res, long long fd_) { /* Nothing to do */ }\nPRE_SYSCALL(compat_30_fhopen)(void *fhp_, long long flags_) { /* TODO */ }\nPOST_SYSCALL(compat_30_fhopen)(long long res, void *fhp_, long long flags_) {\n  /* TODO */\n}\nPRE_SYSCALL(compat_30_fhstat)(void *fhp_, void *sb_) { /* TODO */ }\nPOST_SYSCALL(compat_30_fhstat)(long long res, void *fhp_, void *sb_) {\n  /* TODO */\n}\nPRE_SYSCALL(compat_20_fhstatfs)(void *fhp_, void *buf_) { /* TODO */ }\nPOST_SYSCALL(compat_20_fhstatfs)(long long res, void *fhp_, void *buf_) {\n  /* TODO */\n}\nPRE_SYSCALL(compat_50_____semctl13)\n(long long semid_, long long semnum_, long long cmd_, void *arg_) {\n  /* TODO */\n}\nPOST_SYSCALL(compat_50_____semctl13)\n(long long res, long long semid_, long long semnum_, long long cmd_,\n  void *arg_) {\n  /* TODO */\n}\nPRE_SYSCALL(compat_50___msgctl13)\n(long long msqid_, long long cmd_, void *buf_) {\n  /* TODO */\n}\nPOST_SYSCALL(compat_50___msgctl13)\n(long long res, long long msqid_, long long cmd_, void *buf_) {\n  /* TODO */\n}\nPRE_SYSCALL(compat_50___shmctl13)\n(long long shmid_, long long cmd_, void *buf_) {\n  /* TODO */\n}\nPOST_SYSCALL(compat_50___shmctl13)\n(long long res, long long 
shmid_, long long cmd_, void *buf_) {\n  /* TODO */\n}\nPRE_SYSCALL(lchflags)(void *path_, long long flags_) {\n  const char *path = (const char *)path_;\n  if (path) {\n    PRE_READ(path, __sanitizer::internal_strlen(path) + 1);\n  }\n}\nPOST_SYSCALL(lchflags)(long long res, void *path_, long long flags_) {\n  const char *path = (const char *)path_;\n  if (path) {\n    POST_READ(path, __sanitizer::internal_strlen(path) + 1);\n  }\n}\nPRE_SYSCALL(issetugid)(void) { /* Nothing to do */ }\nPOST_SYSCALL(issetugid)(long long res) { /* Nothing to do */ }\nPRE_SYSCALL(utrace)(void *label_, void *addr_, long long len_) {\n  const char *label = (const char *)label_;\n  if (label) {\n    PRE_READ(label, __sanitizer::internal_strlen(label) + 1);\n  }\n  if (addr_) {\n    PRE_READ(addr_, len_);\n  }\n}\nPOST_SYSCALL(utrace)(long long res, void *label_, void *addr_, long long len_) {\n  const char *label = (const char *)label_;\n  if (label) {\n    POST_READ(label, __sanitizer::internal_strlen(label) + 1);\n  }\n  if (addr_) {\n    POST_READ(addr_, len_);\n  }\n}\nPRE_SYSCALL(getcontext)(void *ucp_) { /* Nothing to do */ }\nPOST_SYSCALL(getcontext)(long long res, void *ucp_) { /* Nothing to do */ }\nPRE_SYSCALL(setcontext)(void *ucp_) {\n  if (ucp_) {\n    PRE_READ(ucp_, ucontext_t_sz(ucp_));\n  }\n}\nPOST_SYSCALL(setcontext)(long long res, void *ucp_) {}\nPRE_SYSCALL(_lwp_create)(void *ucp_, long long flags_, void *new_lwp_) {\n  if (ucp_) {\n    PRE_READ(ucp_, ucontext_t_sz(ucp_));\n  }\n}\nPOST_SYSCALL(_lwp_create)\n(long long res, void *ucp_, long long flags_, void *new_lwp_) {}\nPRE_SYSCALL(_lwp_exit)(void) { /* Nothing to do */ }\nPOST_SYSCALL(_lwp_exit)(long long res) { /* Nothing to do */ }\nPRE_SYSCALL(_lwp_self)(void) { /* Nothing to do */ }\nPOST_SYSCALL(_lwp_self)(long long res) { /* Nothing to do */ }\nPRE_SYSCALL(_lwp_wait)(long long wait_for_, void *departed_) {\n  /* Nothing to do */\n}\nPOST_SYSCALL(_lwp_wait)(long long res, long long wait_for_, void 
*departed_) {\n  /* Nothing to do */\n}\nPRE_SYSCALL(_lwp_suspend)(long long target_) { /* Nothing to do */ }\nPOST_SYSCALL(_lwp_suspend)(long long res, long long target_) {\n  /* Nothing to do */\n}\nPRE_SYSCALL(_lwp_continue)(long long target_) { /* Nothing to do */ }\nPOST_SYSCALL(_lwp_continue)(long long res, long long target_) {\n  /* Nothing to do */\n}\nPRE_SYSCALL(_lwp_wakeup)(long long target_) { /* Nothing to do */ }\nPOST_SYSCALL(_lwp_wakeup)(long long res, long long target_) {\n  /* Nothing to do */\n}\nPRE_SYSCALL(_lwp_getprivate)(void) { /* Nothing to do */ }\nPOST_SYSCALL(_lwp_getprivate)(long long res) { /* Nothing to do */ }\nPRE_SYSCALL(_lwp_setprivate)(void *ptr_) { /* Nothing to do */ }\nPOST_SYSCALL(_lwp_setprivate)(long long res, void *ptr_) { /* Nothing to do */ }\nPRE_SYSCALL(_lwp_kill)(long long target_, long long signo_) {\n  /* Nothing to do */\n}\nPOST_SYSCALL(_lwp_kill)(long long res, long long target_, long long signo_) {\n  /* Nothing to do */\n}\nPRE_SYSCALL(_lwp_detach)(long long target_) { /* Nothing to do */ }\nPOST_SYSCALL(_lwp_detach)(long long res, long long target_) {\n  /* Nothing to do */\n}\nPRE_SYSCALL(compat_50__lwp_park)\n(void *ts_, long long unpark_, void *hint_, void *unparkhint_) {\n  /* TODO */\n}\nPOST_SYSCALL(compat_50__lwp_park)\n(long long res, void *ts_, long long unpark_, void *hint_, void *unparkhint_) {\n  /* TODO */\n}\nPRE_SYSCALL(_lwp_unpark)(long long target_, void *hint_) { /* Nothing to do */ }\nPOST_SYSCALL(_lwp_unpark)(long long res, long long target_, void *hint_) {\n  /* Nothing to do */\n}\nPRE_SYSCALL(_lwp_unpark_all)(void *targets_, long long ntargets_, void *hint_) {\n  if (targets_) {\n    PRE_READ(targets_, ntargets_ * sizeof(__sanitizer_lwpid_t));\n  }\n}\nPOST_SYSCALL(_lwp_unpark_all)\n(long long res, void *targets_, long long ntargets_, void *hint_) {}\nPRE_SYSCALL(_lwp_setname)(long long target_, void *name_) {\n  const char *name = (const char *)name_;\n  if (name) {\n    PRE_READ(name, 
__sanitizer::internal_strlen(name) + 1);\n  }\n}\nPOST_SYSCALL(_lwp_setname)(long long res, long long target_, void *name_) {\n  const char *name = (const char *)name_;\n  if (name) {\n    POST_READ(name, __sanitizer::internal_strlen(name) + 1);\n  }\n}\nPRE_SYSCALL(_lwp_getname)(long long target_, void *name_, long long len_) {\n  /* Nothing to do */\n}\nPOST_SYSCALL(_lwp_getname)\n(long long res, long long target_, void *name_, long long len_) {\n  /* Nothing to do */\n}\nPRE_SYSCALL(_lwp_ctl)(long long features_, void **address_) {\n  /* Nothing to do */\n}\nPOST_SYSCALL(_lwp_ctl)(long long res, long long features_, void **address_) {\n  /* Nothing to do */\n}\n/* syscall 326 has been skipped */\n/* syscall 327 has been skipped */\n/* syscall 328 has been skipped */\n/* syscall 329 has been skipped */\nPRE_SYSCALL(compat_60_sa_register)\n(void *newv_, void **oldv_, long long flags_, long long stackinfo_offset_) {\n  /* TODO */\n}\nPOST_SYSCALL(compat_60_sa_register)\n(long long res, void *newv_, void **oldv_, long long flags_,\n  long long stackinfo_offset_) {\n  /* TODO */\n}\nPRE_SYSCALL(compat_60_sa_stacks)(long long num_, void *stacks_) { /* TODO */ }\nPOST_SYSCALL(compat_60_sa_stacks)\n(long long res, long long num_, void *stacks_) {\n  /* TODO */\n}\nPRE_SYSCALL(compat_60_sa_enable)(void) { /* TODO */ }\nPOST_SYSCALL(compat_60_sa_enable)(long long res) { /* TODO */ }\nPRE_SYSCALL(compat_60_sa_setconcurrency)(long long concurrency_) { /* TODO */ }\nPOST_SYSCALL(compat_60_sa_setconcurrency)\n(long long res, long long concurrency_) {\n  /* TODO */\n}\nPRE_SYSCALL(compat_60_sa_yield)(void) { /* TODO */ }\nPOST_SYSCALL(compat_60_sa_yield)(long long res) { /* TODO */ }\nPRE_SYSCALL(compat_60_sa_preempt)(long long sa_id_) { /* TODO */ }\nPOST_SYSCALL(compat_60_sa_preempt)(long long res, long long sa_id_) {\n  /* TODO */\n}\n/* syscall 336 has been skipped */\n/* syscall 337 has been skipped */\n/* syscall 338 has been skipped */\n/* syscall 339 has been skipped 
*/\nPRE_SYSCALL(__sigaction_sigtramp)\n(long long signum_, void *nsa_, void *osa_, void *tramp_, long long vers_) {\n  if (nsa_) {\n    PRE_READ(nsa_, sizeof(__sanitizer_sigaction));\n  }\n}\nPOST_SYSCALL(__sigaction_sigtramp)\n(long long res, long long signum_, void *nsa_, void *osa_, void *tramp_,\n  long long vers_) {\n  if (nsa_) {\n    PRE_READ(nsa_, sizeof(__sanitizer_sigaction));\n  }\n}\n/* syscall 341 has been skipped */\n/* syscall 342 has been skipped */\nPRE_SYSCALL(rasctl)(void *addr_, long long len_, long long op_) {\n  /* Nothing to do */\n}\nPOST_SYSCALL(rasctl)\n(long long res, void *addr_, long long len_, long long op_) {\n  /* Nothing to do */\n}\nPRE_SYSCALL(kqueue)(void) { /* Nothing to do */ }\nPOST_SYSCALL(kqueue)(long long res) { /* Nothing to do */ }\nPRE_SYSCALL(compat_50_kevent)\n(long long fd_, void *changelist_, long long nchanges_, void *eventlist_,\n  long long nevents_, void *timeout_) {\n  /* TODO */\n}\nPOST_SYSCALL(compat_50_kevent)\n(long long res, long long fd_, void *changelist_, long long nchanges_,\n  void *eventlist_, long long nevents_, void *timeout_) {\n  /* TODO */\n}\nPRE_SYSCALL(_sched_setparam)\n(long long pid_, long long lid_, long long policy_, void *params_) {\n  if (params_) {\n    PRE_READ(params_, struct_sched_param_sz);\n  }\n}\nPOST_SYSCALL(_sched_setparam)\n(long long res, long long pid_, long long lid_, long long policy_,\n  void *params_) {\n  if (params_) {\n    PRE_READ(params_, struct_sched_param_sz);\n  }\n}\nPRE_SYSCALL(_sched_getparam)\n(long long pid_, long long lid_, void *policy_, void *params_) {\n  /* Nothing to do */\n}\nPOST_SYSCALL(_sched_getparam)\n(long long res, long long pid_, long long lid_, void *policy_, void *params_) {\n  /* Nothing to do */\n}\nPRE_SYSCALL(_sched_setaffinity)\n(long long pid_, long long lid_, long long size_, void *cpuset_) {\n  if (cpuset_) {\n    PRE_READ(cpuset_, size_);\n  }\n}\nPOST_SYSCALL(_sched_setaffinity)\n(long long res, long long pid_, long long lid_, 
long long size_,\n  void *cpuset_) {\n  if (cpuset_) {\n    PRE_READ(cpuset_, size_);\n  }\n}\nPRE_SYSCALL(_sched_getaffinity)\n(long long pid_, long long lid_, long long size_, void *cpuset_) {\n  /* Nothing to do */\n}\nPOST_SYSCALL(_sched_getaffinity)\n(long long res, long long pid_, long long lid_, long long size_,\n  void *cpuset_) {\n  /* Nothing to do */\n}\nPRE_SYSCALL(sched_yield)(void) { /* Nothing to do */ }\nPOST_SYSCALL(sched_yield)(long long res) { /* Nothing to do */ }\nPRE_SYSCALL(_sched_protect)(long long priority_) { /* Nothing to do */ }\nPOST_SYSCALL(_sched_protect)(long long res, long long priority_) {\n  /* Nothing to do */\n}\n/* syscall 352 has been skipped */\n/* syscall 353 has been skipped */\nPRE_SYSCALL(fsync_range)\n(long long fd_, long long flags_, long long start_, long long length_) {\n  /* Nothing to do */\n}\nPOST_SYSCALL(fsync_range)\n(long long res, long long fd_, long long flags_, long long start_,\n  long long length_) {\n  /* Nothing to do */\n}\nPRE_SYSCALL(uuidgen)(void *store_, long long count_) { /* Nothing to do */ }\nPOST_SYSCALL(uuidgen)(long long res, void *store_, long long count_) {\n  /* Nothing to do */\n}\nPRE_SYSCALL(compat_90_getvfsstat)\n(void *buf_, long long bufsize_, long long flags_) {\n  /* Nothing to do */\n}\nPOST_SYSCALL(compat_90_getvfsstat)\n(long long res, void *buf_, long long bufsize_, long long flags_) {\n  /* Nothing to do */\n}\nPRE_SYSCALL(compat_90_statvfs1)(void *path_, void *buf_, long long flags_) {\n  const char *path = (const char *)path_;\n  if (path) {\n    PRE_READ(path, __sanitizer::internal_strlen(path) + 1);\n  }\n}\nPOST_SYSCALL(compat_90_statvfs1)\n(long long res, void *path_, void *buf_, long long flags_) {\n  const char *path = (const char *)path_;\n  if (path) {\n    POST_READ(path, __sanitizer::internal_strlen(path) + 1);\n  }\n}\nPRE_SYSCALL(compat_90_fstatvfs1)(long long fd_, void *buf_, long long flags_) {\n  /* Nothing to do */\n}\nPOST_SYSCALL(compat_90_fstatvfs1)\n(long 
long res, long long fd_, void *buf_, long long flags_) {\n  /* Nothing to do */\n}\nPRE_SYSCALL(compat_30_fhstatvfs1)(void *fhp_, void *buf_, long long flags_) {\n  /* TODO */\n}\nPOST_SYSCALL(compat_30_fhstatvfs1)\n(long long res, void *fhp_, void *buf_, long long flags_) {\n  /* TODO */\n}\nPRE_SYSCALL(extattrctl)\n(void *path_, long long cmd_, void *filename_, long long attrnamespace_,\n  void *attrname_) {\n  const char *path = (const char *)path_;\n  if (path) {\n    PRE_READ(path, __sanitizer::internal_strlen(path) + 1);\n  }\n}\nPOST_SYSCALL(extattrctl)\n(long long res, void *path_, long long cmd_, void *filename_,\n  long long attrnamespace_, void *attrname_) {\n  const char *path = (const char *)path_;\n  if (path) {\n    POST_READ(path, __sanitizer::internal_strlen(path) + 1);\n  }\n}\nPRE_SYSCALL(extattr_set_file)\n(void *path_, long long attrnamespace_, void *attrname_, void *data_,\n  long long nbytes_) {\n  const char *path = (const char *)path_;\n  if (path) {\n    PRE_READ(path, __sanitizer::internal_strlen(path) + 1);\n  }\n}\nPOST_SYSCALL(extattr_set_file)\n(long long res, void *path_, long long attrnamespace_, void *attrname_,\n  void *data_, long long nbytes_) {\n  const char *path = (const char *)path_;\n  if (path) {\n    POST_READ(path, __sanitizer::internal_strlen(path) + 1);\n  }\n}\nPRE_SYSCALL(extattr_get_file)\n(void *path_, long long attrnamespace_, void *attrname_, void *data_,\n  long long nbytes_) {\n  const char *path = (const char *)path_;\n  if (path) {\n    PRE_READ(path, __sanitizer::internal_strlen(path) + 1);\n  }\n}\nPOST_SYSCALL(extattr_get_file)\n(long long res, void *path_, long long attrnamespace_, void *attrname_,\n  void *data_, long long nbytes_) {\n  const char *path = (const char *)path_;\n  if (path) {\n    POST_READ(path, __sanitizer::internal_strlen(path) + 1);\n  }\n}\nPRE_SYSCALL(extattr_delete_file)\n(void *path_, long long attrnamespace_, void *attrname_) {\n  const char *path = (const char *)path_;\n  if 
(path) {\n    PRE_READ(path, __sanitizer::internal_strlen(path) + 1);\n  }\n}\nPOST_SYSCALL(extattr_delete_file)\n(long long res, void *path_, long long attrnamespace_, void *attrname_) {\n  const char *path = (const char *)path_;\n  if (path) {\n    POST_READ(path, __sanitizer::internal_strlen(path) + 1);\n  }\n}\nPRE_SYSCALL(extattr_set_fd)\n(long long fd_, long long attrnamespace_, void *attrname_, void *data_,\n  long long nbytes_) {\n  /* TODO */\n}\nPOST_SYSCALL(extattr_set_fd)\n(long long res, long long fd_, long long attrnamespace_, void *attrname_,\n  void *data_, long long nbytes_) {\n  /* TODO */\n}\nPRE_SYSCALL(extattr_get_fd)\n(long long fd_, long long attrnamespace_, void *attrname_, void *data_,\n  long long nbytes_) {\n  /* TODO */\n}\nPOST_SYSCALL(extattr_get_fd)\n(long long res, long long fd_, long long attrnamespace_, void *attrname_,\n  void *data_, long long nbytes_) {\n  /* TODO */\n}\nPRE_SYSCALL(extattr_delete_fd)\n(long long fd_, long long attrnamespace_, void *attrname_) {\n  /* TODO */\n}\nPOST_SYSCALL(extattr_delete_fd)\n(long long res, long long fd_, long long attrnamespace_, void *attrname_) {\n  /* TODO */\n}\nPRE_SYSCALL(extattr_set_link)\n(void *path_, long long attrnamespace_, void *attrname_, void *data_,\n  long long nbytes_) {\n  const char *path = (const char *)path_;\n  if (path) {\n    PRE_READ(path, __sanitizer::internal_strlen(path) + 1);\n  }\n}\nPOST_SYSCALL(extattr_set_link)\n(long long res, void *path_, long long attrnamespace_, void *attrname_,\n  void *data_, long long nbytes_) {\n  const char *path = (const char *)path_;\n  if (path) {\n    POST_READ(path, __sanitizer::internal_strlen(path) + 1);\n  }\n}\nPRE_SYSCALL(extattr_get_link)\n(void *path_, long long attrnamespace_, void *attrname_, void *data_,\n  long long nbytes_) {\n  const char *path = (const char *)path_;\n  if (path) {\n    PRE_READ(path, __sanitizer::internal_strlen(path) + 1);\n  }\n}\nPOST_SYSCALL(extattr_get_link)\n(long long res, void *path_, 
long long attrnamespace_, void *attrname_,\n  void *data_, long long nbytes_) {\n  const char *path = (const char *)path_;\n  if (path) {\n    POST_READ(path, __sanitizer::internal_strlen(path) + 1);\n  }\n}\nPRE_SYSCALL(extattr_delete_link)\n(void *path_, long long attrnamespace_, void *attrname_) {\n  const char *path = (const char *)path_;\n  if (path) {\n    PRE_READ(path, __sanitizer::internal_strlen(path) + 1);\n  }\n}\nPOST_SYSCALL(extattr_delete_link)\n(long long res, void *path_, long long attrnamespace_, void *attrname_) {\n  const char *path = (const char *)path_;\n  if (path) {\n    POST_READ(path, __sanitizer::internal_strlen(path) + 1);\n  }\n}\nPRE_SYSCALL(extattr_list_fd)\n(long long fd_, long long attrnamespace_, void *data_, long long nbytes_) {\n  /* TODO */\n}\nPOST_SYSCALL(extattr_list_fd)\n(long long res, long long fd_, long long attrnamespace_, void *data_,\n  long long nbytes_) {\n  /* TODO */\n}\nPRE_SYSCALL(extattr_list_file)\n(void *path_, long long attrnamespace_, void *data_, long long nbytes_) {\n  const char *path = (const char *)path_;\n  if (path) {\n    PRE_READ(path, __sanitizer::internal_strlen(path) + 1);\n  }\n}\nPOST_SYSCALL(extattr_list_file)\n(long long res, void *path_, long long attrnamespace_, void *data_,\n  long long nbytes_) {\n  const char *path = (const char *)path_;\n  if (path) {\n    POST_READ(path, __sanitizer::internal_strlen(path) + 1);\n  }\n}\nPRE_SYSCALL(extattr_list_link)\n(void *path_, long long attrnamespace_, void *data_, long long nbytes_) {\n  const char *path = (const char *)path_;\n  if (path) {\n    PRE_READ(path, __sanitizer::internal_strlen(path) + 1);\n  }\n}\nPOST_SYSCALL(extattr_list_link)\n(long long res, void *path_, long long attrnamespace_, void *data_,\n  long long nbytes_) {\n  const char *path = (const char *)path_;\n  if (path) {\n    POST_READ(path, __sanitizer::internal_strlen(path) + 1);\n  }\n}\nPRE_SYSCALL(compat_50_pselect)\n(long long nd_, void *in_, void *ou_, void *ex_, void 
*ts_, void *mask_) {\n  /* TODO */\n}\nPOST_SYSCALL(compat_50_pselect)\n(long long res, long long nd_, void *in_, void *ou_, void *ex_, void *ts_,\n  void *mask_) {\n  /* TODO */\n}\nPRE_SYSCALL(compat_50_pollts)\n(void *fds_, long long nfds_, void *ts_, void *mask_) {\n  /* TODO */\n}\nPOST_SYSCALL(compat_50_pollts)\n(long long res, void *fds_, long long nfds_, void *ts_, void *mask_) {\n  /* TODO */\n}\nPRE_SYSCALL(setxattr)\n(void *path_, void *name_, void *value_, long long size_, long long flags_) {\n  const char *path = (const char *)path_;\n  if (path) {\n    PRE_READ(path, __sanitizer::internal_strlen(path) + 1);\n  }\n}\nPOST_SYSCALL(setxattr)\n(long long res, void *path_, void *name_, void *value_, long long size_,\n  long long flags_) {\n  const char *path = (const char *)path_;\n  if (path) {\n    POST_READ(path, __sanitizer::internal_strlen(path) + 1);\n  }\n}\nPRE_SYSCALL(lsetxattr)\n(void *path_, void *name_, void *value_, long long size_, long long flags_) {\n  const char *path = (const char *)path_;\n  if (path) {\n    PRE_READ(path, __sanitizer::internal_strlen(path) + 1);\n  }\n}\nPOST_SYSCALL(lsetxattr)\n(long long res, void *path_, void *name_, void *value_, long long size_,\n  long long flags_) {\n  const char *path = (const char *)path_;\n  if (path) {\n    POST_READ(path, __sanitizer::internal_strlen(path) + 1);\n  }\n}\nPRE_SYSCALL(fsetxattr)\n(long long fd_, void *name_, void *value_, long long size_, long long flags_) {\n  /* Nothing to do */\n}\nPOST_SYSCALL(fsetxattr)\n(long long res, long long fd_, void *name_, void *value_, long long size_,\n  long long flags_) {\n  /* Nothing to do */\n}\nPRE_SYSCALL(getxattr)(void *path_, void *name_, void *value_, long long size_) {\n  const char *path = (const char *)path_;\n  if (path) {\n    PRE_READ(path, __sanitizer::internal_strlen(path) + 1);\n  }\n}\nPOST_SYSCALL(getxattr)\n(long long res, void *path_, void *name_, void *value_, long long size_) {\n  const char *path = (const char 
*)path_;\n  if (path) {\n    POST_READ(path, __sanitizer::internal_strlen(path) + 1);\n  }\n}\nPRE_SYSCALL(lgetxattr)\n(void *path_, void *name_, void *value_, long long size_) {\n  const char *path = (const char *)path_;\n  if (path) {\n    PRE_READ(path, __sanitizer::internal_strlen(path) + 1);\n  }\n}\nPOST_SYSCALL(lgetxattr)\n(long long res, void *path_, void *name_, void *value_, long long size_) {\n  const char *path = (const char *)path_;\n  if (path) {\n    POST_READ(path, __sanitizer::internal_strlen(path) + 1);\n  }\n}\nPRE_SYSCALL(fgetxattr)\n(long long fd_, void *name_, void *value_, long long size_) {\n  /* Nothing to do */\n}\nPOST_SYSCALL(fgetxattr)\n(long long res, long long fd_, void *name_, void *value_, long long size_) {\n  /* Nothing to do */\n}\nPRE_SYSCALL(listxattr)(void *path_, void *list_, long long size_) {\n  const char *path = (const char *)path_;\n  if (path) {\n    PRE_READ(path, __sanitizer::internal_strlen(path) + 1);\n  }\n}\nPOST_SYSCALL(listxattr)\n(long long res, void *path_, void *list_, long long size_) {\n  const char *path = (const char *)path_;\n  if (path) {\n    POST_READ(path, __sanitizer::internal_strlen(path) + 1);\n  }\n}\nPRE_SYSCALL(llistxattr)(void *path_, void *list_, long long size_) {\n  const char *path = (const char *)path_;\n  if (path) {\n    PRE_READ(path, __sanitizer::internal_strlen(path) + 1);\n  }\n}\nPOST_SYSCALL(llistxattr)\n(long long res, void *path_, void *list_, long long size_) {\n  const char *path = (const char *)path_;\n  if (path) {\n    POST_READ(path, __sanitizer::internal_strlen(path) + 1);\n  }\n}\nPRE_SYSCALL(flistxattr)(long long fd_, void *list_, long long size_) {\n  /* TODO */\n}\nPOST_SYSCALL(flistxattr)\n(long long res, long long fd_, void *list_, long long size_) {\n  /* TODO */\n}\nPRE_SYSCALL(removexattr)(void *path_, void *name_) {\n  const char *path = (const char *)path_;\n  if (path) {\n    PRE_READ(path, __sanitizer::internal_strlen(path) + 1);\n  
}\n}\nPOST_SYSCALL(removexattr)(long long res, void *path_, void *name_) {\n  const char *path = (const char *)path_;\n  if (path) {\n    POST_READ(path, __sanitizer::internal_strlen(path) + 1);\n  }\n}\nPRE_SYSCALL(lremovexattr)(void *path_, void *name_) {\n  const char *path = (const char *)path_;\n  if (path) {\n    PRE_READ(path, __sanitizer::internal_strlen(path) + 1);\n  }\n}\nPOST_SYSCALL(lremovexattr)(long long res, void *path_, void *name_) {\n  const char *path = (const char *)path_;\n  if (path) {\n    POST_READ(path, __sanitizer::internal_strlen(path) + 1);\n  }\n}\nPRE_SYSCALL(fremovexattr)(long long fd_, void *name_) { /* TODO */ }\nPOST_SYSCALL(fremovexattr)(long long res, long long fd_, void *name_) {\n  /* TODO */\n}\nPRE_SYSCALL(compat_50___stat30)(void *path_, void *ub_) { /* TODO */ }\nPOST_SYSCALL(compat_50___stat30)(long long res, void *path_, void *ub_) {\n  /* TODO */\n}\nPRE_SYSCALL(compat_50___fstat30)(long long fd_, void *sb_) { /* TODO */ }\nPOST_SYSCALL(compat_50___fstat30)(long long res, long long fd_, void *sb_) {\n  /* TODO */\n}\nPRE_SYSCALL(compat_50___lstat30)(void *path_, void *ub_) { /* TODO */ }\nPOST_SYSCALL(compat_50___lstat30)(long long res, void *path_, void *ub_) {\n  /* TODO */\n}\nPRE_SYSCALL(__getdents30)(long long fd_, void *buf_, long long count_) {\n  /* Nothing to do */\n}\nPOST_SYSCALL(__getdents30)\n(long long res, long long fd_, void *buf_, long long count_) {\n  /* Nothing to do */\n}\nPRE_SYSCALL(posix_fadvise)(long long) { /* Nothing to do */ }\nPOST_SYSCALL(posix_fadvise)(long long res, long long) { /* Nothing to do */ }\nPRE_SYSCALL(compat_30___fhstat30)(void *fhp_, void *sb_) { /* TODO */ }\nPOST_SYSCALL(compat_30___fhstat30)(long long res, void *fhp_, void *sb_) {\n  /* TODO */\n}\nPRE_SYSCALL(compat_50___ntp_gettime30)(void *ntvp_) { /* TODO */ }\nPOST_SYSCALL(compat_50___ntp_gettime30)(long long res, void *ntvp_) {\n  /* TODO */\n}\nPRE_SYSCALL(__socket30)\n(long long domain_, long long type_, long long 
protocol_) {\n  /* Nothing to do */\n}\nPOST_SYSCALL(__socket30)\n(long long res, long long domain_, long long type_, long long protocol_) {\n  /* Nothing to do */\n}\nPRE_SYSCALL(__getfh30)(void *fname_, void *fhp_, void *fh_size_) {\n  const char *fname = (const char *)fname_;\n  if (fname) {\n    PRE_READ(fname, __sanitizer::internal_strlen(fname) + 1);\n  }\n}\nPOST_SYSCALL(__getfh30)\n(long long res, void *fname_, void *fhp_, void *fh_size_) {\n  const char *fname = (const char *)fname_;\n  if (res == 0) {\n    if (fname) {\n      POST_READ(fname, __sanitizer::internal_strlen(fname) + 1);\n    }\n  }\n}\nPRE_SYSCALL(__fhopen40)(void *fhp_, long long fh_size_, long long flags_) {\n  if (fhp_) {\n    PRE_READ(fhp_, fh_size_);\n  }\n}\nPOST_SYSCALL(__fhopen40)\n(long long res, void *fhp_, long long fh_size_, long long flags_) {}\nPRE_SYSCALL(compat_90_fhstatvfs1)\n(void *fhp_, long long fh_size_, void *buf_, long long flags_) {\n  if (fhp_) {\n    PRE_READ(fhp_, fh_size_);\n  }\n}\nPOST_SYSCALL(compat_90_fhstatvfs1)\n(long long res, void *fhp_, long long fh_size_, void *buf_, long long flags_) {}\nPRE_SYSCALL(compat_50___fhstat40)(void *fhp_, long long fh_size_, void *sb_) {\n  if (fhp_) {\n    PRE_READ(fhp_, fh_size_);\n  }\n}\nPOST_SYSCALL(compat_50___fhstat40)\n(long long res, void *fhp_, long long fh_size_, void *sb_) {}\nPRE_SYSCALL(aio_cancel)(long long fildes_, void *aiocbp_) {\n  if (aiocbp_) {\n    PRE_READ(aiocbp_, sizeof(struct __sanitizer_aiocb));\n  }\n}\nPOST_SYSCALL(aio_cancel)(long long res, long long fildes_, void *aiocbp_) {}\nPRE_SYSCALL(aio_error)(void *aiocbp_) {\n  if (aiocbp_) {\n    PRE_READ(aiocbp_, sizeof(struct __sanitizer_aiocb));\n  }\n}\nPOST_SYSCALL(aio_error)(long long res, void *aiocbp_) {}\nPRE_SYSCALL(aio_fsync)(long long op_, void *aiocbp_) {\n  if (aiocbp_) {\n    PRE_READ(aiocbp_, sizeof(struct __sanitizer_aiocb));\n  }\n}\nPOST_SYSCALL(aio_fsync)(long long res, long long op_, void *aiocbp_) {}\nPRE_SYSCALL(aio_read)(void 
*aiocbp_) {\n  if (aiocbp_) {\n    PRE_READ(aiocbp_, sizeof(struct __sanitizer_aiocb));\n  }\n}\nPOST_SYSCALL(aio_read)(long long res, void *aiocbp_) {}\nPRE_SYSCALL(aio_return)(void *aiocbp_) {\n  if (aiocbp_) {\n    PRE_READ(aiocbp_, sizeof(struct __sanitizer_aiocb));\n  }\n}\nPOST_SYSCALL(aio_return)(long long res, void *aiocbp_) {}\nPRE_SYSCALL(compat_50_aio_suspend)\n(void *list_, long long nent_, void *timeout_) {\n  /* TODO */\n}\nPOST_SYSCALL(compat_50_aio_suspend)\n(long long res, void *list_, long long nent_, void *timeout_) {\n  /* TODO */\n}\nPRE_SYSCALL(aio_write)(void *aiocbp_) {\n  if (aiocbp_) {\n    PRE_READ(aiocbp_, sizeof(struct __sanitizer_aiocb));\n  }\n}\nPOST_SYSCALL(aio_write)(long long res, void *aiocbp_) {}\nPRE_SYSCALL(lio_listio)\n(long long mode_, void *list_, long long nent_, void *sig_) {\n  /* Nothing to do */\n}\nPOST_SYSCALL(lio_listio)\n(long long res, long long mode_, void *list_, long long nent_, void *sig_) {\n  /* Nothing to do */\n}\n/* syscall 407 has been skipped */\n/* syscall 408 has been skipped */\n/* syscall 409 has been skipped */\nPRE_SYSCALL(__mount50)\n(void *type_, void *path_, long long flags_, void *data_, long long data_len_) {\n  const char *type = (const char *)type_;\n  const char *path = (const char *)path_;\n  if (type) {\n    PRE_READ(type, __sanitizer::internal_strlen(type) + 1);\n  }\n  if (path) {\n    PRE_READ(path, __sanitizer::internal_strlen(path) + 1);\n  }\n  if (data_) {\n    PRE_READ(data_, data_len_);\n  }\n}\nPOST_SYSCALL(__mount50)\n(long long res, void *type_, void *path_, long long flags_, void *data_,\n  long long data_len_) {\n  const char *type = (const char *)type_;\n  const char *path = (const char *)path_;\n  if (type) {\n    POST_READ(type, __sanitizer::internal_strlen(type) + 1);\n  }\n  if (path) {\n    POST_READ(path, __sanitizer::internal_strlen(path) + 1);\n  }\n  if (data_) {\n    POST_READ(data_, data_len_);\n  }\n}\nPRE_SYSCALL(mremap)\n(void *old_address_, long long 
old_size_, void *new_address_,\n  long long new_size_, long long flags_) {\n  /* Nothing to do */\n}\nPOST_SYSCALL(mremap)\n(long long res, void *old_address_, long long old_size_, void *new_address_,\n  long long new_size_, long long flags_) {\n  /* Nothing to do */\n}\nPRE_SYSCALL(pset_create)(void *psid_) { /* Nothing to do */ }\nPOST_SYSCALL(pset_create)(long long res, void *psid_) { /* Nothing to do */ }\nPRE_SYSCALL(pset_destroy)(long long psid_) { /* Nothing to do */ }\nPOST_SYSCALL(pset_destroy)(long long res, long long psid_) {\n  /* Nothing to do */\n}\nPRE_SYSCALL(pset_assign)(long long psid_, long long cpuid_, void *opsid_) {\n  /* Nothing to do */\n}\nPOST_SYSCALL(pset_assign)\n(long long res, long long psid_, long long cpuid_, void *opsid_) {\n  /* Nothing to do */\n}\nPRE_SYSCALL(_pset_bind)\n(long long idtype_, long long first_id_, long long second_id_, long long psid_,\n  void *opsid_) {\n  /* Nothing to do */\n}\nPOST_SYSCALL(_pset_bind)\n(long long res, long long idtype_, long long first_id_, long long second_id_,\n  long long psid_, void *opsid_) {\n  /* Nothing to do */\n}\nPRE_SYSCALL(__posix_fadvise50)\n(long long fd_, long long PAD_, long long offset_, long long len_,\n  long long advice_) {\n  /* Nothing to do */\n}\nPOST_SYSCALL(__posix_fadvise50)\n(long long res, long long fd_, long long PAD_, long long offset_,\n  long long len_, long long advice_) {\n  /* Nothing to do */\n}\nPRE_SYSCALL(__select50)\n(long long nd_, void *in_, void *ou_, void *ex_, void *tv_) {\n  /* Nothing to do */\n}\nPOST_SYSCALL(__select50)\n(long long res, long long nd_, void *in_, void *ou_, void *ex_, void *tv_) {\n  /* Nothing to do */\n}\nPRE_SYSCALL(__gettimeofday50)(void *tp_, void *tzp_) { /* Nothing to do */ }\nPOST_SYSCALL(__gettimeofday50)(long long res, void *tp_, void *tzp_) {\n  /* Nothing to do */\n}\nPRE_SYSCALL(__settimeofday50)(void *tv_, void *tzp_) {\n  if (tv_) {\n    PRE_READ(tv_, timeval_sz);\n  }\n  if (tzp_) {\n    PRE_READ(tzp_, 
struct_timezone_sz);\n  }\n}\nPOST_SYSCALL(__settimeofday50)(long long res, void *tv_, void *tzp_) {}\nPRE_SYSCALL(__utimes50)(void *path_, void *tptr_) {\n  struct __sanitizer_timespec **tptr = (struct __sanitizer_timespec **)tptr_;\n  const char *path = (const char *)path_;\n  if (path) {\n    PRE_READ(path, __sanitizer::internal_strlen(path) + 1);\n  }\n  if (tptr) {\n    PRE_READ(tptr[0], struct_timespec_sz);\n    PRE_READ(tptr[1], struct_timespec_sz);\n  }\n}\nPOST_SYSCALL(__utimes50)(long long res, void *path_, void *tptr_) {}\nPRE_SYSCALL(__adjtime50)(void *delta_, void *olddelta_) {\n  if (delta_) {\n    PRE_READ(delta_, timeval_sz);\n  }\n}\nPOST_SYSCALL(__adjtime50)(long long res, void *delta_, void *olddelta_) {}\nPRE_SYSCALL(__lfs_segwait50)(void *fsidp_, void *tv_) { /* TODO */ }\nPOST_SYSCALL(__lfs_segwait50)(long long res, void *fsidp_, void *tv_) {\n  /* TODO */\n}\nPRE_SYSCALL(__futimes50)(long long fd_, void *tptr_) {\n  struct __sanitizer_timespec **tptr = (struct __sanitizer_timespec **)tptr_;\n  if (tptr) {\n    PRE_READ(tptr[0], struct_timespec_sz);\n    PRE_READ(tptr[1], struct_timespec_sz);\n  }\n}\nPOST_SYSCALL(__futimes50)(long long res, long long fd_, void *tptr_) {}\nPRE_SYSCALL(__lutimes50)(void *path_, void *tptr_) {\n  struct __sanitizer_timespec **tptr = (struct __sanitizer_timespec **)tptr_;\n  const char *path = (const char *)path_;\n  if (path) {\n    PRE_READ(path, __sanitizer::internal_strlen(path) + 1);\n  }\n  if (tptr) {\n    PRE_READ(tptr[0], struct_timespec_sz);\n    PRE_READ(tptr[1], struct_timespec_sz);\n  }\n}\nPOST_SYSCALL(__lutimes50)(long long res, void *path_, void *tptr_) {\n  struct __sanitizer_timespec **tptr = (struct __sanitizer_timespec **)tptr_;\n  const char *path = (const char *)path_;\n  if (path) {\n    POST_READ(path, __sanitizer::internal_strlen(path) + 1);\n  }\n  if (tptr) {\n    POST_READ(tptr[0], struct_timespec_sz);\n    POST_READ(tptr[1], struct_timespec_sz);\n  
}\n}\nPRE_SYSCALL(__setitimer50)(long long which_, void *itv_, void *oitv_) {\n  struct __sanitizer_itimerval *itv = (struct __sanitizer_itimerval *)itv_;\n  if (itv) {\n    PRE_READ(&itv->it_interval.tv_sec, sizeof(__sanitizer_time_t));\n    PRE_READ(&itv->it_interval.tv_usec, sizeof(__sanitizer_suseconds_t));\n    PRE_READ(&itv->it_value.tv_sec, sizeof(__sanitizer_time_t));\n    PRE_READ(&itv->it_value.tv_usec, sizeof(__sanitizer_suseconds_t));\n  }\n}\nPOST_SYSCALL(__setitimer50)\n(long long res, long long which_, void *itv_, void *oitv_) {}\nPRE_SYSCALL(__getitimer50)(long long which_, void *itv_) { /* Nothing to do */ }\nPOST_SYSCALL(__getitimer50)(long long res, long long which_, void *itv_) {\n  /* Nothing to do */\n}\nPRE_SYSCALL(__clock_gettime50)(long long clock_id_, void *tp_) {\n  /* Nothing to do */\n}\nPOST_SYSCALL(__clock_gettime50)(long long res, long long clock_id_, void *tp_) {\n  /* Nothing to do */\n}\nPRE_SYSCALL(__clock_settime50)(long long clock_id_, void *tp_) {\n  if (tp_) {\n    PRE_READ(tp_, struct_timespec_sz);\n  }\n}\nPOST_SYSCALL(__clock_settime50)\n(long long res, long long clock_id_, void *tp_) {}\nPRE_SYSCALL(__clock_getres50)(long long clock_id_, void *tp_) {\n  /* Nothing to do */\n}\nPOST_SYSCALL(__clock_getres50)(long long res, long long clock_id_, void *tp_) {\n  /* Nothing to do */\n}\nPRE_SYSCALL(__nanosleep50)(void *rqtp_, void *rmtp_) {\n  if (rqtp_) {\n    PRE_READ(rqtp_, struct_timespec_sz);\n  }\n}\nPOST_SYSCALL(__nanosleep50)(long long res, void *rqtp_, void *rmtp_) {}\nPRE_SYSCALL(____sigtimedwait50)(void *set_, void *info_, void *timeout_) {\n  if (set_) {\n    PRE_READ(set_, sizeof(__sanitizer_sigset_t));\n  }\n  if (timeout_) {\n    PRE_READ(timeout_, struct_timespec_sz);\n  }\n}\nPOST_SYSCALL(____sigtimedwait50)\n(long long res, void *set_, void *info_, void *timeout_) {}\nPRE_SYSCALL(__mq_timedsend50)\n(long long mqdes_, void *msg_ptr_, long long msg_len_, long long msg_prio_,\n  void *abs_timeout_) {\n  if 
(msg_ptr_) {\n    PRE_READ(msg_ptr_, msg_len_);\n  }\n  if (abs_timeout_) {\n    PRE_READ(abs_timeout_, struct_timespec_sz);\n  }\n}\nPOST_SYSCALL(__mq_timedsend50)\n(long long res, long long mqdes_, void *msg_ptr_, long long msg_len_,\n  long long msg_prio_, void *abs_timeout_) {}\nPRE_SYSCALL(__mq_timedreceive50)\n(long long mqdes_, void *msg_ptr_, long long msg_len_, void *msg_prio_,\n  void *abs_timeout_) {\n  if (msg_ptr_) {\n    PRE_READ(msg_ptr_, msg_len_);\n  }\n  if (abs_timeout_) {\n    PRE_READ(abs_timeout_, struct_timespec_sz);\n  }\n}\nPOST_SYSCALL(__mq_timedreceive50)\n(long long res, long long mqdes_, void *msg_ptr_, long long msg_len_,\n  void *msg_prio_, void *abs_timeout_) {}\nPRE_SYSCALL(compat_60__lwp_park)\n(void *ts_, long long unpark_, void *hint_, void *unparkhint_) {\n  /* TODO */\n}\nPOST_SYSCALL(compat_60__lwp_park)\n(long long res, void *ts_, long long unpark_, void *hint_, void *unparkhint_) {\n  /* TODO */\n}\nPRE_SYSCALL(__kevent50)\n(long long fd_, void *changelist_, long long nchanges_, void *eventlist_,\n  long long nevents_, void *timeout_) {\n  if (changelist_) {\n    PRE_READ(changelist_, nchanges_ * struct_kevent_sz);\n  }\n  if (timeout_) {\n    PRE_READ(timeout_, struct_timespec_sz);\n  }\n}\nPOST_SYSCALL(__kevent50)\n(long long res, long long fd_, void *changelist_, long long nchanges_,\n  void *eventlist_, long long nevents_, void *timeout_) {}\nPRE_SYSCALL(__pselect50)\n(long long nd_, void *in_, void *ou_, void *ex_, void *ts_, void *mask_) {\n  if (ts_) {\n    PRE_READ(ts_, struct_timespec_sz);\n  }\n  if (mask_) {\n    PRE_READ(mask_, sizeof(struct __sanitizer_sigset_t));\n  }\n}\nPOST_SYSCALL(__pselect50)\n(long long res, long long nd_, void *in_, void *ou_, void *ex_, void *ts_,\n  void *mask_) {}\nPRE_SYSCALL(__pollts50)(void *fds_, long long nfds_, void *ts_, void *mask_) {\n  if (ts_) {\n    PRE_READ(ts_, struct_timespec_sz);\n  }\n  if (mask_) {\n    PRE_READ(mask_, sizeof(struct __sanitizer_sigset_t));\n  
}\n}\nPOST_SYSCALL(__pollts50)\n(long long res, void *fds_, long long nfds_, void *ts_, void *mask_) {}\nPRE_SYSCALL(__aio_suspend50)(void *list_, long long nent_, void *timeout_) {\n  int i;\n  const struct aiocb *const *list = (const struct aiocb *const *)list_;\n  if (list) {\n    for (i = 0; i < nent_; i++) {\n      if (list[i]) {\n        PRE_READ(list[i], sizeof(struct __sanitizer_aiocb));\n      }\n    }\n  }\n  if (timeout_) {\n    PRE_READ(timeout_, struct_timespec_sz);\n  }\n}\nPOST_SYSCALL(__aio_suspend50)\n(long long res, void *list_, long long nent_, void *timeout_) {}\nPRE_SYSCALL(__stat50)(void *path_, void *ub_) {\n  const char *path = (const char *)path_;\n  if (path) {\n    PRE_READ(path, __sanitizer::internal_strlen(path) + 1);\n  }\n}\nPOST_SYSCALL(__stat50)(long long res, void *path_, void *ub_) {\n  const char *path = (const char *)path_;\n  if (res == 0) {\n    if (path) {\n      POST_READ(path, __sanitizer::internal_strlen(path) + 1);\n    }\n  }\n}\nPRE_SYSCALL(__fstat50)(long long fd_, void *sb_) { /* Nothing to do */ }\nPOST_SYSCALL(__fstat50)(long long res, long long fd_, void *sb_) {\n  /* Nothing to do */\n}\nPRE_SYSCALL(__lstat50)(void *path_, void *ub_) {\n  const char *path = (const char *)path_;\n  if (path) {\n    PRE_READ(path, __sanitizer::internal_strlen(path) + 1);\n  }\n}\nPOST_SYSCALL(__lstat50)(long long res, void *path_, void *ub_) {\n  const char *path = (const char *)path_;\n  if (res == 0) {\n    if (path) {\n      POST_READ(path, __sanitizer::internal_strlen(path) + 1);\n    }\n  }\n}\nPRE_SYSCALL(____semctl50)\n(long long semid_, long long semnum_, long long cmd_, void *arg_) {\n  /* Nothing to do */\n}\nPOST_SYSCALL(____semctl50)\n(long long res, long long semid_, long long semnum_, long long cmd_,\n  void *arg_) {\n  /* Nothing to do */\n}\nPRE_SYSCALL(__shmctl50)(long long shmid_, long long cmd_, void *buf_) {\n  /* Nothing to do */\n}\nPOST_SYSCALL(__shmctl50)\n(long long res, long long shmid_, long long cmd_, 
void *buf_) {\n  /* Nothing to do */\n}\nPRE_SYSCALL(__msgctl50)(long long msqid_, long long cmd_, void *buf_) {\n  /* Nothing to do */\n}\nPOST_SYSCALL(__msgctl50)\n(long long res, long long msqid_, long long cmd_, void *buf_) {\n  /* Nothing to do */\n}\nPRE_SYSCALL(__getrusage50)(long long who_, void *rusage_) {\n  /* Nothing to do */\n}\nPOST_SYSCALL(__getrusage50)(long long res, long long who_, void *rusage_) {\n  /* Nothing to do */\n}\nPRE_SYSCALL(__timer_settime50)\n(long long timerid_, long long flags_, void *value_, void *ovalue_) {\n  struct __sanitizer_itimerval *value = (struct __sanitizer_itimerval *)value_;\n  if (value) {\n    PRE_READ(&value->it_interval.tv_sec, sizeof(__sanitizer_time_t));\n    PRE_READ(&value->it_interval.tv_usec, sizeof(__sanitizer_suseconds_t));\n    PRE_READ(&value->it_value.tv_sec, sizeof(__sanitizer_time_t));\n    PRE_READ(&value->it_value.tv_usec, sizeof(__sanitizer_suseconds_t));\n  }\n}\nPOST_SYSCALL(__timer_settime50)\n(long long res, long long timerid_, long long flags_, void *value_,\n  void *ovalue_) {\n  struct __sanitizer_itimerval *value = (struct __sanitizer_itimerval *)value_;\n  if (res == 0) {\n    if (value) {\n      POST_READ(&value->it_interval.tv_sec, sizeof(__sanitizer_time_t));\n      POST_READ(&value->it_interval.tv_usec, sizeof(__sanitizer_suseconds_t));\n      POST_READ(&value->it_value.tv_sec, sizeof(__sanitizer_time_t));\n      POST_READ(&value->it_value.tv_usec, sizeof(__sanitizer_suseconds_t));\n    }\n  }\n}\nPRE_SYSCALL(__timer_gettime50)(long long timerid_, void *value_) {\n  /* Nothing to do */\n}\nPOST_SYSCALL(__timer_gettime50)\n(long long res, long long timerid_, void *value_) {\n  /* Nothing to do */\n}\n#if defined(NTP) || !defined(_KERNEL_OPT)\nPRE_SYSCALL(__ntp_gettime50)(void *ntvp_) { /* Nothing to do */ }\nPOST_SYSCALL(__ntp_gettime50)(long long res, void *ntvp_) {\n  /* Nothing to do */\n}\n#else\n/* syscall 448 has been skipped */\n#endif\nPRE_SYSCALL(__wait450)\n(long long pid_, 
void *status_, long long options_, void *rusage_) {\n  /* Nothing to do */\n}\nPOST_SYSCALL(__wait450)\n(long long res, long long pid_, void *status_, long long options_,\n  void *rusage_) {\n  /* Nothing to do */\n}\nPRE_SYSCALL(__mknod50)(void *path_, long long mode_, long long dev_) {\n  const char *path = (const char *)path_;\n  if (path) {\n    PRE_READ(path, __sanitizer::internal_strlen(path) + 1);\n  }\n}\nPOST_SYSCALL(__mknod50)\n(long long res, void *path_, long long mode_, long long dev_) {\n  const char *path = (const char *)path_;\n  if (res == 0) {\n    if (path) {\n      POST_READ(path, __sanitizer::internal_strlen(path) + 1);\n    }\n  }\n}\nPRE_SYSCALL(__fhstat50)(void *fhp_, long long fh_size_, void *sb_) {\n  if (fhp_) {\n    PRE_READ(fhp_, fh_size_);\n  }\n}\nPOST_SYSCALL(__fhstat50)\n(long long res, void *fhp_, long long fh_size_, void *sb_) {\n  if (res == 0) {\n    if (fhp_) {\n      POST_READ(fhp_, fh_size_);\n    }\n  }\n}\n/* syscall 452 has been skipped */\nPRE_SYSCALL(pipe2)(void *fildes_, long long flags_) { /* Nothing to do */ }\nPOST_SYSCALL(pipe2)(long long res, void *fildes_, long long flags_) {\n  /* Nothing to do */\n}\nPRE_SYSCALL(dup3)(long long from_, long long to_, long long flags_) {\n  /* Nothing to do */\n}\nPOST_SYSCALL(dup3)\n(long long res, long long from_, long long to_, long long flags_) {\n  /* Nothing to do */\n}\nPRE_SYSCALL(kqueue1)(long long flags_) { /* Nothing to do */ }\nPOST_SYSCALL(kqueue1)(long long res, long long flags_) { /* Nothing to do */ }\nPRE_SYSCALL(paccept)\n(long long s_, void *name_, void *anamelen_, void *mask_, long long flags_) {\n  if (mask_) {\n    PRE_READ(mask_, sizeof(__sanitizer_sigset_t));\n  }\n}\nPOST_SYSCALL(paccept)\n(long long res, long long s_, void *name_, void *anamelen_, void *mask_,\n  long long flags_) {\n  if (res >= 0) {\n    if (mask_) {\n      PRE_READ(mask_, sizeof(__sanitizer_sigset_t));\n    }\n  }\n}\nPRE_SYSCALL(linkat)\n(long long fd1_, void *name1_, long long fd2_, 
void *name2_, long long flags_) {\n  const char *name1 = (const char *)name1_;\n  const char *name2 = (const char *)name2_;\n  if (name1) {\n    PRE_READ(name1, __sanitizer::internal_strlen(name1) + 1);\n  }\n  if (name2) {\n    PRE_READ(name2, __sanitizer::internal_strlen(name2) + 1);\n  }\n}\nPOST_SYSCALL(linkat)\n(long long res, long long fd1_, void *name1_, long long fd2_, void *name2_,\n  long long flags_) {\n  const char *name1 = (const char *)name1_;\n  const char *name2 = (const char *)name2_;\n  if (res == 0) {\n    if (name1) {\n      POST_READ(name1, __sanitizer::internal_strlen(name1) + 1);\n    }\n    if (name2) {\n      POST_READ(name2, __sanitizer::internal_strlen(name2) + 1);\n    }\n  }\n}\nPRE_SYSCALL(renameat)\n(long long fromfd_, void *from_, long long tofd_, void *to_) {\n  const char *from = (const char *)from_;\n  const char *to = (const char *)to_;\n  if (from) {\n    PRE_READ(from, __sanitizer::internal_strlen(from) + 1);\n  }\n  if (to) {\n    PRE_READ(to, __sanitizer::internal_strlen(to) + 1);\n  }\n}\nPOST_SYSCALL(renameat)\n(long long res, long long fromfd_, void *from_, long long tofd_, void *to_) {\n  const char *from = (const char *)from_;\n  const char *to = (const char *)to_;\n  if (res == 0) {\n    if (from) {\n      POST_READ(from, __sanitizer::internal_strlen(from) + 1);\n    }\n    if (to) {\n      POST_READ(to, __sanitizer::internal_strlen(to) + 1);\n    }\n  }\n}\nPRE_SYSCALL(mkfifoat)(long long fd_, void *path_, long long mode_) {\n  const char *path = (const char *)path_;\n  if (path) {\n    PRE_READ(path, __sanitizer::internal_strlen(path) + 1);\n  }\n}\nPOST_SYSCALL(mkfifoat)\n(long long res, long long fd_, void *path_, long long mode_) {\n  const char *path = (const char *)path_;\n  if (res == 0) {\n    if (path) {\n      POST_READ(path, __sanitizer::internal_strlen(path) + 1);\n    }\n  }\n}\nPRE_SYSCALL(mknodat)\n(long long fd_, void *path_, long long mode_, long long PAD_, long long dev_) {\n  const char *path = 
(const char *)path_;\n  if (path) {\n    PRE_READ(path, __sanitizer::internal_strlen(path) + 1);\n  }\n}\nPOST_SYSCALL(mknodat)\n(long long res, long long fd_, void *path_, long long mode_, long long PAD_,\n  long long dev_) {\n  const char *path = (const char *)path_;\n  if (res == 0) {\n    if (path) {\n      POST_READ(path, __sanitizer::internal_strlen(path) + 1);\n    }\n  }\n}\nPRE_SYSCALL(mkdirat)(long long fd_, void *path_, long long mode_) {\n  const char *path = (const char *)path_;\n  if (path) {\n    PRE_READ(path, __sanitizer::internal_strlen(path) + 1);\n  }\n}\nPOST_SYSCALL(mkdirat)\n(long long res, long long fd_, void *path_, long long mode_) {\n  const char *path = (const char *)path_;\n  if (res == 0) {\n    if (path) {\n      POST_READ(path, __sanitizer::internal_strlen(path) + 1);\n    }\n  }\n}\nPRE_SYSCALL(faccessat)\n(long long fd_, void *path_, long long amode_, long long flag_) {\n  const char *path = (const char *)path_;\n  if (path) {\n    PRE_READ(path, __sanitizer::internal_strlen(path) + 1);\n  }\n}\nPOST_SYSCALL(faccessat)\n(long long res, long long fd_, void *path_, long long amode_, long long flag_) {\n  const char *path = (const char *)path_;\n  if (res == 0) {\n    if (path) {\n      POST_READ(path, __sanitizer::internal_strlen(path) + 1);\n    }\n  }\n}\nPRE_SYSCALL(fchmodat)\n(long long fd_, void *path_, long long mode_, long long flag_) {\n  const char *path = (const char *)path_;\n  if (path) {\n    PRE_READ(path, __sanitizer::internal_strlen(path) + 1);\n  }\n}\nPOST_SYSCALL(fchmodat)\n(long long res, long long fd_, void *path_, long long mode_, long long flag_) {\n  const char *path = (const char *)path_;\n  if (res == 0) {\n    if (path) {\n      POST_READ(path, __sanitizer::internal_strlen(path) + 1);\n    }\n  }\n}\nPRE_SYSCALL(fchownat)\n(long long fd_, void *path_, long long owner_, long long group_,\n  long long flag_) {\n  const char *path = (const char *)path_;\n  if (path) {\n    PRE_READ(path, 
__sanitizer::internal_strlen(path) + 1);\n  }\n}\nPOST_SYSCALL(fchownat)\n(long long res, long long fd_, void *path_, long long owner_, long long group_,\n  long long flag_) {\n  const char *path = (const char *)path_;\n  if (res == 0) {\n    if (path) {\n      POST_READ(path, __sanitizer::internal_strlen(path) + 1);\n    }\n  }\n}\nPRE_SYSCALL(fexecve)(long long fd_, void *argp_, void *envp_) { /* TODO */ }\nPOST_SYSCALL(fexecve)(long long res, long long fd_, void *argp_, void *envp_) {\n  /* TODO */\n}\nPRE_SYSCALL(fstatat)(long long fd_, void *path_, void *buf_, long long flag_) {\n  const char *path = (const char *)path_;\n  if (path) {\n    PRE_READ(path, __sanitizer::internal_strlen(path) + 1);\n  }\n}\nPOST_SYSCALL(fstatat)\n(long long res, long long fd_, void *path_, void *buf_, long long flag_) {\n  const char *path = (const char *)path_;\n  if (path) {\n    POST_READ(path, __sanitizer::internal_strlen(path) + 1);\n  }\n}\nPRE_SYSCALL(utimensat)\n(long long fd_, void *path_, void *tptr_, long long flag_) {\n  const char *path = (const char *)path_;\n  if (path) {\n    PRE_READ(path, __sanitizer::internal_strlen(path) + 1);\n  }\n  if (tptr_) {\n    PRE_READ(tptr_, struct_timespec_sz);\n  }\n}\nPOST_SYSCALL(utimensat)\n(long long res, long long fd_, void *path_, void *tptr_, long long flag_) {\n  const char *path = (const char *)path_;\n  if (res > 0) {\n    if (path) {\n      POST_READ(path, __sanitizer::internal_strlen(path) + 1);\n    }\n    if (tptr_) {\n      POST_READ(tptr_, struct_timespec_sz);\n    }\n  }\n}\nPRE_SYSCALL(openat)\n(long long fd_, void *path_, long long oflags_, long long mode_) {\n  const char *path = (const char *)path_;\n  if (path) {\n    PRE_READ(path, __sanitizer::internal_strlen(path) + 1);\n  }\n}\nPOST_SYSCALL(openat)\n(long long res, long long fd_, void *path_, long long oflags_,\n  long long mode_) {\n  const char *path = (const char *)path_;\n  if (res > 0) {\n    if (path) {\n      POST_READ(path, 
__sanitizer::internal_strlen(path) + 1);\n    }\n  }\n}\nPRE_SYSCALL(readlinkat)\n(long long fd_, void *path_, void *buf_, long long bufsize_) {\n  const char *path = (const char *)path_;\n  if (path) {\n    PRE_READ(path, __sanitizer::internal_strlen(path) + 1);\n  }\n}\nPOST_SYSCALL(readlinkat)\n(long long res, long long fd_, void *path_, void *buf_, long long bufsize_) {\n  const char *path = (const char *)path_;\n  if (res > 0) {\n    if (path) {\n      POST_READ(path, __sanitizer::internal_strlen(path) + 1);\n    }\n  }\n}\nPRE_SYSCALL(symlinkat)(void *path1_, long long fd_, void *path2_) {\n  const char *path1 = (const char *)path1_;\n  const char *path2 = (const char *)path2_;\n  if (path1) {\n    PRE_READ(path1, __sanitizer::internal_strlen(path1) + 1);\n  }\n  if (path2) {\n    PRE_READ(path2, __sanitizer::internal_strlen(path2) + 1);\n  }\n}\nPOST_SYSCALL(symlinkat)\n(long long res, void *path1_, long long fd_, void *path2_) {\n  const char *path1 = (const char *)path1_;\n  const char *path2 = (const char *)path2_;\n  if (res == 0) {\n    if (path1) {\n      POST_READ(path1, __sanitizer::internal_strlen(path1) + 1);\n    }\n    if (path2) {\n      POST_READ(path2, __sanitizer::internal_strlen(path2) + 1);\n    }\n  }\n}\nPRE_SYSCALL(unlinkat)(long long fd_, void *path_, long long flag_) {\n  const char *path = (const char *)path_;\n  if (path) {\n    PRE_READ(path, __sanitizer::internal_strlen(path) + 1);\n  }\n}\nPOST_SYSCALL(unlinkat)\n(long long res, long long fd_, void *path_, long long flag_) {\n  const char *path = (const char *)path_;\n  if (res == 0) {\n    if (path) {\n      POST_READ(path, __sanitizer::internal_strlen(path) + 1);\n    }\n  }\n}\nPRE_SYSCALL(futimens)(long long fd_, void *tptr_) {\n  struct __sanitizer_timespec **tptr = (struct __sanitizer_timespec **)tptr_;\n  if (tptr) {\n    PRE_READ(tptr[0], struct_timespec_sz);\n    PRE_READ(tptr[1], struct_timespec_sz);\n  }\n}\nPOST_SYSCALL(futimens)(long long res, long long fd_, void 
*tptr_) {\n  struct __sanitizer_timespec **tptr = (struct __sanitizer_timespec **)tptr_;\n  if (res == 0) {\n    if (tptr) {\n      POST_READ(tptr[0], struct_timespec_sz);\n      POST_READ(tptr[1], struct_timespec_sz);\n    }\n  }\n}\nPRE_SYSCALL(__quotactl)(void *path_, void *args_) {\n  const char *path = (const char *)path_;\n  if (path) {\n    PRE_READ(path, __sanitizer::internal_strlen(path) + 1);\n  }\n}\nPOST_SYSCALL(__quotactl)(long long res, void *path_, void *args_) {\n  const char *path = (const char *)path_;\n  if (res == 0) {\n    if (path) {\n      POST_READ(path, __sanitizer::internal_strlen(path) + 1);\n    }\n  }\n}\nPRE_SYSCALL(posix_spawn)\n(void *pid_, void *path_, void *file_actions_, void *attrp_, void *argv_,\n  void *envp_) {\n  const char *path = (const char *)path_;\n  if (path) {\n    PRE_READ(path, __sanitizer::internal_strlen(path) + 1);\n  }\n}\nPOST_SYSCALL(posix_spawn)\n(long long res, void *pid_, void *path_, void *file_actions_, void *attrp_,\n  void *argv_, void *envp_) {\n  const char *path = (const char *)path_;\n  if (pid_) {\n    if (path) {\n      POST_READ(path, __sanitizer::internal_strlen(path) + 1);\n    }\n  }\n}\nPRE_SYSCALL(recvmmsg)\n(long long s_, void *mmsg_, long long vlen_, long long flags_, void *timeout_) {\n  if (timeout_) {\n    PRE_READ(timeout_, struct_timespec_sz);\n  }\n}\nPOST_SYSCALL(recvmmsg)\n(long long res, long long s_, void *mmsg_, long long vlen_, long long flags_,\n  void *timeout_) {\n  if (res >= 0) {\n    if (timeout_) {\n      POST_READ(timeout_, struct_timespec_sz);\n    }\n  }\n}\nPRE_SYSCALL(sendmmsg)\n(long long s_, void *mmsg_, long long vlen_, long long flags_) {\n  struct __sanitizer_mmsghdr *mmsg = (struct __sanitizer_mmsghdr *)mmsg_;\n  if (mmsg) {\n    PRE_READ(mmsg, sizeof(struct __sanitizer_mmsghdr) *\n                       (vlen_ > 1024 ? 
1024 : vlen_));\n  }\n}\nPOST_SYSCALL(sendmmsg)\n(long long res, long long s_, void *mmsg_, long long vlen_, long long flags_) {\n  struct __sanitizer_mmsghdr *mmsg = (struct __sanitizer_mmsghdr *)mmsg_;\n  if (res >= 0) {\n    if (mmsg) {\n      POST_READ(mmsg, sizeof(struct __sanitizer_mmsghdr) *\n                          (vlen_ > 1024 ? 1024 : vlen_));\n    }\n  }\n}\nPRE_SYSCALL(clock_nanosleep)\n(long long clock_id_, long long flags_, void *rqtp_, void *rmtp_) {\n  if (rqtp_) {\n    PRE_READ(rqtp_, struct_timespec_sz);\n  }\n}\nPOST_SYSCALL(clock_nanosleep)\n(long long res, long long clock_id_, long long flags_, void *rqtp_,\n  void *rmtp_) {\n  if (rqtp_) {\n    POST_READ(rqtp_, struct_timespec_sz);\n  }\n}\nPRE_SYSCALL(___lwp_park60)\n(long long clock_id_, long long flags_, void *ts_, long long unpark_,\n  void *hint_, void *unparkhint_) {\n  if (ts_) {\n    PRE_READ(ts_, struct_timespec_sz);\n  }\n}\nPOST_SYSCALL(___lwp_park60)\n(long long res, long long clock_id_, long long flags_, void *ts_,\n  long long unpark_, void *hint_, void *unparkhint_) {\n  if (res == 0) {\n    if (ts_) {\n      POST_READ(ts_, struct_timespec_sz);\n    }\n  }\n}\nPRE_SYSCALL(posix_fallocate)\n(long long fd_, long long PAD_, long long pos_, long long len_) {\n  /* Nothing to do */\n}\nPOST_SYSCALL(posix_fallocate)\n(long long res, long long fd_, long long PAD_, long long pos_, long long len_) {\n  /* Nothing to do */\n}\nPRE_SYSCALL(fdiscard)\n(long long fd_, long long PAD_, long long pos_, long long len_) {\n  /* Nothing to do */\n}\nPOST_SYSCALL(fdiscard)\n(long long res, long long fd_, long long PAD_, long long pos_, long long len_) {\n  /* Nothing to do */\n}\nPRE_SYSCALL(wait6)\n(long long idtype_, long long id_, void *status_, long long options_,\n  void *wru_, void *info_) {\n  /* Nothing to do */\n}\nPOST_SYSCALL(wait6)\n(long long res, long long idtype_, long long id_, void *status_,\n  long long options_, void *wru_, void *info_) {\n  /* Nothing to do 
*/\n}\nPRE_SYSCALL(clock_getcpuclockid2)\n(long long idtype_, long long id_, void *clock_id_) {\n  /* Nothing to do */\n}\nPOST_SYSCALL(clock_getcpuclockid2)\n(long long res, long long idtype_, long long id_, void *clock_id_) {\n  /* Nothing to do */\n}\nPRE_SYSCALL(__getvfsstat90)(void *buf_, long long bufsize_, long long flags_) {\n  /* Nothing to do */\n}\nPOST_SYSCALL(__getvfsstat90)\n(long long res, void *buf_, long long bufsize_, long long flags_) {\n  /* Nothing to do */\n}\nPRE_SYSCALL(__statvfs190)(void *path_, void *buf_, long long flags_) {\n  const char *path = (const char *)path_;\n  if (path) {\n    PRE_READ(path, __sanitizer::internal_strlen(path) + 1);\n  }\n}\nPOST_SYSCALL(__statvfs190)\n(long long res, void *path_, void *buf_, long long flags_) {\n  const char *path = (const char *)path_;\n  if (path) {\n    POST_READ(path, __sanitizer::internal_strlen(path) + 1);\n  }\n}\nPRE_SYSCALL(__fstatvfs190)(long long fd_, void *buf_, long long flags_) {\n  /* Nothing to do */\n}\nPOST_SYSCALL(__fstatvfs190)\n(long long res, long long fd_, void *buf_, long long flags_) {\n  /* Nothing to do */\n}\nPRE_SYSCALL(__fhstatvfs190)\n(void *fhp_, long long fh_size_, void *buf_, long long flags_) {\n  if (fhp_) {\n    PRE_READ(fhp_, fh_size_);\n  }\n}\nPOST_SYSCALL(__fhstatvfs190)\n(long long res, void *fhp_, long long fh_size_, void *buf_, long long flags_) {}\nPRE_SYSCALL(__acl_get_link)(void *path_, long long type_, void *aclp_) {\n  /* TODO */\n}\nPOST_SYSCALL(__acl_get_link)\n(long long res, void *path_, long long type_, void *aclp_) {\n  /* TODO */\n}\nPRE_SYSCALL(__acl_set_link)(void *path_, long long type_, void *aclp_) {\n  /* TODO */\n}\nPOST_SYSCALL(__acl_set_link)\n(long long res, void *path_, long long type_, void *aclp_) {\n  /* TODO */\n}\nPRE_SYSCALL(__acl_delete_link)(void *path_, long long type_) { /* TODO */ }\nPOST_SYSCALL(__acl_delete_link)(long long res, void *path_, long long type_) {\n  /* TODO */\n}\nPRE_SYSCALL(__acl_aclcheck_link)(void 
*path_, long long type_, void *aclp_) {\n  /* TODO */\n}\nPOST_SYSCALL(__acl_aclcheck_link)\n(long long res, void *path_, long long type_, void *aclp_) {\n  /* TODO */\n}\nPRE_SYSCALL(__acl_get_file)(void *path_, long long type_, void *aclp_) {\n  /* TODO */\n}\nPOST_SYSCALL(__acl_get_file)\n(long long res, void *path_, long long type_, void *aclp_) {\n  /* TODO */\n}\nPRE_SYSCALL(__acl_set_file)(void *path_, long long type_, void *aclp_) {\n  /* TODO */\n}\nPOST_SYSCALL(__acl_set_file)\n(long long res, void *path_, long long type_, void *aclp_) {\n  /* TODO */\n}\nPRE_SYSCALL(__acl_get_fd)(long long filedes_, long long type_, void *aclp_) {\n  /* TODO */\n}\nPOST_SYSCALL(__acl_get_fd)\n(long long res, long long filedes_, long long type_, void *aclp_) {\n  /* TODO */\n}\nPRE_SYSCALL(__acl_set_fd)(long long filedes_, long long type_, void *aclp_) {\n  /* TODO */\n}\nPOST_SYSCALL(__acl_set_fd)\n(long long res, long long filedes_, long long type_, void *aclp_) {\n  /* TODO */\n}\nPRE_SYSCALL(__acl_delete_file)(void *path_, long long type_) { /* TODO */ }\nPOST_SYSCALL(__acl_delete_file)(long long res, void *path_, long long type_) {\n  /* TODO */\n}\nPRE_SYSCALL(__acl_delete_fd)(long long filedes_, long long type_) { /* TODO */ }\nPOST_SYSCALL(__acl_delete_fd)\n(long long res, long long filedes_, long long type_) {\n  /* TODO */\n}\nPRE_SYSCALL(__acl_aclcheck_file)(void *path_, long long type_, void *aclp_) {\n  /* TODO */\n}\nPOST_SYSCALL(__acl_aclcheck_file)\n(long long res, void *path_, long long type_, void *aclp_) {\n  /* TODO */\n}\nPRE_SYSCALL(__acl_aclcheck_fd)\n(long long filedes_, long long type_, void *aclp_) {\n  /* TODO */\n}\nPOST_SYSCALL(__acl_aclcheck_fd)\n(long long res, long long filedes_, long long type_, void *aclp_) {\n  /* TODO */\n}\nPRE_SYSCALL(lpathconf)(void *path_, long long name_) { /* TODO */ }\nPOST_SYSCALL(lpathconf)(long long res, void *path_, long long name_) {\n  /* TODO */\n}\n#undef SYS_MAXSYSARGS\n} // extern \"C\"\n\n#undef 
PRE_SYSCALL\n#undef PRE_READ\n#undef PRE_WRITE\n#undef POST_SYSCALL\n#undef POST_READ\n#undef POST_WRITE\n\n#endif // SANITIZER_NETBSD\n"
  },
  {
    "path": "runtime/sanitizer_common/sanitizer_termination.cpp",
    "content": "//===-- sanitizer_termination.cpp -------------------------------*- C++ -*-===//\n//\n// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n// See https://llvm.org/LICENSE.txt for license information.\n// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n//\n//===----------------------------------------------------------------------===//\n///\n/// This file contains the Sanitizer termination functions CheckFailed and Die,\n/// and the callback functionalities associated with them.\n///\n//===----------------------------------------------------------------------===//\n\n#include \"sanitizer_common.h\"\n#include \"sanitizer_libc.h\"\n\nnamespace __sanitizer {\n\nstatic const int kMaxNumOfInternalDieCallbacks = 5;\nstatic DieCallbackType InternalDieCallbacks[kMaxNumOfInternalDieCallbacks];\n\nbool AddDieCallback(DieCallbackType callback) {\n  for (int i = 0; i < kMaxNumOfInternalDieCallbacks; i++) {\n    if (InternalDieCallbacks[i] == nullptr) {\n      InternalDieCallbacks[i] = callback;\n      return true;\n    }\n  }\n  return false;\n}\n\nbool RemoveDieCallback(DieCallbackType callback) {\n  for (int i = 0; i < kMaxNumOfInternalDieCallbacks; i++) {\n    if (InternalDieCallbacks[i] == callback) {\n      internal_memmove(&InternalDieCallbacks[i], &InternalDieCallbacks[i + 1],\n                       sizeof(InternalDieCallbacks[0]) *\n                           (kMaxNumOfInternalDieCallbacks - i - 1));\n      InternalDieCallbacks[kMaxNumOfInternalDieCallbacks - 1] = nullptr;\n      return true;\n    }\n  }\n  return false;\n}\n\nstatic DieCallbackType UserDieCallback;\nvoid SetUserDieCallback(DieCallbackType callback) {\n  UserDieCallback = callback;\n}\n\nvoid NORETURN Die() {\n  if (UserDieCallback)\n    UserDieCallback();\n  for (int i = kMaxNumOfInternalDieCallbacks - 1; i >= 0; i--) {\n    if (InternalDieCallbacks[i])\n      InternalDieCallbacks[i]();\n  }\n  if (common_flags()->abort_on_error)\n    Abort();\n  
internal__exit(common_flags()->exitcode);\n}\n\nstatic void (*CheckUnwindCallback)();\nvoid SetCheckUnwindCallback(void (*callback)()) {\n  CheckUnwindCallback = callback;\n}\n\nvoid NORETURN CheckFailed(const char *file, int line, const char *cond,\n                          u64 v1, u64 v2) {\n  u32 tid = GetTid();\n  Printf(\"%s: CHECK failed: %s:%d \\\"%s\\\" (0x%zx, 0x%zx) (tid=%u)\\n\",\n         SanitizerToolName, StripModuleName(file), line, cond, (uptr)v1,\n         (uptr)v2, tid);\n  static atomic_uint32_t first_tid;\n  u32 cmp = 0;\n  if (!atomic_compare_exchange_strong(&first_tid, &cmp, tid,\n                                      memory_order_relaxed)) {\n    if (cmp == tid) {\n      // Recursing into CheckFailed.\n    } else {\n      // Another thread fails already, let it print the stack and terminate.\n      SleepForSeconds(2);\n    }\n    Trap();\n  }\n  if (CheckUnwindCallback)\n    CheckUnwindCallback();\n  Die();\n}\n\n} // namespace __sanitizer\n\nusing namespace __sanitizer;\n\nextern \"C\" {\nSANITIZER_INTERFACE_ATTRIBUTE\nvoid __sanitizer_set_death_callback(void (*callback)(void)) {\n  SetUserDieCallback(callback);\n}\n}  // extern \"C\"\n"
  },
  {
    "path": "runtime/sanitizer_common/sanitizer_thread_registry.cpp",
    "content": "//===-- sanitizer_thread_registry.cpp -------------------------------------===//\n//\n// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n// See https://llvm.org/LICENSE.txt for license information.\n// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n//\n//===----------------------------------------------------------------------===//\n//\n// This file is shared between sanitizer tools.\n//\n// General thread bookkeeping functionality.\n//===----------------------------------------------------------------------===//\n\n#include \"sanitizer_thread_registry.h\"\n\n#include \"sanitizer_placement_new.h\"\n\nnamespace __sanitizer {\n\nThreadContextBase::ThreadContextBase(u32 tid)\n    : tid(tid), unique_id(0), reuse_count(), os_id(0), user_id(0),\n      status(ThreadStatusInvalid), detached(false),\n      thread_type(ThreadType::Regular), parent_tid(0), next(0) {\n  name[0] = '\\0';\n  atomic_store(&thread_destroyed, 0, memory_order_release);\n}\n\nThreadContextBase::~ThreadContextBase() {\n  // ThreadContextBase should never be deleted.\n  CHECK(0);\n}\n\nvoid ThreadContextBase::SetName(const char *new_name) {\n  name[0] = '\\0';\n  if (new_name) {\n    internal_strncpy(name, new_name, sizeof(name));\n    name[sizeof(name) - 1] = '\\0';\n  }\n}\n\nvoid ThreadContextBase::SetDead() {\n  CHECK(status == ThreadStatusRunning ||\n        status == ThreadStatusFinished);\n  status = ThreadStatusDead;\n  user_id = 0;\n  OnDead();\n}\n\nvoid ThreadContextBase::SetDestroyed() {\n  atomic_store(&thread_destroyed, 1, memory_order_release);\n}\n\nbool ThreadContextBase::GetDestroyed() {\n  return !!atomic_load(&thread_destroyed, memory_order_acquire);\n}\n\nvoid ThreadContextBase::SetJoined(void *arg) {\n  // FIXME(dvyukov): print message and continue (it's user error).\n  CHECK_EQ(false, detached);\n  CHECK_EQ(ThreadStatusFinished, status);\n  status = ThreadStatusDead;\n  user_id = 0;\n  OnJoined(arg);\n}\n\nvoid 
ThreadContextBase::SetFinished() {\n  // ThreadRegistry::FinishThread calls here in ThreadStatusCreated state\n  // for a thread that never actually started.  In that case the thread\n  // should go to ThreadStatusFinished regardless of whether it was created\n  // as detached.\n  if (!detached || status == ThreadStatusCreated) status = ThreadStatusFinished;\n  OnFinished();\n}\n\nvoid ThreadContextBase::SetStarted(tid_t _os_id, ThreadType _thread_type,\n                                   void *arg) {\n  status = ThreadStatusRunning;\n  os_id = _os_id;\n  thread_type = _thread_type;\n  OnStarted(arg);\n}\n\nvoid ThreadContextBase::SetCreated(uptr _user_id, u64 _unique_id,\n                                   bool _detached, u32 _parent_tid, void *arg) {\n  status = ThreadStatusCreated;\n  user_id = _user_id;\n  unique_id = _unique_id;\n  detached = _detached;\n  // Parent tid makes no sense for the main thread.\n  if (tid != kMainTid)\n    parent_tid = _parent_tid;\n  OnCreated(arg);\n}\n\nvoid ThreadContextBase::Reset() {\n  status = ThreadStatusInvalid;\n  SetName(0);\n  atomic_store(&thread_destroyed, 0, memory_order_release);\n  OnReset();\n}\n\n// ThreadRegistry implementation.\n\nThreadRegistry::ThreadRegistry(ThreadContextFactory factory)\n    : ThreadRegistry(factory, UINT32_MAX, UINT32_MAX, 0) {}\n\nThreadRegistry::ThreadRegistry(ThreadContextFactory factory, u32 max_threads,\n                               u32 thread_quarantine_size, u32 max_reuse)\n    : context_factory_(factory),\n      max_threads_(max_threads),\n      thread_quarantine_size_(thread_quarantine_size),\n      max_reuse_(max_reuse),\n      mtx_(MutexThreadRegistry),\n      total_threads_(0),\n      alive_threads_(0),\n      max_alive_threads_(0),\n      running_threads_(0) {\n  dead_threads_.clear();\n  invalid_threads_.clear();\n}\n\nvoid ThreadRegistry::GetNumberOfThreads(uptr *total, uptr *running,\n                                        uptr *alive) {\n  ThreadRegistryLock l(this);\n  
if (total)\n    *total = threads_.size();\n  if (running) *running = running_threads_;\n  if (alive) *alive = alive_threads_;\n}\n\nuptr ThreadRegistry::GetMaxAliveThreads() {\n  ThreadRegistryLock l(this);\n  return max_alive_threads_;\n}\n\nu32 ThreadRegistry::CreateThread(uptr user_id, bool detached, u32 parent_tid,\n                                 void *arg) {\n  ThreadRegistryLock l(this);\n  u32 tid = kInvalidTid;\n  ThreadContextBase *tctx = QuarantinePop();\n  if (tctx) {\n    tid = tctx->tid;\n  } else if (threads_.size() < max_threads_) {\n    // Allocate new thread context and tid.\n    tid = threads_.size();\n    tctx = context_factory_(tid);\n    threads_.push_back(tctx);\n  } else {\n#if !SANITIZER_GO\n    Report(\"%s: Thread limit (%u threads) exceeded. Dying.\\n\",\n           SanitizerToolName, max_threads_);\n#else\n    Printf(\"race: limit on %u simultaneously alive goroutines is exceeded,\"\n        \" dying\\n\", max_threads_);\n#endif\n    Die();\n  }\n  CHECK_NE(tctx, 0);\n  CHECK_NE(tid, kInvalidTid);\n  CHECK_LT(tid, max_threads_);\n  CHECK_EQ(tctx->status, ThreadStatusInvalid);\n  alive_threads_++;\n  if (max_alive_threads_ < alive_threads_) {\n    max_alive_threads_++;\n    CHECK_EQ(alive_threads_, max_alive_threads_);\n  }\n  if (user_id) {\n    // Ensure that user_id is unique. If it's not the case we are screwed.\n    // Ignoring this situation may lead to very hard to debug false\n    // positives later (e.g. 
if we join a wrong thread).\n    CHECK(live_.try_emplace(user_id, tid).second);\n  }\n  tctx->SetCreated(user_id, total_threads_++, detached,\n                   parent_tid, arg);\n  return tid;\n}\n\nvoid ThreadRegistry::RunCallbackForEachThreadLocked(ThreadCallback cb,\n                                                    void *arg) {\n  CheckLocked();\n  for (u32 tid = 0; tid < threads_.size(); tid++) {\n    ThreadContextBase *tctx = threads_[tid];\n    if (tctx == 0)\n      continue;\n    cb(tctx, arg);\n  }\n}\n\nu32 ThreadRegistry::FindThread(FindThreadCallback cb, void *arg) {\n  ThreadRegistryLock l(this);\n  for (u32 tid = 0; tid < threads_.size(); tid++) {\n    ThreadContextBase *tctx = threads_[tid];\n    if (tctx != 0 && cb(tctx, arg))\n      return tctx->tid;\n  }\n  return kInvalidTid;\n}\n\nThreadContextBase *\nThreadRegistry::FindThreadContextLocked(FindThreadCallback cb, void *arg) {\n  CheckLocked();\n  for (u32 tid = 0; tid < threads_.size(); tid++) {\n    ThreadContextBase *tctx = threads_[tid];\n    if (tctx != 0 && cb(tctx, arg))\n      return tctx;\n  }\n  return 0;\n}\n\nstatic bool FindThreadContextByOsIdCallback(ThreadContextBase *tctx,\n                                            void *arg) {\n  return (tctx->os_id == (uptr)arg && tctx->status != ThreadStatusInvalid &&\n      tctx->status != ThreadStatusDead);\n}\n\nThreadContextBase *ThreadRegistry::FindThreadContextByOsIDLocked(tid_t os_id) {\n  return FindThreadContextLocked(FindThreadContextByOsIdCallback,\n                                 (void *)os_id);\n}\n\nvoid ThreadRegistry::SetThreadName(u32 tid, const char *name) {\n  ThreadRegistryLock l(this);\n  ThreadContextBase *tctx = threads_[tid];\n  CHECK_NE(tctx, 0);\n  CHECK_EQ(SANITIZER_FUCHSIA ? 
ThreadStatusCreated : ThreadStatusRunning,\n           tctx->status);\n  tctx->SetName(name);\n}\n\nvoid ThreadRegistry::SetThreadNameByUserId(uptr user_id, const char *name) {\n  ThreadRegistryLock l(this);\n  if (const auto *tid = live_.find(user_id))\n    threads_[tid->second]->SetName(name);\n}\n\nvoid ThreadRegistry::DetachThread(u32 tid, void *arg) {\n  ThreadRegistryLock l(this);\n  ThreadContextBase *tctx = threads_[tid];\n  CHECK_NE(tctx, 0);\n  if (tctx->status == ThreadStatusInvalid) {\n    Report(\"%s: Detach of non-existent thread\\n\", SanitizerToolName);\n    return;\n  }\n  tctx->OnDetached(arg);\n  if (tctx->status == ThreadStatusFinished) {\n    if (tctx->user_id)\n      live_.erase(tctx->user_id);\n    tctx->SetDead();\n    QuarantinePush(tctx);\n  } else {\n    tctx->detached = true;\n  }\n}\n\nvoid ThreadRegistry::JoinThread(u32 tid, void *arg) {\n  bool destroyed = false;\n  do {\n    {\n      ThreadRegistryLock l(this);\n      ThreadContextBase *tctx = threads_[tid];\n      CHECK_NE(tctx, 0);\n      if (tctx->status == ThreadStatusInvalid) {\n        Report(\"%s: Join of non-existent thread\\n\", SanitizerToolName);\n        return;\n      }\n      if ((destroyed = tctx->GetDestroyed())) {\n        if (tctx->user_id)\n          live_.erase(tctx->user_id);\n        tctx->SetJoined(arg);\n        QuarantinePush(tctx);\n      }\n    }\n    if (!destroyed)\n      internal_sched_yield();\n  } while (!destroyed);\n}\n\n// Normally this is called when the thread is about to exit.  If\n// called in ThreadStatusCreated state, then this thread was never\n// really started.  
We just did CreateThread for a prospective new\n// thread before trying to create it, and then failed to actually\n// create it, and so never called StartThread.\nThreadStatus ThreadRegistry::FinishThread(u32 tid) {\n  ThreadRegistryLock l(this);\n  CHECK_GT(alive_threads_, 0);\n  alive_threads_--;\n  ThreadContextBase *tctx = threads_[tid];\n  CHECK_NE(tctx, 0);\n  bool dead = tctx->detached;\n  ThreadStatus prev_status = tctx->status;\n  if (tctx->status == ThreadStatusRunning) {\n    CHECK_GT(running_threads_, 0);\n    running_threads_--;\n  } else {\n    // The thread never really existed.\n    CHECK_EQ(tctx->status, ThreadStatusCreated);\n    dead = true;\n  }\n  tctx->SetFinished();\n  if (dead) {\n    if (tctx->user_id)\n      live_.erase(tctx->user_id);\n    tctx->SetDead();\n    QuarantinePush(tctx);\n  }\n  tctx->SetDestroyed();\n  return prev_status;\n}\n\nvoid ThreadRegistry::StartThread(u32 tid, tid_t os_id, ThreadType thread_type,\n                                 void *arg) {\n  ThreadRegistryLock l(this);\n  running_threads_++;\n  ThreadContextBase *tctx = threads_[tid];\n  CHECK_NE(tctx, 0);\n  CHECK_EQ(ThreadStatusCreated, tctx->status);\n  tctx->SetStarted(os_id, thread_type, arg);\n}\n\nvoid ThreadRegistry::QuarantinePush(ThreadContextBase *tctx) {\n  if (tctx->tid == 0)\n    return;  // Don't reuse the main thread.  
It's a special snowflake.\n  dead_threads_.push_back(tctx);\n  if (dead_threads_.size() <= thread_quarantine_size_)\n    return;\n  tctx = dead_threads_.front();\n  dead_threads_.pop_front();\n  CHECK_EQ(tctx->status, ThreadStatusDead);\n  tctx->Reset();\n  tctx->reuse_count++;\n  if (max_reuse_ > 0 && tctx->reuse_count >= max_reuse_)\n    return;\n  invalid_threads_.push_back(tctx);\n}\n\nThreadContextBase *ThreadRegistry::QuarantinePop() {\n  if (invalid_threads_.size() == 0)\n    return 0;\n  ThreadContextBase *tctx = invalid_threads_.front();\n  invalid_threads_.pop_front();\n  return tctx;\n}\n\nu32 ThreadRegistry::ConsumeThreadUserId(uptr user_id) {\n  ThreadRegistryLock l(this);\n  u32 tid;\n  auto *t = live_.find(user_id);\n  CHECK(t);\n  tid = t->second;\n  live_.erase(t);\n  auto *tctx = threads_[tid];\n  CHECK_EQ(tctx->user_id, user_id);\n  tctx->user_id = 0;\n  return tid;\n}\n\nvoid ThreadRegistry::SetThreadUserId(u32 tid, uptr user_id) {\n  ThreadRegistryLock l(this);\n  ThreadContextBase *tctx = threads_[tid];\n  CHECK_NE(tctx, 0);\n  CHECK_NE(tctx->status, ThreadStatusInvalid);\n  CHECK_NE(tctx->status, ThreadStatusDead);\n  CHECK_EQ(tctx->user_id, 0);\n  tctx->user_id = user_id;\n  CHECK(live_.try_emplace(user_id, tctx->tid).second);\n}\n\nu32 ThreadRegistry::OnFork(u32 tid) {\n  ThreadRegistryLock l(this);\n  // We only purge user_id (pthread_t) of live threads because\n  // they cause CHECK failures if new threads with matching pthread_t\n  // created after fork.\n  // Potentially we could purge more info (ThreadContextBase themselves),\n  // but it's hard to test and easy to introduce new issues by doing this.\n  for (auto *tctx : threads_) {\n    if (tctx->tid == tid || !tctx->user_id)\n      continue;\n    CHECK(live_.erase(tctx->user_id));\n    tctx->user_id = 0;\n  }\n  return alive_threads_;\n}\n\n}  // namespace __sanitizer\n"
  },
  {
    "path": "runtime/sanitizer_common/sanitizer_thread_registry.h",
    "content": "//===-- sanitizer_thread_registry.h -----------------------------*- C++ -*-===//\n//\n// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n// See https://llvm.org/LICENSE.txt for license information.\n// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n//\n//===----------------------------------------------------------------------===//\n//\n// This file is shared between sanitizer tools.\n//\n// General thread bookkeeping functionality.\n//===----------------------------------------------------------------------===//\n\n#ifndef SANITIZER_THREAD_REGISTRY_H\n#define SANITIZER_THREAD_REGISTRY_H\n\n#include \"sanitizer_common.h\"\n#include \"sanitizer_dense_map.h\"\n#include \"sanitizer_list.h\"\n#include \"sanitizer_mutex.h\"\n\nnamespace __sanitizer {\n\nenum ThreadStatus {\n  ThreadStatusInvalid,   // Non-existent thread, data is invalid.\n  ThreadStatusCreated,   // Created but not yet running.\n  ThreadStatusRunning,   // The thread is currently running.\n  ThreadStatusFinished,  // Joinable thread is finished but not yet joined.\n  ThreadStatusDead       // Joined, but some info is still available.\n};\n\nenum class ThreadType {\n  Regular, // Normal thread\n  Worker,  // macOS Grand Central Dispatch (GCD) worker thread\n  Fiber,   // Fiber\n};\n\n// Generic thread context. Specific sanitizer tools may inherit from it.\n// If thread is dead, context may optionally be reused for a new thread.\nclass ThreadContextBase {\n public:\n  explicit ThreadContextBase(u32 tid);\n  const u32 tid;  // Thread ID. Main thread should have tid = 0.\n  u64 unique_id;  // Unique thread ID.\n  u32 reuse_count;  // Number of times this tid was reused.\n  tid_t os_id;     // PID (used for reporting).\n  uptr user_id;   // Some opaque user thread id (e.g. 
pthread_t).\n  char name[64];  // As annotated by user.\n\n  ThreadStatus status;\n  bool detached;\n  ThreadType thread_type;\n\n  u32 parent_tid;\n  ThreadContextBase *next;  // For storing thread contexts in a list.\n\n  atomic_uint32_t thread_destroyed; // To address race of Joined vs Finished\n\n  void SetName(const char *new_name);\n\n  void SetDead();\n  void SetJoined(void *arg);\n  void SetFinished();\n  void SetStarted(tid_t _os_id, ThreadType _thread_type, void *arg);\n  void SetCreated(uptr _user_id, u64 _unique_id, bool _detached,\n                  u32 _parent_tid, void *arg);\n  void Reset();\n\n  void SetDestroyed();\n  bool GetDestroyed();\n\n  // The following methods may be overriden by subclasses.\n  // Some of them take opaque arg that may be optionally be used\n  // by subclasses.\n  virtual void OnDead() {}\n  virtual void OnJoined(void *arg) {}\n  virtual void OnFinished() {}\n  virtual void OnStarted(void *arg) {}\n  virtual void OnCreated(void *arg) {}\n  virtual void OnReset() {}\n  virtual void OnDetached(void *arg) {}\n\n protected:\n  ~ThreadContextBase();\n};\n\ntypedef ThreadContextBase* (*ThreadContextFactory)(u32 tid);\n\nclass SANITIZER_MUTEX ThreadRegistry {\n public:\n  ThreadRegistry(ThreadContextFactory factory);\n  ThreadRegistry(ThreadContextFactory factory, u32 max_threads,\n                 u32 thread_quarantine_size, u32 max_reuse);\n  void GetNumberOfThreads(uptr *total = nullptr, uptr *running = nullptr,\n                          uptr *alive = nullptr);\n  uptr GetMaxAliveThreads();\n\n  void Lock() SANITIZER_ACQUIRE() { mtx_.Lock(); }\n  void CheckLocked() const SANITIZER_CHECK_LOCKED() { mtx_.CheckLocked(); }\n  void Unlock() SANITIZER_RELEASE() { mtx_.Unlock(); }\n\n  // Should be guarded by ThreadRegistryLock.\n  ThreadContextBase *GetThreadLocked(u32 tid) {\n    return threads_.empty() ? 
nullptr : threads_[tid];\n  }\n\n  u32 NumThreadsLocked() const { return threads_.size(); }\n\n  u32 CreateThread(uptr user_id, bool detached, u32 parent_tid, void *arg);\n\n  typedef void (*ThreadCallback)(ThreadContextBase *tctx, void *arg);\n  // Invokes callback with a specified arg for each thread context.\n  // Should be guarded by ThreadRegistryLock.\n  void RunCallbackForEachThreadLocked(ThreadCallback cb, void *arg);\n\n  typedef bool (*FindThreadCallback)(ThreadContextBase *tctx, void *arg);\n  // Finds a thread using the provided callback. Returns kInvalidTid if no\n  // thread is found.\n  u32 FindThread(FindThreadCallback cb, void *arg);\n  // Should be guarded by ThreadRegistryLock. Return 0 if no thread\n  // is found.\n  ThreadContextBase *FindThreadContextLocked(FindThreadCallback cb,\n                                             void *arg);\n  ThreadContextBase *FindThreadContextByOsIDLocked(tid_t os_id);\n\n  void SetThreadName(u32 tid, const char *name);\n  void SetThreadNameByUserId(uptr user_id, const char *name);\n  void DetachThread(u32 tid, void *arg);\n  void JoinThread(u32 tid, void *arg);\n  // Finishes thread and returns previous status.\n  ThreadStatus FinishThread(u32 tid);\n  void StartThread(u32 tid, tid_t os_id, ThreadType thread_type, void *arg);\n  u32 ConsumeThreadUserId(uptr user_id);\n  void SetThreadUserId(u32 tid, uptr user_id);\n\n  // OnFork must be called in the child process after fork to purge old\n  // threads that don't exist anymore (except for the current thread tid).\n  // Returns number of alive threads before fork.\n  u32 OnFork(u32 tid);\n\n private:\n  const ThreadContextFactory context_factory_;\n  const u32 max_threads_;\n  const u32 thread_quarantine_size_;\n  const u32 max_reuse_;\n\n  Mutex mtx_;\n\n  u64 total_threads_;   // Total number of created threads. 
May be greater than\n                        // max_threads_ if contexts were reused.\n  uptr alive_threads_;  // Created or running.\n  uptr max_alive_threads_;\n  uptr running_threads_;\n\n  InternalMmapVector<ThreadContextBase *> threads_;\n  IntrusiveList<ThreadContextBase> dead_threads_;\n  IntrusiveList<ThreadContextBase> invalid_threads_;\n  DenseMap<uptr, Tid> live_;\n\n  void QuarantinePush(ThreadContextBase *tctx);\n  ThreadContextBase *QuarantinePop();\n};\n\ntypedef GenericScopedLock<ThreadRegistry> ThreadRegistryLock;\n\n} // namespace __sanitizer\n\n#endif // SANITIZER_THREAD_REGISTRY_H\n"
  },
  {
    "path": "runtime/sanitizer_common/sanitizer_thread_safety.h",
    "content": "//===-- sanitizer_thread_safety.h -------------------------------*- C++ -*-===//\n//\n// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n// See https://llvm.org/LICENSE.txt for license information.\n// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n//\n//===----------------------------------------------------------------------===//\n//\n// This file is shared between sanitizer tools.\n//\n// Wrappers around thread safety annotations.\n// https://clang.llvm.org/docs/ThreadSafetyAnalysis.html\n//===----------------------------------------------------------------------===//\n\n#ifndef SANITIZER_THREAD_SAFETY_H\n#define SANITIZER_THREAD_SAFETY_H\n\n#if defined(__clang__)\n#  define SANITIZER_THREAD_ANNOTATION(x) __attribute__((x))\n#else\n#  define SANITIZER_THREAD_ANNOTATION(x)\n#endif\n\n#define SANITIZER_MUTEX SANITIZER_THREAD_ANNOTATION(capability(\"mutex\"))\n#define SANITIZER_SCOPED_LOCK SANITIZER_THREAD_ANNOTATION(scoped_lockable)\n#define SANITIZER_GUARDED_BY(x) SANITIZER_THREAD_ANNOTATION(guarded_by(x))\n#define SANITIZER_PT_GUARDED_BY(x) SANITIZER_THREAD_ANNOTATION(pt_guarded_by(x))\n#define SANITIZER_REQUIRES(...) \\\n  SANITIZER_THREAD_ANNOTATION(requires_capability(__VA_ARGS__))\n#define SANITIZER_REQUIRES_SHARED(...) \\\n  SANITIZER_THREAD_ANNOTATION(requires_shared_capability(__VA_ARGS__))\n#define SANITIZER_ACQUIRE(...) \\\n  SANITIZER_THREAD_ANNOTATION(acquire_capability(__VA_ARGS__))\n#define SANITIZER_ACQUIRE_SHARED(...) \\\n  SANITIZER_THREAD_ANNOTATION(acquire_shared_capability(__VA_ARGS__))\n#define SANITIZER_TRY_ACQUIRE(...) \\\n  SANITIZER_THREAD_ANNOTATION(try_acquire_capability(__VA_ARGS__))\n#define SANITIZER_RELEASE(...) \\\n  SANITIZER_THREAD_ANNOTATION(release_capability(__VA_ARGS__))\n#define SANITIZER_RELEASE_SHARED(...) \\\n  SANITIZER_THREAD_ANNOTATION(release_shared_capability(__VA_ARGS__))\n#define SANITIZER_EXCLUDES(...) 
\\\n  SANITIZER_THREAD_ANNOTATION(locks_excluded(__VA_ARGS__))\n#define SANITIZER_CHECK_LOCKED(...) \\\n  SANITIZER_THREAD_ANNOTATION(assert_capability(__VA_ARGS__))\n#define SANITIZER_NO_THREAD_SAFETY_ANALYSIS \\\n  SANITIZER_THREAD_ANNOTATION(no_thread_safety_analysis)\n\n#endif\n"
  },
  {
    "path": "runtime/sanitizer_common/sanitizer_tls_get_addr.cpp",
    "content": "//===-- sanitizer_tls_get_addr.cpp ----------------------------------------===//\n//\n// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n// See https://llvm.org/LICENSE.txt for license information.\n// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n//\n//===----------------------------------------------------------------------===//\n//\n// Handle the __tls_get_addr call.\n//\n//===----------------------------------------------------------------------===//\n\n#include \"sanitizer_tls_get_addr.h\"\n\n#include \"sanitizer_atomic.h\"\n#include \"sanitizer_flags.h\"\n#include \"sanitizer_platform_interceptors.h\"\n\nnamespace __sanitizer {\n#if SANITIZER_INTERCEPT_TLS_GET_ADDR\n\n// The actual parameter that comes to __tls_get_addr\n// is a pointer to a struct with two words in it:\nstruct TlsGetAddrParam {\n  uptr dso_id;\n  uptr offset;\n};\n\n// Glibc starting from 2.19 allocates tls using __signal_safe_memalign,\n// which has such header.\nstruct Glibc_2_19_tls_header {\n  uptr size;\n  uptr start;\n};\n\n// This must be static TLS\n__attribute__((tls_model(\"initial-exec\")))\nstatic __thread DTLS dtls;\n\n// Make sure we properly destroy the DTLS objects:\n// this counter should never get too large.\nstatic atomic_uintptr_t number_of_live_dtls;\n\nstatic const uptr kDestroyedThread = -1;\n\nstatic void DTLS_Deallocate(DTLS::DTVBlock *block) {\n  VReport(2, \"__tls_get_addr: DTLS_Deallocate %p\\n\", (void *)block);\n  UnmapOrDie(block, sizeof(DTLS::DTVBlock));\n  atomic_fetch_sub(&number_of_live_dtls, 1, memory_order_relaxed);\n}\n\nstatic DTLS::DTVBlock *DTLS_NextBlock(atomic_uintptr_t *cur) {\n  uptr v = atomic_load(cur, memory_order_acquire);\n  if (v == kDestroyedThread)\n    return nullptr;\n  DTLS::DTVBlock *next = (DTLS::DTVBlock *)v;\n  if (next)\n    return next;\n  DTLS::DTVBlock *new_dtv =\n      (DTLS::DTVBlock *)MmapOrDie(sizeof(DTLS::DTVBlock), \"DTLS_NextBlock\");\n  uptr prev = 0;\n  if 
(!atomic_compare_exchange_strong(cur, &prev, (uptr)new_dtv,\n                                      memory_order_seq_cst)) {\n    UnmapOrDie(new_dtv, sizeof(DTLS::DTVBlock));\n    return (DTLS::DTVBlock *)prev;\n  }\n  uptr num_live_dtls =\n      atomic_fetch_add(&number_of_live_dtls, 1, memory_order_relaxed);\n  VReport(2, \"__tls_get_addr: DTLS_NextBlock %p %zd\\n\", (void *)&dtls,\n          num_live_dtls);\n  return new_dtv;\n}\n\nstatic DTLS::DTV *DTLS_Find(uptr id) {\n  VReport(2, \"__tls_get_addr: DTLS_Find %p %zd\\n\", (void *)&dtls, id);\n  static constexpr uptr kPerBlock = ARRAY_SIZE(DTLS::DTVBlock::dtvs);\n  DTLS::DTVBlock *cur = DTLS_NextBlock(&dtls.dtv_block);\n  if (!cur)\n    return nullptr;\n  for (; id >= kPerBlock; id -= kPerBlock) cur = DTLS_NextBlock(&cur->next);\n  return cur->dtvs + id;\n}\n\nvoid DTLS_Destroy() {\n  if (!common_flags()->intercept_tls_get_addr) return;\n  VReport(2, \"__tls_get_addr: DTLS_Destroy %p\\n\", (void *)&dtls);\n  DTLS::DTVBlock *block = (DTLS::DTVBlock *)atomic_exchange(\n      &dtls.dtv_block, kDestroyedThread, memory_order_release);\n  while (block) {\n    DTLS::DTVBlock *next =\n        (DTLS::DTVBlock *)atomic_load(&block->next, memory_order_acquire);\n    DTLS_Deallocate(block);\n    block = next;\n  }\n}\n\n#if defined(__powerpc64__) || defined(__mips__)\n// This is glibc's TLS_DTV_OFFSET:\n// \"Dynamic thread vector pointers point 0x8000 past the start of each\n//  TLS block.\" (sysdeps/<arch>/dl-tls.h)\nstatic const uptr kDtvOffset = 0x8000;\n#elif defined(__riscv)\n// This is glibc's TLS_DTV_OFFSET:\n// \"Dynamic thread vector pointers point 0x800 past the start of each\n// TLS block.\" (sysdeps/riscv/dl-tls.h)\nstatic const uptr kDtvOffset = 0x800;\n#else\nstatic const uptr kDtvOffset = 0;\n#endif\n\nDTLS::DTV *DTLS_on_tls_get_addr(void *arg_void, void *res,\n                                uptr static_tls_begin, uptr static_tls_end) {\n  if (!common_flags()->intercept_tls_get_addr) return 0;\n  
TlsGetAddrParam *arg = reinterpret_cast<TlsGetAddrParam *>(arg_void);\n  uptr dso_id = arg->dso_id;\n  DTLS::DTV *dtv = DTLS_Find(dso_id);\n  if (!dtv || dtv->beg)\n    return 0;\n  uptr tls_size = 0;\n  uptr tls_beg = reinterpret_cast<uptr>(res) - arg->offset - kDtvOffset;\n  VReport(2,\n          \"__tls_get_addr: %p {0x%zx,0x%zx} => %p; tls_beg: 0x%zx; sp: %p \"\n          \"num_live_dtls %zd\\n\",\n          (void *)arg, arg->dso_id, arg->offset, res, tls_beg, (void *)&tls_beg,\n          atomic_load(&number_of_live_dtls, memory_order_relaxed));\n  if (dtls.last_memalign_ptr == tls_beg) {\n    tls_size = dtls.last_memalign_size;\n    VReport(2, \"__tls_get_addr: glibc <=2.18 suspected; tls={0x%zx,0x%zx}\\n\",\n            tls_beg, tls_size);\n  } else if (tls_beg >= static_tls_begin && tls_beg < static_tls_end) {\n    // This is the static TLS block which was initialized / unpoisoned at thread\n    // creation.\n    VReport(2, \"__tls_get_addr: static tls: 0x%zx\\n\", tls_beg);\n    tls_size = 0;\n  } else if ((tls_beg % 4096) == sizeof(Glibc_2_19_tls_header)) {\n    // We may want to check gnu_get_libc_version().\n    Glibc_2_19_tls_header *header = (Glibc_2_19_tls_header *)tls_beg - 1;\n    tls_size = header->size;\n    tls_beg = header->start;\n    VReport(2, \"__tls_get_addr: glibc >=2.19 suspected; tls={0x%zx 0x%zx}\\n\",\n            tls_beg, tls_size);\n  } else {\n    VReport(2, \"__tls_get_addr: Can't guess glibc version\\n\");\n    // This may happen inside the DTOR of main thread, so just ignore it.\n    tls_size = 0;\n  }\n  dtv->beg = tls_beg;\n  dtv->size = tls_size;\n  return dtv;\n}\n\nvoid DTLS_on_libc_memalign(void *ptr, uptr size) {\n  if (!common_flags()->intercept_tls_get_addr) return;\n  VReport(2, \"DTLS_on_libc_memalign: %p 0x%zx\\n\", ptr, size);\n  dtls.last_memalign_ptr = reinterpret_cast<uptr>(ptr);\n  dtls.last_memalign_size = size;\n}\n\nDTLS *DTLS_Get() { return &dtls; }\n\nbool DTLSInDestruction(DTLS *dtls) {\n  return 
atomic_load(&dtls->dtv_block, memory_order_relaxed) ==\n         kDestroyedThread;\n}\n\n#else\nvoid DTLS_on_libc_memalign(void *ptr, uptr size) {}\nDTLS::DTV *DTLS_on_tls_get_addr(void *arg, void *res,\n  unsigned long, unsigned long) { return 0; }\nDTLS *DTLS_Get() { return 0; }\nvoid DTLS_Destroy() {}\nbool DTLSInDestruction(DTLS *dtls) {\n  UNREACHABLE(\"dtls is unsupported on this platform!\");\n}\n\n#endif  // SANITIZER_INTERCEPT_TLS_GET_ADDR\n\n}  // namespace __sanitizer\n"
  },
  {
    "path": "runtime/sanitizer_common/sanitizer_tls_get_addr.h",
    "content": "//===-- sanitizer_tls_get_addr.h --------------------------------*- C++ -*-===//\n//\n// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n// See https://llvm.org/LICENSE.txt for license information.\n// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n//\n//===----------------------------------------------------------------------===//\n//\n// Handle the __tls_get_addr call.\n//\n// All this magic is specific to glibc and is required to workaround\n// the lack of interface that would tell us about the Dynamic TLS (DTLS).\n// https://sourceware.org/bugzilla/show_bug.cgi?id=16291\n//\n// The matters get worse because the glibc implementation changed between\n// 2.18 and 2.19:\n// https://groups.google.com/forum/#!topic/address-sanitizer/BfwYD8HMxTM\n//\n// Before 2.19, every DTLS chunk is allocated with __libc_memalign,\n// which we intercept and thus know where is the DTLS.\n// Since 2.19, DTLS chunks are allocated with __signal_safe_memalign,\n// which is an internal function that wraps a mmap call, neither of which\n// we can intercept. 
Luckily, __signal_safe_memalign has a simple parseable\n// header which we can use.\n//\n//===----------------------------------------------------------------------===//\n\n#ifndef SANITIZER_TLS_GET_ADDR_H\n#define SANITIZER_TLS_GET_ADDR_H\n\n#include \"sanitizer_atomic.h\"\n#include \"sanitizer_common.h\"\n\nnamespace __sanitizer {\n\nstruct DTLS {\n  // Array of DTLS chunks for the current Thread.\n  // If beg == 0, the chunk is unused.\n  struct DTV {\n    uptr beg, size;\n  };\n  struct DTVBlock {\n    atomic_uintptr_t next;\n    DTV dtvs[(4096UL - sizeof(next)) / sizeof(DTLS::DTV)];\n  };\n\n  static_assert(sizeof(DTVBlock) <= 4096UL, \"Unexpected block size\");\n\n  atomic_uintptr_t dtv_block;\n\n  // Auxiliary fields, don't access them outside sanitizer_tls_get_addr.cpp\n  uptr last_memalign_size;\n  uptr last_memalign_ptr;\n};\n\ntemplate <typename Fn>\nvoid ForEachDVT(DTLS *dtls, const Fn &fn) {\n  DTLS::DTVBlock *block =\n      (DTLS::DTVBlock *)atomic_load(&dtls->dtv_block, memory_order_acquire);\n  while (block) {\n    int id = 0;\n    for (auto &d : block->dtvs) fn(d, id++);\n    block = (DTLS::DTVBlock *)atomic_load(&block->next, memory_order_acquire);\n  }\n}\n\n// Returns pointer and size of a linker-allocated TLS block.\n// Each block is returned exactly once.\nDTLS::DTV *DTLS_on_tls_get_addr(void *arg, void *res, uptr static_tls_begin,\n                                uptr static_tls_end);\nvoid DTLS_on_libc_memalign(void *ptr, uptr size);\nDTLS *DTLS_Get();\nvoid DTLS_Destroy();  // Make sure to call this before the thread is destroyed.\n// Returns true if DTLS of suspended thread is in destruction process.\nbool DTLSInDestruction(DTLS *dtls);\n\n}  // namespace __sanitizer\n\n#endif  // SANITIZER_TLS_GET_ADDR_H\n"
  },
  {
    "path": "runtime/sanitizer_common/sanitizer_type_traits.cpp",
    "content": "//===-- sanitizer_type_traits.cpp -------------------------------*- C++ -*-===//\n//\n// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n// See https://llvm.org/LICENSE.txt for license information.\n// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n//\n//===----------------------------------------------------------------------===//\n//\n// Implements a subset of C++ type traits. This is so we can avoid depending\n// on system C++ headers.\n//\n//===----------------------------------------------------------------------===//\n#include \"sanitizer_type_traits.h\"\n\nnamespace __sanitizer {\n\nconst bool true_type::value;\nconst bool false_type::value;\n\n}  // namespace __sanitizer\n"
  },
  {
    "path": "runtime/sanitizer_common/sanitizer_type_traits.h",
    "content": "//===-- sanitizer_type_traits.h ---------------------------------*- C++ -*-===//\n//\n// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n// See https://llvm.org/LICENSE.txt for license information.\n// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n//\n//===----------------------------------------------------------------------===//\n//\n// Implements a subset of C++ type traits. This is so we can avoid depending\n// on system C++ headers.\n//\n//===----------------------------------------------------------------------===//\n#ifndef SANITIZER_TYPE_TRAITS_H\n#define SANITIZER_TYPE_TRAITS_H\n\n#include \"sanitizer_common/sanitizer_internal_defs.h\"\n\nnamespace __sanitizer {\n\nstruct true_type {\n  static const bool value = true;\n};\n\nstruct false_type {\n  static const bool value = false;\n};\n\n// is_same<T, U>\n//\n// Type trait to compare if types are the same.\n// E.g.\n//\n// ```\n// is_same<int,int>::value - True\n// is_same<int,char>::value - False\n// ```\ntemplate <typename T, typename U>\nstruct is_same : public false_type {};\n\ntemplate <typename T>\nstruct is_same<T, T> : public true_type {};\n\n// conditional<B, T, F>\n//\n// Defines type as T if B is true or as F otherwise.\n// E.g. 
the following is true\n//\n// ```\n// is_same<int, conditional<true, int, double>::type>::value\n// is_same<double, conditional<false, int, double>::type>::value\n// ```\ntemplate <bool B, class T, class F>\nstruct conditional {\n  using type = T;\n};\n\ntemplate <class T, class F>\nstruct conditional<false, T, F> {\n  using type = F;\n};\n\ntemplate <class T>\nstruct remove_reference {\n  using type = T;\n};\ntemplate <class T>\nstruct remove_reference<T&> {\n  using type = T;\n};\ntemplate <class T>\nstruct remove_reference<T&&> {\n  using type = T;\n};\n\ntemplate <class T>\nWARN_UNUSED_RESULT inline typename remove_reference<T>::type&& move(T&& t) {\n  return static_cast<typename remove_reference<T>::type&&>(t);\n}\n\ntemplate <class T>\nWARN_UNUSED_RESULT inline constexpr T&& forward(\n    typename remove_reference<T>::type& t) {\n  return static_cast<T&&>(t);\n}\n\ntemplate <class T>\nWARN_UNUSED_RESULT inline constexpr T&& forward(\n    typename remove_reference<T>::type&& t) {\n  return static_cast<T&&>(t);\n}\n\ntemplate <class T, T v>\nstruct integral_constant {\n  static constexpr const T value = v;\n  typedef T value_type;\n  typedef integral_constant type;\n  constexpr operator value_type() const { return value; }\n  constexpr value_type operator()() const { return value; }\n};\n\n#ifndef __has_builtin\n#  define __has_builtin(x) 0\n#endif\n\n#if __has_builtin(__is_trivially_destructible)\n\ntemplate <class T>\nstruct is_trivially_destructible\n    : public integral_constant<bool, __is_trivially_destructible(T)> {};\n\n#elif __has_builtin(__has_trivial_destructor)\n\ntemplate <class T>\nstruct is_trivially_destructible\n    : public integral_constant<bool, __has_trivial_destructor(T)> {};\n\n#else\n\ntemplate <class T>\nstruct is_trivially_destructible\n    : public integral_constant<bool, /* less efficient fallback */ false> {};\n\n#endif\n\n#if __has_builtin(__is_trivially_copyable)\n\ntemplate <class T>\nstruct is_trivially_copyable\n    : public 
integral_constant<bool, __is_trivially_copyable(T)> {};\n\n#else\n\ntemplate <class T>\nstruct is_trivially_copyable\n    : public integral_constant<bool, /* less efficient fallback */ false> {};\n\n#endif\n\n}  // namespace __sanitizer\n\n#endif\n"
  },
  {
    "path": "runtime/sanitizer_common/sanitizer_unwind_linux_libcdep.cpp",
    "content": "//===-- sanitizer_unwind_linux_libcdep.cpp --------------------------------===//\n//\n// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n// See https://llvm.org/LICENSE.txt for license information.\n// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n//\n//===----------------------------------------------------------------------===//\n//\n// This file contains the unwind.h-based (aka \"slow\") stack unwinding routines\n// available to the tools on Linux, Android, NetBSD, FreeBSD, and Solaris.\n//===----------------------------------------------------------------------===//\n\n#include \"sanitizer_platform.h\"\n#if SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_NETBSD || \\\n    SANITIZER_SOLARIS\n#include \"sanitizer_common.h\"\n#include \"sanitizer_stacktrace.h\"\n\n#if SANITIZER_ANDROID\n#include <dlfcn.h>  // for dlopen()\n#endif\n\n#if SANITIZER_FREEBSD\n#define _GNU_SOURCE  // to declare _Unwind_Backtrace() from <unwind.h>\n#endif\n#include <unwind.h>\n\nnamespace __sanitizer {\n\nnamespace {\n\n//---------------------------- UnwindSlow --------------------------------------\n\ntypedef struct {\n  uptr absolute_pc;\n  uptr stack_top;\n  uptr stack_size;\n} backtrace_frame_t;\n\nextern \"C\" {\ntypedef void *(*acquire_my_map_info_list_func)();\ntypedef void (*release_my_map_info_list_func)(void *map);\ntypedef sptr (*unwind_backtrace_signal_arch_func)(\n    void *siginfo, void *sigcontext, void *map_info_list,\n    backtrace_frame_t *backtrace, uptr ignore_depth, uptr max_depth);\nacquire_my_map_info_list_func acquire_my_map_info_list;\nrelease_my_map_info_list_func release_my_map_info_list;\nunwind_backtrace_signal_arch_func unwind_backtrace_signal_arch;\n} // extern \"C\"\n\n#if defined(__arm__) && !SANITIZER_NETBSD\n// NetBSD uses dwarf EH\n#define UNWIND_STOP _URC_END_OF_STACK\n#define UNWIND_CONTINUE _URC_NO_REASON\n#else\n#define UNWIND_STOP _URC_NORMAL_STOP\n#define UNWIND_CONTINUE 
_URC_NO_REASON\n#endif\n\nuptr Unwind_GetIP(struct _Unwind_Context *ctx) {\n#if defined(__arm__) && !SANITIZER_MAC\n  uptr val;\n  _Unwind_VRS_Result res = _Unwind_VRS_Get(ctx, _UVRSC_CORE,\n      15 /* r15 = PC */, _UVRSD_UINT32, &val);\n  CHECK(res == _UVRSR_OK && \"_Unwind_VRS_Get failed\");\n  // Clear the Thumb bit.\n  return val & ~(uptr)1;\n#else\n  return (uptr)_Unwind_GetIP(ctx);\n#endif\n}\n\nstruct UnwindTraceArg {\n  BufferedStackTrace *stack;\n  u32 max_depth;\n};\n\n_Unwind_Reason_Code Unwind_Trace(struct _Unwind_Context *ctx, void *param) {\n  UnwindTraceArg *arg = (UnwindTraceArg*)param;\n  CHECK_LT(arg->stack->size, arg->max_depth);\n  uptr pc = Unwind_GetIP(ctx);\n  const uptr kPageSize = GetPageSizeCached();\n  // Let's assume that any pointer in the 0th page (i.e. <0x1000 on i386 and\n  // x86_64) is invalid and stop unwinding here.  If we're adding support for\n  // a platform where this isn't true, we need to reconsider this check.\n  if (pc < kPageSize) return UNWIND_STOP;\n  arg->stack->trace_buffer[arg->stack->size++] = pc;\n  if (arg->stack->size == arg->max_depth) return UNWIND_STOP;\n  return UNWIND_CONTINUE;\n}\n\n}  // namespace\n\n#if SANITIZER_ANDROID\nvoid SanitizerInitializeUnwinder() {\n  if (AndroidGetApiLevel() >= ANDROID_LOLLIPOP_MR1) return;\n\n  // Pre-lollipop Android can not unwind through signal handler frames with\n  // libgcc unwinder, but it has a libcorkscrew.so library with the necessary\n  // workarounds.\n  void *p = dlopen(\"libcorkscrew.so\", RTLD_LAZY);\n  if (!p) {\n    VReport(1,\n            \"Failed to open libcorkscrew.so. 
You may see broken stack traces \"\n            \"in SEGV reports.\");\n    return;\n  }\n  acquire_my_map_info_list =\n      (acquire_my_map_info_list_func)(uptr)dlsym(p, \"acquire_my_map_info_list\");\n  release_my_map_info_list =\n      (release_my_map_info_list_func)(uptr)dlsym(p, \"release_my_map_info_list\");\n  unwind_backtrace_signal_arch = (unwind_backtrace_signal_arch_func)(uptr)dlsym(\n      p, \"unwind_backtrace_signal_arch\");\n  if (!acquire_my_map_info_list || !release_my_map_info_list ||\n      !unwind_backtrace_signal_arch) {\n    VReport(1,\n            \"Failed to find one of the required symbols in libcorkscrew.so. \"\n            \"You may see broken stack traces in SEGV reports.\");\n    acquire_my_map_info_list = 0;\n    unwind_backtrace_signal_arch = 0;\n    release_my_map_info_list = 0;\n  }\n}\n#endif\n\nvoid BufferedStackTrace::UnwindSlow(uptr pc, u32 max_depth) {\n  CHECK_GE(max_depth, 2);\n  size = 0;\n  UnwindTraceArg arg = {this, Min(max_depth + 1, kStackTraceMax)};\n  _Unwind_Backtrace(Unwind_Trace, &arg);\n  // We need to pop a few frames so that pc is on top.\n  uptr to_pop = LocatePcInTrace(pc);\n  // trace_buffer[0] belongs to the current function so we always pop it,\n  // unless there is only 1 frame in the stack trace (1 frame is always better\n  // than 0!).\n  // 1-frame stacks don't normally happen, but this depends on the actual\n  // unwinder implementation (libgcc, libunwind, etc) which is outside of our\n  // control.\n  if (to_pop == 0 && size > 1)\n    to_pop = 1;\n  PopStackFrames(to_pop);\n#if defined(__GNUC__) && defined(__sparc__)\n  // __builtin_return_address returns the address of the call instruction\n  // on the SPARC and not the return address, so we need to compensate.\n  trace_buffer[0] = GetNextInstructionPc(pc);\n#else\n  trace_buffer[0] = pc;\n#endif\n}\n\nvoid BufferedStackTrace::UnwindSlow(uptr pc, void *context, u32 max_depth) {\n  CHECK(context);\n  CHECK_GE(max_depth, 2);\n  if 
(!unwind_backtrace_signal_arch) {\n    UnwindSlow(pc, max_depth);\n    return;\n  }\n\n  void *map = acquire_my_map_info_list();\n  CHECK(map);\n  InternalMmapVector<backtrace_frame_t> frames(kStackTraceMax);\n  // siginfo argument appears to be unused.\n  sptr res = unwind_backtrace_signal_arch(/* siginfo */ 0, context, map,\n                                          frames.data(),\n                                          /* ignore_depth */ 0, max_depth);\n  release_my_map_info_list(map);\n  if (res < 0) return;\n  CHECK_LE((uptr)res, kStackTraceMax);\n\n  size = 0;\n  // +2 compensate for libcorkscrew unwinder returning addresses of call\n  // instructions instead of raw return addresses.\n  for (sptr i = 0; i < res; ++i)\n    trace_buffer[size++] = frames[i].absolute_pc + 2;\n}\n\n}  // namespace __sanitizer\n\n#endif  // SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_NETBSD ||\n        // SANITIZER_SOLARIS\n"
  },
  {
    "path": "runtime/sanitizer_common/sanitizer_unwind_win.cpp",
    "content": "//===-- sanitizer_unwind_win.cpp ------------------------------------------===//\n//\n// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n// See https://llvm.org/LICENSE.txt for license information.\n// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n//\n//===----------------------------------------------------------------------===//\n//\n/// Sanitizer unwind Windows specific functions.\n//\n//===----------------------------------------------------------------------===//\n\n#include \"sanitizer_platform.h\"\n#if SANITIZER_WINDOWS\n\n#define WIN32_LEAN_AND_MEAN\n#define NOGDI\n#include <windows.h>\n\n#include \"sanitizer_dbghelp.h\"  // for StackWalk64\n#include \"sanitizer_stacktrace.h\"\n#include \"sanitizer_symbolizer.h\"  // for InitializeDbgHelpIfNeeded\n\nusing namespace __sanitizer;\n\n#if !SANITIZER_GO\nvoid BufferedStackTrace::UnwindSlow(uptr pc, u32 max_depth) {\n  CHECK_GE(max_depth, 2);\n  // FIXME: CaptureStackBackTrace might be too slow for us.\n  // FIXME: Compare with StackWalk64.\n  // FIXME: Look at LLVMUnhandledExceptionFilter in Signals.inc\n  size = CaptureStackBackTrace(1, Min(max_depth, kStackTraceMax),\n    (void **)&trace_buffer[0], 0);\n  if (size == 0)\n    return;\n\n  // Skip the RTL frames by searching for the PC in the stacktrace.\n  uptr pc_location = LocatePcInTrace(pc);\n  PopStackFrames(pc_location);\n\n  // Replace the first frame with the PC because the frame in the\n  // stacktrace might be incorrect.\n  trace_buffer[0] = pc;\n}\n\n#ifdef __clang__\n#pragma clang diagnostic push\n#pragma clang diagnostic ignored \"-Wframe-larger-than=\"\n#endif\nvoid BufferedStackTrace::UnwindSlow(uptr pc, void *context, u32 max_depth) {\n  CHECK(context);\n  CHECK_GE(max_depth, 2);\n  CONTEXT ctx = *(CONTEXT *)context;\n  STACKFRAME64 stack_frame;\n  memset(&stack_frame, 0, sizeof(stack_frame));\n\n  InitializeDbgHelpIfNeeded();\n\n  size = 0;\n#    if SANITIZER_WINDOWS64\n#      if 
SANITIZER_ARM64\n  int machine_type = IMAGE_FILE_MACHINE_ARM64;\n  stack_frame.AddrPC.Offset = ctx.Pc;\n  stack_frame.AddrFrame.Offset = ctx.Fp;\n  stack_frame.AddrStack.Offset = ctx.Sp;\n#      else\n  int machine_type = IMAGE_FILE_MACHINE_AMD64;\n  stack_frame.AddrPC.Offset = ctx.Rip;\n  stack_frame.AddrFrame.Offset = ctx.Rbp;\n  stack_frame.AddrStack.Offset = ctx.Rsp;\n#      endif\n#    else\n  int machine_type = IMAGE_FILE_MACHINE_I386;\n  stack_frame.AddrPC.Offset = ctx.Eip;\n  stack_frame.AddrFrame.Offset = ctx.Ebp;\n  stack_frame.AddrStack.Offset = ctx.Esp;\n#    endif\n  stack_frame.AddrPC.Mode = AddrModeFlat;\n  stack_frame.AddrFrame.Mode = AddrModeFlat;\n  stack_frame.AddrStack.Mode = AddrModeFlat;\n  while (StackWalk64(machine_type, GetCurrentProcess(), GetCurrentThread(),\n                     &stack_frame, &ctx, NULL, SymFunctionTableAccess64,\n                     SymGetModuleBase64, NULL) &&\n         size < Min(max_depth, kStackTraceMax)) {\n    trace_buffer[size++] = (uptr)stack_frame.AddrPC.Offset;\n  }\n}\n#    ifdef __clang__\n#      pragma clang diagnostic pop\n#    endif\n#  endif  // #if !SANITIZER_GO\n\n#endif  // SANITIZER_WINDOWS\n"
  },
  {
    "path": "runtime/sanitizer_common/sanitizer_vector.h",
    "content": "//===-- sanitizer_vector.h -------------------------------------*- C++ -*-===//\n//\n// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n// See https://llvm.org/LICENSE.txt for license information.\n// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n//\n//===----------------------------------------------------------------------===//\n//\n// This file is shared between sanitizers run-time libraries.\n//\n//===----------------------------------------------------------------------===//\n\n// Low-fat STL-like vector container.\n\n#ifndef SANITIZER_VECTOR_H\n#define SANITIZER_VECTOR_H\n\n#include \"sanitizer_common/sanitizer_allocator_internal.h\"\n#include \"sanitizer_common/sanitizer_libc.h\"\n\nnamespace __sanitizer {\n\ntemplate<typename T>\nclass Vector {\n public:\n  Vector() : begin_(), end_(), last_() {}\n\n  ~Vector() {\n    if (begin_)\n      InternalFree(begin_);\n  }\n\n  void Reset() {\n    if (begin_)\n      InternalFree(begin_);\n    begin_ = 0;\n    end_ = 0;\n    last_ = 0;\n  }\n\n  uptr Size() const {\n    return end_ - begin_;\n  }\n\n  T &operator[](uptr i) {\n    DCHECK_LT(i, end_ - begin_);\n    return begin_[i];\n  }\n\n  const T &operator[](uptr i) const {\n    DCHECK_LT(i, end_ - begin_);\n    return begin_[i];\n  }\n\n  T *PushBack() {\n    EnsureSize(Size() + 1);\n    T *p = &end_[-1];\n    internal_memset(p, 0, sizeof(*p));\n    return p;\n  }\n\n  T *PushBack(const T& v) {\n    EnsureSize(Size() + 1);\n    T *p = &end_[-1];\n    internal_memcpy(p, &v, sizeof(*p));\n    return p;\n  }\n\n  void PopBack() {\n    DCHECK_GT(end_, begin_);\n    end_--;\n  }\n\n  void Resize(uptr size) {\n    if (size == 0) {\n      end_ = begin_;\n      return;\n    }\n    uptr old_size = Size();\n    if (size <= old_size) {\n      end_ = begin_ + size;\n      return;\n    }\n    EnsureSize(size);\n    if (old_size < size) {\n      for (uptr i = old_size; i < size; i++)\n        internal_memset(&begin_[i], 
0, sizeof(begin_[i]));\n    }\n  }\n\n private:\n  T *begin_;\n  T *end_;\n  T *last_;\n\n  void EnsureSize(uptr size) {\n    if (size <= Size())\n      return;\n    if (size <= (uptr)(last_ - begin_)) {\n      end_ = begin_ + size;\n      return;\n    }\n    uptr cap0 = last_ - begin_;\n    uptr cap = cap0 * 5 / 4;  // 25% growth\n    if (cap == 0)\n      cap = 16;\n    if (cap < size)\n      cap = size;\n    T *p = (T*)InternalAlloc(cap * sizeof(T));\n    if (cap0) {\n      internal_memcpy(p, begin_, cap0 * sizeof(T));\n      InternalFree(begin_);\n    }\n    begin_ = p;\n    end_ = begin_ + size;\n    last_ = begin_ + cap;\n  }\n\n  Vector(const Vector&);\n  void operator=(const Vector&);\n};\n}  // namespace __sanitizer\n\n#endif  // #ifndef SANITIZER_VECTOR_H\n"
  },
  {
    "path": "runtime/sanitizer_common/sanitizer_win.cpp",
    "content": "//===-- sanitizer_win.cpp -------------------------------------------------===//\n//\n// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n// See https://llvm.org/LICENSE.txt for license information.\n// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n//\n//===----------------------------------------------------------------------===//\n//\n// This file is shared between AddressSanitizer and ThreadSanitizer\n// run-time libraries and implements windows-specific functions from\n// sanitizer_libc.h.\n//===----------------------------------------------------------------------===//\n\n#include \"sanitizer_platform.h\"\n#if SANITIZER_WINDOWS\n\n#define WIN32_LEAN_AND_MEAN\n#define NOGDI\n#include <windows.h>\n#include <io.h>\n#include <psapi.h>\n#include <stdlib.h>\n\n#include \"sanitizer_common.h\"\n#include \"sanitizer_file.h\"\n#include \"sanitizer_libc.h\"\n#include \"sanitizer_mutex.h\"\n#include \"sanitizer_placement_new.h\"\n#include \"sanitizer_win_defs.h\"\n\n#if defined(PSAPI_VERSION) && PSAPI_VERSION == 1\n#pragma comment(lib, \"psapi\")\n#endif\n#if SANITIZER_WIN_TRACE\n#include <traceloggingprovider.h>\n//  Windows trace logging provider init\n#pragma comment(lib, \"advapi32.lib\")\nTRACELOGGING_DECLARE_PROVIDER(g_asan_provider);\n// GUID must be the same in utils/AddressSanitizerLoggingProvider.wprp\nTRACELOGGING_DEFINE_PROVIDER(g_asan_provider, \"AddressSanitizerLoggingProvider\",\n                             (0x6c6c766d, 0x3846, 0x4e6a, 0xa4, 0xfb, 0x5b,\n                              0x53, 0x0b, 0xd0, 0xf3, 0xfa));\n#else\n#define TraceLoggingUnregister(x)\n#endif\n\n// For WaitOnAddress\n#  pragma comment(lib, \"synchronization.lib\")\n\n// A macro to tell the compiler that this part of the code cannot be reached,\n// if the compiler supports this feature. 
Since we're using this in\n// code that is called when terminating the process, the expansion of the\n// macro should not terminate the process to avoid infinite recursion.\n#if defined(__clang__)\n# define BUILTIN_UNREACHABLE() __builtin_unreachable()\n#elif defined(__GNUC__) && \\\n    (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 5))\n# define BUILTIN_UNREACHABLE() __builtin_unreachable()\n#elif defined(_MSC_VER)\n# define BUILTIN_UNREACHABLE() __assume(0)\n#else\n# define BUILTIN_UNREACHABLE()\n#endif\n\nnamespace __sanitizer {\n\n#include \"sanitizer_syscall_generic.inc\"\n\n// --------------------- sanitizer_common.h\nuptr GetPageSize() {\n  SYSTEM_INFO si;\n  GetSystemInfo(&si);\n  return si.dwPageSize;\n}\n\nuptr GetMmapGranularity() {\n  SYSTEM_INFO si;\n  GetSystemInfo(&si);\n  return si.dwAllocationGranularity;\n}\n\nuptr GetMaxUserVirtualAddress() {\n  SYSTEM_INFO si;\n  GetSystemInfo(&si);\n  return (uptr)si.lpMaximumApplicationAddress;\n}\n\nuptr GetMaxVirtualAddress() {\n  return GetMaxUserVirtualAddress();\n}\n\nbool FileExists(const char *filename) {\n  return ::GetFileAttributesA(filename) != INVALID_FILE_ATTRIBUTES;\n}\n\nuptr internal_getpid() {\n  return GetProcessId(GetCurrentProcess());\n}\n\nint internal_dlinfo(void *handle, int request, void *p) {\n  UNIMPLEMENTED();\n}\n\n// In contrast to POSIX, on Windows GetCurrentThreadId()\n// returns a system-unique identifier.\ntid_t GetTid() {\n  return GetCurrentThreadId();\n}\n\nuptr GetThreadSelf() {\n  return GetTid();\n}\n\n#if !SANITIZER_GO\nvoid GetThreadStackTopAndBottom(bool at_initialization, uptr *stack_top,\n                                uptr *stack_bottom) {\n  CHECK(stack_top);\n  CHECK(stack_bottom);\n  MEMORY_BASIC_INFORMATION mbi;\n  CHECK_NE(VirtualQuery(&mbi /* on stack */, &mbi, sizeof(mbi)), 0);\n  // FIXME: is it possible for the stack to not be a single allocation?\n  // Are these values what ASan expects to get (reserved, not committed;\n  // including stack guard 
page) ?\n  *stack_top = (uptr)mbi.BaseAddress + mbi.RegionSize;\n  *stack_bottom = (uptr)mbi.AllocationBase;\n}\n#endif  // #if !SANITIZER_GO\n\nvoid *MmapOrDie(uptr size, const char *mem_type, bool raw_report) {\n  void *rv = VirtualAlloc(0, size, MEM_RESERVE | MEM_COMMIT, PAGE_READWRITE);\n  if (rv == 0)\n    ReportMmapFailureAndDie(size, mem_type, \"allocate\",\n                            GetLastError(), raw_report);\n  return rv;\n}\n\nvoid UnmapOrDie(void *addr, uptr size) {\n  if (!size || !addr)\n    return;\n\n  MEMORY_BASIC_INFORMATION mbi;\n  CHECK(VirtualQuery(addr, &mbi, sizeof(mbi)));\n\n  // MEM_RELEASE can only be used to unmap whole regions previously mapped with\n  // VirtualAlloc. So we first try MEM_RELEASE since it is better, and if that\n  // fails try MEM_DECOMMIT.\n  if (VirtualFree(addr, 0, MEM_RELEASE) == 0) {\n    if (VirtualFree(addr, size, MEM_DECOMMIT) == 0) {\n      Report(\"ERROR: %s failed to \"\n             \"deallocate 0x%zx (%zd) bytes at address %p (error code: %d)\\n\",\n             SanitizerToolName, size, size, addr, GetLastError());\n      CHECK(\"unable to unmap\" && 0);\n    }\n  }\n}\n\nstatic void *ReturnNullptrOnOOMOrDie(uptr size, const char *mem_type,\n                                     const char *mmap_type) {\n  error_t last_error = GetLastError();\n  if (last_error == ERROR_NOT_ENOUGH_MEMORY)\n    return nullptr;\n  ReportMmapFailureAndDie(size, mem_type, mmap_type, last_error);\n}\n\nvoid *MmapOrDieOnFatalError(uptr size, const char *mem_type) {\n  void *rv = VirtualAlloc(0, size, MEM_RESERVE | MEM_COMMIT, PAGE_READWRITE);\n  if (rv == 0)\n    return ReturnNullptrOnOOMOrDie(size, mem_type, \"allocate\");\n  return rv;\n}\n\n// We want to map a chunk of address space aligned to 'alignment'.\nvoid *MmapAlignedOrDieOnFatalError(uptr size, uptr alignment,\n                                   const char *mem_type) {\n  CHECK(IsPowerOfTwo(size));\n  CHECK(IsPowerOfTwo(alignment));\n\n  // Windows will align our 
allocations to at least 64K.\n  alignment = Max(alignment, GetMmapGranularity());\n\n  uptr mapped_addr =\n      (uptr)VirtualAlloc(0, size, MEM_RESERVE | MEM_COMMIT, PAGE_READWRITE);\n  if (!mapped_addr)\n    return ReturnNullptrOnOOMOrDie(size, mem_type, \"allocate aligned\");\n\n  // If we got it right on the first try, return. Otherwise, unmap it and go to\n  // the slow path.\n  if (IsAligned(mapped_addr, alignment))\n    return (void*)mapped_addr;\n  if (VirtualFree((void *)mapped_addr, 0, MEM_RELEASE) == 0)\n    ReportMmapFailureAndDie(size, mem_type, \"deallocate\", GetLastError());\n\n  // If we didn't get an aligned address, overallocate, find an aligned address,\n  // unmap, and try to allocate at that aligned address.\n  int retries = 0;\n  const int kMaxRetries = 10;\n  for (; retries < kMaxRetries &&\n         (mapped_addr == 0 || !IsAligned(mapped_addr, alignment));\n       retries++) {\n    // Overallocate size + alignment bytes.\n    mapped_addr =\n        (uptr)VirtualAlloc(0, size + alignment, MEM_RESERVE, PAGE_NOACCESS);\n    if (!mapped_addr)\n      return ReturnNullptrOnOOMOrDie(size, mem_type, \"allocate aligned\");\n\n    // Find the aligned address.\n    uptr aligned_addr = RoundUpTo(mapped_addr, alignment);\n\n    // Free the overallocation.\n    if (VirtualFree((void *)mapped_addr, 0, MEM_RELEASE) == 0)\n      ReportMmapFailureAndDie(size, mem_type, \"deallocate\", GetLastError());\n\n    // Attempt to allocate exactly the number of bytes we need at the aligned\n    // address. 
This may fail for a number of reasons, in which case we continue\n    // the loop.\n    mapped_addr = (uptr)VirtualAlloc((void *)aligned_addr, size,\n                                     MEM_RESERVE | MEM_COMMIT, PAGE_READWRITE);\n  }\n\n  // Fail if we can't make this work quickly.\n  if (retries == kMaxRetries && mapped_addr == 0)\n    return ReturnNullptrOnOOMOrDie(size, mem_type, \"allocate aligned\");\n\n  return (void *)mapped_addr;\n}\n\nbool MmapFixedNoReserve(uptr fixed_addr, uptr size, const char *name) {\n  // FIXME: is this really \"NoReserve\"? On Win32 this does not matter much,\n  // but on Win64 it does.\n  (void)name;  // unsupported\n#if !SANITIZER_GO && SANITIZER_WINDOWS64\n  // On asan/Windows64, use MEM_COMMIT would result in error\n  // 1455:ERROR_COMMITMENT_LIMIT.\n  // Asan uses exception handler to commit page on demand.\n  void *p = VirtualAlloc((LPVOID)fixed_addr, size, MEM_RESERVE, PAGE_READWRITE);\n#else\n  void *p = VirtualAlloc((LPVOID)fixed_addr, size, MEM_RESERVE | MEM_COMMIT,\n                         PAGE_READWRITE);\n#endif\n  if (p == 0) {\n    Report(\"ERROR: %s failed to \"\n           \"allocate %p (%zd) bytes at %p (error code: %d)\\n\",\n           SanitizerToolName, size, size, fixed_addr, GetLastError());\n    return false;\n  }\n  return true;\n}\n\nbool MmapFixedSuperNoReserve(uptr fixed_addr, uptr size, const char *name) {\n  // FIXME: Windows support large pages too. 
Might be worth checking\n  return MmapFixedNoReserve(fixed_addr, size, name);\n}\n\n// Memory space mapped by 'MmapFixedOrDie' must have been reserved by\n// 'MmapFixedNoAccess'.\nvoid *MmapFixedOrDie(uptr fixed_addr, uptr size, const char *name) {\n  void *p = VirtualAlloc((LPVOID)fixed_addr, size,\n      MEM_COMMIT, PAGE_READWRITE);\n  if (p == 0) {\n    char mem_type[30];\n    internal_snprintf(mem_type, sizeof(mem_type), \"memory at address 0x%zx\",\n                      fixed_addr);\n    ReportMmapFailureAndDie(size, mem_type, \"allocate\", GetLastError());\n  }\n  return p;\n}\n\n// Uses fixed_addr for now.\n// Will use offset instead once we've implemented this function for real.\nuptr ReservedAddressRange::Map(uptr fixed_addr, uptr size, const char *name) {\n  return reinterpret_cast<uptr>(MmapFixedOrDieOnFatalError(fixed_addr, size));\n}\n\nuptr ReservedAddressRange::MapOrDie(uptr fixed_addr, uptr size,\n                                    const char *name) {\n  return reinterpret_cast<uptr>(MmapFixedOrDie(fixed_addr, size));\n}\n\nvoid ReservedAddressRange::Unmap(uptr addr, uptr size) {\n  // Only unmap if it covers the entire range.\n  CHECK((addr == reinterpret_cast<uptr>(base_)) && (size == size_));\n  // We unmap the whole range, just null out the base.\n  base_ = nullptr;\n  size_ = 0;\n  UnmapOrDie(reinterpret_cast<void*>(addr), size);\n}\n\nvoid *MmapFixedOrDieOnFatalError(uptr fixed_addr, uptr size, const char *name) {\n  void *p = VirtualAlloc((LPVOID)fixed_addr, size,\n      MEM_COMMIT, PAGE_READWRITE);\n  if (p == 0) {\n    char mem_type[30];\n    internal_snprintf(mem_type, sizeof(mem_type), \"memory at address 0x%zx\",\n                      fixed_addr);\n    return ReturnNullptrOnOOMOrDie(size, mem_type, \"allocate\");\n  }\n  return p;\n}\n\nvoid *MmapNoReserveOrDie(uptr size, const char *mem_type) {\n  // FIXME: make this really NoReserve?\n  return MmapOrDie(size, mem_type);\n}\n\nuptr ReservedAddressRange::Init(uptr size, const char 
*name, uptr fixed_addr) {\n  base_ = fixed_addr ? MmapFixedNoAccess(fixed_addr, size) : MmapNoAccess(size);\n  size_ = size;\n  name_ = name;\n  (void)os_handle_;  // unsupported\n  return reinterpret_cast<uptr>(base_);\n}\n\n\nvoid *MmapFixedNoAccess(uptr fixed_addr, uptr size, const char *name) {\n  (void)name; // unsupported\n  void *res = VirtualAlloc((LPVOID)fixed_addr, size,\n                           MEM_RESERVE, PAGE_NOACCESS);\n  if (res == 0)\n    Report(\"WARNING: %s failed to \"\n           \"mprotect %p (%zd) bytes at %p (error code: %d)\\n\",\n           SanitizerToolName, size, size, fixed_addr, GetLastError());\n  return res;\n}\n\nvoid *MmapNoAccess(uptr size) {\n  void *res = VirtualAlloc(nullptr, size, MEM_RESERVE, PAGE_NOACCESS);\n  if (res == 0)\n    Report(\"WARNING: %s failed to \"\n           \"mprotect %p (%zd) bytes (error code: %d)\\n\",\n           SanitizerToolName, size, size, GetLastError());\n  return res;\n}\n\nbool MprotectNoAccess(uptr addr, uptr size) {\n  DWORD old_protection;\n  return VirtualProtect((LPVOID)addr, size, PAGE_NOACCESS, &old_protection);\n}\n\nbool MprotectReadOnly(uptr addr, uptr size) {\n  DWORD old_protection;\n  return VirtualProtect((LPVOID)addr, size, PAGE_READONLY, &old_protection);\n}\n\nvoid ReleaseMemoryPagesToOS(uptr beg, uptr end) {\n  uptr beg_aligned = RoundDownTo(beg, GetPageSizeCached()),\n       end_aligned = RoundDownTo(end, GetPageSizeCached());\n  CHECK(beg < end);                // make sure the region is sane\n  if (beg_aligned == end_aligned)  // make sure we're freeing at least 1 page;\n    return;\n  UnmapOrDie((void *)beg, end_aligned - beg_aligned);\n}\n\nvoid SetShadowRegionHugePageMode(uptr addr, uptr size) {\n  // FIXME: probably similar to ReleaseMemoryToOS.\n}\n\nbool DontDumpShadowMemory(uptr addr, uptr length) {\n  // This is almost useless on 32-bits.\n  // FIXME: add madvise-analog when we move to 64-bits.\n  return true;\n}\n\nuptr MapDynamicShadow(uptr shadow_size_bytes, 
uptr shadow_scale,\n                      uptr min_shadow_base_alignment,\n                      UNUSED uptr &high_mem_end) {\n  const uptr granularity = GetMmapGranularity();\n  const uptr alignment =\n      Max<uptr>(granularity << shadow_scale, 1ULL << min_shadow_base_alignment);\n  const uptr left_padding =\n      Max<uptr>(granularity, 1ULL << min_shadow_base_alignment);\n  uptr space_size = shadow_size_bytes + left_padding;\n  uptr shadow_start = FindAvailableMemoryRange(space_size, alignment,\n                                               granularity, nullptr, nullptr);\n  CHECK_NE((uptr)0, shadow_start);\n  CHECK(IsAligned(shadow_start, alignment));\n  return shadow_start;\n}\n\nuptr FindAvailableMemoryRange(uptr size, uptr alignment, uptr left_padding,\n                              uptr *largest_gap_found,\n                              uptr *max_occupied_addr) {\n  uptr address = 0;\n  while (true) {\n    MEMORY_BASIC_INFORMATION info;\n    if (!::VirtualQuery((void*)address, &info, sizeof(info)))\n      return 0;\n\n    if (info.State == MEM_FREE) {\n      uptr shadow_address = RoundUpTo((uptr)info.BaseAddress + left_padding,\n                                      alignment);\n      if (shadow_address + size < (uptr)info.BaseAddress + info.RegionSize)\n        return shadow_address;\n    }\n\n    // Move to the next region.\n    address = (uptr)info.BaseAddress + info.RegionSize;\n  }\n  return 0;\n}\n\nuptr MapDynamicShadowAndAliases(uptr shadow_size, uptr alias_size,\n                                uptr num_aliases, uptr ring_buffer_size) {\n  CHECK(false && \"HWASan aliasing is unimplemented on Windows\");\n  return 0;\n}\n\nbool MemoryRangeIsAvailable(uptr range_start, uptr range_end) {\n  MEMORY_BASIC_INFORMATION mbi;\n  CHECK(VirtualQuery((void *)range_start, &mbi, sizeof(mbi)));\n  return mbi.Protect == PAGE_NOACCESS &&\n         (uptr)mbi.BaseAddress + mbi.RegionSize >= range_end;\n}\n\nvoid *MapFileToMemory(const char *file_name, uptr 
*buff_size) {\n  UNIMPLEMENTED();\n}\n\nvoid *MapWritableFileToMemory(void *addr, uptr size, fd_t fd, OFF_T offset) {\n  UNIMPLEMENTED();\n}\n\nstatic const int kMaxEnvNameLength = 128;\nstatic const DWORD kMaxEnvValueLength = 32767;\n\nnamespace {\n\nstruct EnvVariable {\n  char name[kMaxEnvNameLength];\n  char value[kMaxEnvValueLength];\n};\n\n}  // namespace\n\nstatic const int kEnvVariables = 5;\nstatic EnvVariable env_vars[kEnvVariables];\nstatic int num_env_vars;\n\nconst char *GetEnv(const char *name) {\n  // Note: this implementation caches the values of the environment variables\n  // and limits their quantity.\n  for (int i = 0; i < num_env_vars; i++) {\n    if (0 == internal_strcmp(name, env_vars[i].name))\n      return env_vars[i].value;\n  }\n  CHECK_LT(num_env_vars, kEnvVariables);\n  DWORD rv = GetEnvironmentVariableA(name, env_vars[num_env_vars].value,\n                                     kMaxEnvValueLength);\n  if (rv > 0 && rv < kMaxEnvValueLength) {\n    CHECK_LT(internal_strlen(name), kMaxEnvNameLength);\n    internal_strncpy(env_vars[num_env_vars].name, name, kMaxEnvNameLength);\n    num_env_vars++;\n    return env_vars[num_env_vars - 1].value;\n  }\n  return 0;\n}\n\nconst char *GetPwd() {\n  UNIMPLEMENTED();\n}\n\nu32 GetUid() {\n  UNIMPLEMENTED();\n}\n\nnamespace {\nstruct ModuleInfo {\n  const char *filepath;\n  uptr base_address;\n  uptr end_address;\n};\n\n#if !SANITIZER_GO\nint CompareModulesBase(const void *pl, const void *pr) {\n  const ModuleInfo *l = (const ModuleInfo *)pl, *r = (const ModuleInfo *)pr;\n  if (l->base_address < r->base_address)\n    return -1;\n  return l->base_address > r->base_address;\n}\n#endif\n}  // namespace\n\n#if !SANITIZER_GO\nvoid DumpProcessMap() {\n  Report(\"Dumping process modules:\\n\");\n  ListOfModules modules;\n  modules.init();\n  uptr num_modules = modules.size();\n\n  InternalMmapVector<ModuleInfo> module_infos(num_modules);\n  for (size_t i = 0; i < num_modules; ++i) {\n    
module_infos[i].filepath = modules[i].full_name();\n    module_infos[i].base_address = modules[i].ranges().front()->beg;\n    module_infos[i].end_address = modules[i].ranges().back()->end;\n  }\n  qsort(module_infos.data(), num_modules, sizeof(ModuleInfo),\n        CompareModulesBase);\n\n  for (size_t i = 0; i < num_modules; ++i) {\n    const ModuleInfo &mi = module_infos[i];\n    if (mi.end_address != 0) {\n      Printf(\"\\t%p-%p %s\\n\", mi.base_address, mi.end_address,\n             mi.filepath[0] ? mi.filepath : \"[no name]\");\n    } else if (mi.filepath[0]) {\n      Printf(\"\\t??\\?-??? %s\\n\", mi.filepath);\n    } else {\n      Printf(\"\\t???\\n\");\n    }\n  }\n}\n#endif\n\nvoid DisableCoreDumperIfNecessary() {\n  // Do nothing.\n}\n\nvoid ReExec() {\n  UNIMPLEMENTED();\n}\n\nvoid PlatformPrepareForSandboxing(__sanitizer_sandbox_arguments *args) {}\n\nbool StackSizeIsUnlimited() {\n  UNIMPLEMENTED();\n}\n\nvoid SetStackSizeLimitInBytes(uptr limit) {\n  UNIMPLEMENTED();\n}\n\nbool AddressSpaceIsUnlimited() {\n  UNIMPLEMENTED();\n}\n\nvoid SetAddressSpaceUnlimited() {\n  UNIMPLEMENTED();\n}\n\nbool IsPathSeparator(const char c) {\n  return c == '\\\\' || c == '/';\n}\n\nstatic bool IsAlpha(char c) {\n  c = ToLower(c);\n  return c >= 'a' && c <= 'z';\n}\n\nbool IsAbsolutePath(const char *path) {\n  return path != nullptr && IsAlpha(path[0]) && path[1] == ':' &&\n         IsPathSeparator(path[2]);\n}\n\nvoid internal_usleep(u64 useconds) { Sleep(useconds / 1000); }\n\nu64 NanoTime() {\n  static LARGE_INTEGER frequency = {};\n  LARGE_INTEGER counter;\n  if (UNLIKELY(frequency.QuadPart == 0)) {\n    QueryPerformanceFrequency(&frequency);\n    CHECK_NE(frequency.QuadPart, 0);\n  }\n  QueryPerformanceCounter(&counter);\n  counter.QuadPart *= 1000ULL * 1000000ULL;\n  counter.QuadPart /= frequency.QuadPart;\n  return counter.QuadPart;\n}\n\nu64 MonotonicNanoTime() { return NanoTime(); }\n\nvoid Abort() {\n  internal__exit(3);\n}\n\nbool CreateDir(const char 
*pathname) {\n  return CreateDirectoryA(pathname, nullptr) != 0;\n}\n\n#if !SANITIZER_GO\n// Read the file to extract the ImageBase field from the PE header. If ASLR is\n// disabled and this virtual address is available, the loader will typically\n// load the image at this address. Therefore, we call it the preferred base. Any\n// addresses in the DWARF typically assume that the object has been loaded at\n// this address.\nstatic uptr GetPreferredBase(const char *modname, char *buf, size_t buf_size) {\n  fd_t fd = OpenFile(modname, RdOnly, nullptr);\n  if (fd == kInvalidFd)\n    return 0;\n  FileCloser closer(fd);\n\n  // Read just the DOS header.\n  IMAGE_DOS_HEADER dos_header;\n  uptr bytes_read;\n  if (!ReadFromFile(fd, &dos_header, sizeof(dos_header), &bytes_read) ||\n      bytes_read != sizeof(dos_header))\n    return 0;\n\n  // The file should start with the right signature.\n  if (dos_header.e_magic != IMAGE_DOS_SIGNATURE)\n    return 0;\n\n  // The layout at e_lfanew is:\n  // \"PE\\0\\0\"\n  // IMAGE_FILE_HEADER\n  // IMAGE_OPTIONAL_HEADER\n  // Seek to e_lfanew and read all that data.\n  if (::SetFilePointer(fd, dos_header.e_lfanew, nullptr, FILE_BEGIN) ==\n      INVALID_SET_FILE_POINTER)\n    return 0;\n  if (!ReadFromFile(fd, buf, buf_size, &bytes_read) || bytes_read != buf_size)\n    return 0;\n\n  // Check for \"PE\\0\\0\" before the PE header.\n  char *pe_sig = &buf[0];\n  if (internal_memcmp(pe_sig, \"PE\\0\\0\", 4) != 0)\n    return 0;\n\n  // Skip over IMAGE_FILE_HEADER. 
We could do more validation here if we wanted.\n  IMAGE_OPTIONAL_HEADER *pe_header =\n      (IMAGE_OPTIONAL_HEADER *)(pe_sig + 4 + sizeof(IMAGE_FILE_HEADER));\n\n  // Check for more magic in the PE header.\n  if (pe_header->Magic != IMAGE_NT_OPTIONAL_HDR_MAGIC)\n    return 0;\n\n  // Finally, return the ImageBase.\n  return (uptr)pe_header->ImageBase;\n}\n\nvoid ListOfModules::init() {\n  clearOrInit();\n  HANDLE cur_process = GetCurrentProcess();\n\n  // Query the list of modules.  Start by assuming there are no more than 256\n  // modules and retry if that's not sufficient.\n  HMODULE *hmodules = 0;\n  uptr modules_buffer_size = sizeof(HMODULE) * 256;\n  DWORD bytes_required;\n  while (!hmodules) {\n    hmodules = (HMODULE *)MmapOrDie(modules_buffer_size, __FUNCTION__);\n    CHECK(EnumProcessModules(cur_process, hmodules, modules_buffer_size,\n                             &bytes_required));\n    if (bytes_required > modules_buffer_size) {\n      // Either there turned out to be more than 256 hmodules, or new hmodules\n      // could have loaded since the last try.  
Retry.\n      UnmapOrDie(hmodules, modules_buffer_size);\n      hmodules = 0;\n      modules_buffer_size = bytes_required;\n    }\n  }\n\n  InternalMmapVector<char> buf(4 + sizeof(IMAGE_FILE_HEADER) +\n                               sizeof(IMAGE_OPTIONAL_HEADER));\n  InternalMmapVector<wchar_t> modname_utf16(kMaxPathLength);\n  InternalMmapVector<char> module_name(kMaxPathLength);\n  // |num_modules| is the number of modules actually present,\n  size_t num_modules = bytes_required / sizeof(HMODULE);\n  for (size_t i = 0; i < num_modules; ++i) {\n    HMODULE handle = hmodules[i];\n    MODULEINFO mi;\n    if (!GetModuleInformation(cur_process, handle, &mi, sizeof(mi)))\n      continue;\n\n    // Get the UTF-16 path and convert to UTF-8.\n    int modname_utf16_len =\n        GetModuleFileNameW(handle, &modname_utf16[0], kMaxPathLength);\n    if (modname_utf16_len == 0)\n      modname_utf16[0] = '\\0';\n    int module_name_len = ::WideCharToMultiByte(\n        CP_UTF8, 0, &modname_utf16[0], modname_utf16_len + 1, &module_name[0],\n        kMaxPathLength, NULL, NULL);\n    module_name[module_name_len] = '\\0';\n\n    uptr base_address = (uptr)mi.lpBaseOfDll;\n    uptr end_address = (uptr)mi.lpBaseOfDll + mi.SizeOfImage;\n\n    // Adjust the base address of the module so that we get a VA instead of an\n    // RVA when computing the module offset. This helps llvm-symbolizer find the\n    // right DWARF CU. 
In the common case that the image is loaded at it's\n    // preferred address, we will now print normal virtual addresses.\n    uptr preferred_base =\n        GetPreferredBase(&module_name[0], &buf[0], buf.size());\n    uptr adjusted_base = base_address - preferred_base;\n\n    modules_.push_back(LoadedModule());\n    LoadedModule &cur_module = modules_.back();\n    cur_module.set(&module_name[0], adjusted_base);\n    // We add the whole module as one single address range.\n    cur_module.addAddressRange(base_address, end_address, /*executable*/ true,\n                               /*writable*/ true);\n  }\n  UnmapOrDie(hmodules, modules_buffer_size);\n}\n\nvoid ListOfModules::fallbackInit() { clear(); }\n\n// We can't use atexit() directly at __asan_init time as the CRT is not fully\n// initialized at this point.  Place the functions into a vector and use\n// atexit() as soon as it is ready for use (i.e. after .CRT$XIC initializers).\nInternalMmapVectorNoCtor<void (*)(void)> atexit_functions;\n\nint Atexit(void (*function)(void)) {\n  atexit_functions.push_back(function);\n  return 0;\n}\n\nstatic int RunAtexit() {\n  TraceLoggingUnregister(g_asan_provider);\n  int ret = 0;\n  for (uptr i = 0; i < atexit_functions.size(); ++i) {\n    ret |= atexit(atexit_functions[i]);\n  }\n  return ret;\n}\n\n#pragma section(\".CRT$XID\", long, read)\n__declspec(allocate(\".CRT$XID\")) int (*__run_atexit)() = RunAtexit;\n#endif\n\n// ------------------ sanitizer_libc.h\nfd_t OpenFile(const char *filename, FileAccessMode mode, error_t *last_error) {\n  // FIXME: Use the wide variants to handle Unicode filenames.\n  fd_t res;\n  if (mode == RdOnly) {\n    res = CreateFileA(filename, GENERIC_READ,\n                      FILE_SHARE_READ | FILE_SHARE_WRITE | FILE_SHARE_DELETE,\n                      nullptr, OPEN_EXISTING, FILE_ATTRIBUTE_NORMAL, nullptr);\n  } else if (mode == WrOnly) {\n    res = CreateFileA(filename, GENERIC_WRITE, 0, nullptr, CREATE_ALWAYS,\n                      
FILE_ATTRIBUTE_NORMAL, nullptr);\n  } else {\n    UNIMPLEMENTED();\n  }\n  CHECK(res != kStdoutFd || kStdoutFd == kInvalidFd);\n  CHECK(res != kStderrFd || kStderrFd == kInvalidFd);\n  if (res == kInvalidFd && last_error)\n    *last_error = GetLastError();\n  return res;\n}\n\nvoid CloseFile(fd_t fd) {\n  CloseHandle(fd);\n}\n\nbool ReadFromFile(fd_t fd, void *buff, uptr buff_size, uptr *bytes_read,\n                  error_t *error_p) {\n  CHECK(fd != kInvalidFd);\n\n  // bytes_read can't be passed directly to ReadFile:\n  // uptr is unsigned long long on 64-bit Windows.\n  unsigned long num_read_long;\n\n  bool success = ::ReadFile(fd, buff, buff_size, &num_read_long, nullptr);\n  if (!success && error_p)\n    *error_p = GetLastError();\n  if (bytes_read)\n    *bytes_read = num_read_long;\n  return success;\n}\n\nbool SupportsColoredOutput(fd_t fd) {\n  // FIXME: support colored output.\n  return false;\n}\n\nbool WriteToFile(fd_t fd, const void *buff, uptr buff_size, uptr *bytes_written,\n                 error_t *error_p) {\n  CHECK(fd != kInvalidFd);\n\n  // Handle null optional parameters.\n  error_t dummy_error;\n  error_p = error_p ? error_p : &dummy_error;\n  uptr dummy_bytes_written;\n  bytes_written = bytes_written ? bytes_written : &dummy_bytes_written;\n\n  // Initialize output parameters in case we fail.\n  *error_p = 0;\n  *bytes_written = 0;\n\n  // Map the conventional Unix fds 1 and 2 to Windows handles. They might be\n  // closed, in which case this will fail.\n  if (fd == kStdoutFd || fd == kStderrFd) {\n    fd = GetStdHandle(fd == kStdoutFd ? 
STD_OUTPUT_HANDLE : STD_ERROR_HANDLE);\n    if (fd == 0) {\n      *error_p = ERROR_INVALID_HANDLE;\n      return false;\n    }\n  }\n\n  DWORD bytes_written_32;\n  if (!WriteFile(fd, buff, buff_size, &bytes_written_32, 0)) {\n    *error_p = GetLastError();\n    return false;\n  } else {\n    *bytes_written = bytes_written_32;\n    return true;\n  }\n}\n\nuptr internal_sched_yield() {\n  Sleep(0);\n  return 0;\n}\n\nvoid internal__exit(int exitcode) {\n  TraceLoggingUnregister(g_asan_provider);\n  // ExitProcess runs some finalizers, so use TerminateProcess to avoid that.\n  // The debugger doesn't stop on TerminateProcess like it does on ExitProcess,\n  // so add our own breakpoint here.\n  if (::IsDebuggerPresent())\n    __debugbreak();\n  TerminateProcess(GetCurrentProcess(), exitcode);\n  BUILTIN_UNREACHABLE();\n}\n\nuptr internal_ftruncate(fd_t fd, uptr size) {\n  UNIMPLEMENTED();\n}\n\nuptr GetRSS() {\n  PROCESS_MEMORY_COUNTERS counters;\n  if (!GetProcessMemoryInfo(GetCurrentProcess(), &counters, sizeof(counters)))\n    return 0;\n  return counters.WorkingSetSize;\n}\n\nvoid *internal_start_thread(void *(*func)(void *arg), void *arg) { return 0; }\nvoid internal_join_thread(void *th) { }\n\nvoid FutexWait(atomic_uint32_t *p, u32 cmp) {\n  WaitOnAddress(p, &cmp, sizeof(cmp), INFINITE);\n}\n\nvoid FutexWake(atomic_uint32_t *p, u32 count) {\n  if (count == 1)\n    WakeByAddressSingle(p);\n  else\n    WakeByAddressAll(p);\n}\n\nuptr GetTlsSize() {\n  return 0;\n}\n\nvoid InitTlsSize() {\n}\n\nvoid GetThreadStackAndTls(bool main, uptr *stk_addr, uptr *stk_size,\n                          uptr *tls_addr, uptr *tls_size) {\n#if SANITIZER_GO\n  *stk_addr = 0;\n  *stk_size = 0;\n  *tls_addr = 0;\n  *tls_size = 0;\n#else\n  uptr stack_top, stack_bottom;\n  GetThreadStackTopAndBottom(main, &stack_top, &stack_bottom);\n  *stk_addr = stack_bottom;\n  *stk_size = stack_top - stack_bottom;\n  *tls_addr = 0;\n  *tls_size = 0;\n#endif\n}\n\nvoid ReportFile::Write(const char 
*buffer, uptr length) {\n  SpinMutexLock l(mu);\n  ReopenIfNecessary();\n  if (!WriteToFile(fd, buffer, length)) {\n    // stderr may be closed, but we may be able to print to the debugger\n    // instead.  This is the case when launching a program from Visual Studio,\n    // and the following routine should write to its console.\n    OutputDebugStringA(buffer);\n  }\n}\n\nvoid SetAlternateSignalStack() {\n  // FIXME: Decide what to do on Windows.\n}\n\nvoid UnsetAlternateSignalStack() {\n  // FIXME: Decide what to do on Windows.\n}\n\nvoid InstallDeadlySignalHandlers(SignalHandlerType handler) {\n  (void)handler;\n  // FIXME: Decide what to do on Windows.\n}\n\nHandleSignalMode GetHandleSignalMode(int signum) {\n  // FIXME: Decide what to do on Windows.\n  return kHandleSignalNo;\n}\n\n// Check based on flags if we should handle this exception.\nbool IsHandledDeadlyException(DWORD exceptionCode) {\n  switch (exceptionCode) {\n    case EXCEPTION_ACCESS_VIOLATION:\n    case EXCEPTION_ARRAY_BOUNDS_EXCEEDED:\n    case EXCEPTION_STACK_OVERFLOW:\n    case EXCEPTION_DATATYPE_MISALIGNMENT:\n    case EXCEPTION_IN_PAGE_ERROR:\n      return common_flags()->handle_segv;\n    case EXCEPTION_ILLEGAL_INSTRUCTION:\n    case EXCEPTION_PRIV_INSTRUCTION:\n    case EXCEPTION_BREAKPOINT:\n      return common_flags()->handle_sigill;\n    case EXCEPTION_FLT_DENORMAL_OPERAND:\n    case EXCEPTION_FLT_DIVIDE_BY_ZERO:\n    case EXCEPTION_FLT_INEXACT_RESULT:\n    case EXCEPTION_FLT_INVALID_OPERATION:\n    case EXCEPTION_FLT_OVERFLOW:\n    case EXCEPTION_FLT_STACK_CHECK:\n    case EXCEPTION_FLT_UNDERFLOW:\n    case EXCEPTION_INT_DIVIDE_BY_ZERO:\n    case EXCEPTION_INT_OVERFLOW:\n      return common_flags()->handle_sigfpe;\n  }\n  return false;\n}\n\nbool IsAccessibleMemoryRange(uptr beg, uptr size) {\n  SYSTEM_INFO si;\n  GetNativeSystemInfo(&si);\n  uptr page_size = si.dwPageSize;\n  uptr page_mask = ~(page_size - 1);\n\n  for (uptr page = beg & page_mask, end = (beg + size - 1) & 
page_mask;\n       page <= end;) {\n    MEMORY_BASIC_INFORMATION info;\n    if (VirtualQuery((LPCVOID)page, &info, sizeof(info)) != sizeof(info))\n      return false;\n\n    if (info.Protect == 0 || info.Protect == PAGE_NOACCESS ||\n        info.Protect == PAGE_EXECUTE)\n      return false;\n\n    if (info.RegionSize == 0)\n      return false;\n\n    page += info.RegionSize;\n  }\n\n  return true;\n}\n\nbool SignalContext::IsStackOverflow() const {\n  return (DWORD)GetType() == EXCEPTION_STACK_OVERFLOW;\n}\n\nvoid SignalContext::InitPcSpBp() {\n  EXCEPTION_RECORD *exception_record = (EXCEPTION_RECORD *)siginfo;\n  CONTEXT *context_record = (CONTEXT *)context;\n\n  pc = (uptr)exception_record->ExceptionAddress;\n#  if SANITIZER_WINDOWS64\n#    if SANITIZER_ARM64\n  bp = (uptr)context_record->Fp;\n  sp = (uptr)context_record->Sp;\n#    else\n  bp = (uptr)context_record->Rbp;\n  sp = (uptr)context_record->Rsp;\n#    endif\n#  else\n  bp = (uptr)context_record->Ebp;\n  sp = (uptr)context_record->Esp;\n#  endif\n}\n\nuptr SignalContext::GetAddress() const {\n  EXCEPTION_RECORD *exception_record = (EXCEPTION_RECORD *)siginfo;\n  if (exception_record->ExceptionCode == EXCEPTION_ACCESS_VIOLATION)\n    return exception_record->ExceptionInformation[1];\n  return (uptr)exception_record->ExceptionAddress;\n}\n\nbool SignalContext::IsMemoryAccess() const {\n  return ((EXCEPTION_RECORD *)siginfo)->ExceptionCode ==\n         EXCEPTION_ACCESS_VIOLATION;\n}\n\nbool SignalContext::IsTrueFaultingAddress() const { return true; }\n\nSignalContext::WriteFlag SignalContext::GetWriteFlag() const {\n  EXCEPTION_RECORD *exception_record = (EXCEPTION_RECORD *)siginfo;\n\n  // The write flag is only available for access violation exceptions.\n  if (exception_record->ExceptionCode != EXCEPTION_ACCESS_VIOLATION)\n    return SignalContext::Unknown;\n\n  // The contents of this array are documented at\n  // https://docs.microsoft.com/en-us/windows/win32/api/winnt/ns-winnt-exception_record\n  // 
The first element indicates read as 0, write as 1, or execute as 8.  The\n  // second element is the faulting address.\n  switch (exception_record->ExceptionInformation[0]) {\n    case 0:\n      return SignalContext::Read;\n    case 1:\n      return SignalContext::Write;\n    case 8:\n      return SignalContext::Unknown;\n  }\n  return SignalContext::Unknown;\n}\n\nvoid SignalContext::DumpAllRegisters(void *context) {\n  // FIXME: Implement this.\n}\n\nint SignalContext::GetType() const {\n  return static_cast<const EXCEPTION_RECORD *>(siginfo)->ExceptionCode;\n}\n\nconst char *SignalContext::Describe() const {\n  unsigned code = GetType();\n  // Get the string description of the exception if this is a known deadly\n  // exception.\n  switch (code) {\n    case EXCEPTION_ACCESS_VIOLATION:\n      return \"access-violation\";\n    case EXCEPTION_ARRAY_BOUNDS_EXCEEDED:\n      return \"array-bounds-exceeded\";\n    case EXCEPTION_STACK_OVERFLOW:\n      return \"stack-overflow\";\n    case EXCEPTION_DATATYPE_MISALIGNMENT:\n      return \"datatype-misalignment\";\n    case EXCEPTION_IN_PAGE_ERROR:\n      return \"in-page-error\";\n    case EXCEPTION_ILLEGAL_INSTRUCTION:\n      return \"illegal-instruction\";\n    case EXCEPTION_PRIV_INSTRUCTION:\n      return \"priv-instruction\";\n    case EXCEPTION_BREAKPOINT:\n      return \"breakpoint\";\n    case EXCEPTION_FLT_DENORMAL_OPERAND:\n      return \"flt-denormal-operand\";\n    case EXCEPTION_FLT_DIVIDE_BY_ZERO:\n      return \"flt-divide-by-zero\";\n    case EXCEPTION_FLT_INEXACT_RESULT:\n      return \"flt-inexact-result\";\n    case EXCEPTION_FLT_INVALID_OPERATION:\n      return \"flt-invalid-operation\";\n    case EXCEPTION_FLT_OVERFLOW:\n      return \"flt-overflow\";\n    case EXCEPTION_FLT_STACK_CHECK:\n      return \"flt-stack-check\";\n    case EXCEPTION_FLT_UNDERFLOW:\n      return \"flt-underflow\";\n    case EXCEPTION_INT_DIVIDE_BY_ZERO:\n      return \"int-divide-by-zero\";\n    case EXCEPTION_INT_OVERFLOW:\n  
    return \"int-overflow\";\n  }\n  return \"unknown exception\";\n}\n\nuptr ReadBinaryName(/*out*/char *buf, uptr buf_len) {\n  if (buf_len == 0)\n    return 0;\n\n  // Get the UTF-16 path and convert to UTF-8.\n  InternalMmapVector<wchar_t> binname_utf16(kMaxPathLength);\n  int binname_utf16_len =\n      GetModuleFileNameW(NULL, &binname_utf16[0], kMaxPathLength);\n  if (binname_utf16_len == 0) {\n    buf[0] = '\\0';\n    return 0;\n  }\n  int binary_name_len =\n      ::WideCharToMultiByte(CP_UTF8, 0, &binname_utf16[0], binname_utf16_len,\n                            buf, buf_len, NULL, NULL);\n  if ((unsigned)binary_name_len == buf_len)\n    --binary_name_len;\n  buf[binary_name_len] = '\\0';\n  return binary_name_len;\n}\n\nuptr ReadLongProcessName(/*out*/char *buf, uptr buf_len) {\n  return ReadBinaryName(buf, buf_len);\n}\n\nvoid CheckVMASize() {\n  // Do nothing.\n}\n\nvoid InitializePlatformEarly() {\n  // Do nothing.\n}\n\nvoid MaybeReexec() {\n  // No need to re-exec on Windows.\n}\n\nvoid CheckASLR() {\n  // Do nothing\n}\n\nvoid CheckMPROTECT() {\n  // Do nothing\n}\n\nchar **GetArgv() {\n  // FIXME: Actually implement this function.\n  return 0;\n}\n\nchar **GetEnviron() {\n  // FIXME: Actually implement this function.\n  return 0;\n}\n\npid_t StartSubprocess(const char *program, const char *const argv[],\n                      const char *const envp[], fd_t stdin_fd, fd_t stdout_fd,\n                      fd_t stderr_fd) {\n  // FIXME: implement on this platform\n  // Should be implemented based on\n  // SymbolizerProcess::StarAtSymbolizerSubprocess\n  // from lib/sanitizer_common/sanitizer_symbolizer_win.cpp.\n  return -1;\n}\n\nbool IsProcessRunning(pid_t pid) {\n  // FIXME: implement on this platform.\n  return false;\n}\n\nint WaitForProcess(pid_t pid) { return -1; }\n\n// FIXME implement on this platform.\nvoid GetMemoryProfile(fill_profile_f cb, uptr *stats) {}\n\nvoid CheckNoDeepBind(const char *filename, int flag) {\n  // Do nothing.\n}\n\n// 
FIXME: implement on this platform.\nbool GetRandom(void *buffer, uptr length, bool blocking) {\n  UNIMPLEMENTED();\n}\n\nu32 GetNumberOfCPUs() {\n  SYSTEM_INFO sysinfo = {};\n  GetNativeSystemInfo(&sysinfo);\n  return sysinfo.dwNumberOfProcessors;\n}\n\n#if SANITIZER_WIN_TRACE\n// TODO(mcgov): Rename this project-wide to PlatformLogInit\nvoid AndroidLogInit(void) {\n  HRESULT hr = TraceLoggingRegister(g_asan_provider);\n  if (!SUCCEEDED(hr))\n    return;\n}\n\nvoid SetAbortMessage(const char *) {}\n\nvoid LogFullErrorReport(const char *buffer) {\n  if (common_flags()->log_to_syslog) {\n    InternalMmapVector<wchar_t> filename;\n    DWORD filename_length = 0;\n    do {\n      filename.resize(filename.size() + 0x100);\n      filename_length =\n          GetModuleFileNameW(NULL, filename.begin(), filename.size());\n    } while (filename_length >= filename.size());\n    TraceLoggingWrite(g_asan_provider, \"AsanReportEvent\",\n                      TraceLoggingValue(filename.begin(), \"ExecutableName\"),\n                      TraceLoggingValue(buffer, \"AsanReportContents\"));\n  }\n}\n#endif // SANITIZER_WIN_TRACE\n\nvoid InitializePlatformCommonFlags(CommonFlags *cf) {}\n\n}  // namespace __sanitizer\n\n#endif  // _WIN32\n"
  },
  {
    "path": "runtime/sanitizer_common/sanitizer_win.h",
    "content": "//===-- sanitizer_win.h -----------------------------------------*- C++ -*-===//\n//\n// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n// See https://llvm.org/LICENSE.txt for license information.\n// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n//\n//===----------------------------------------------------------------------===//\n//\n// Windows-specific declarations.\n//\n//===----------------------------------------------------------------------===//\n#ifndef SANITIZER_WIN_H\n#define SANITIZER_WIN_H\n\n#include \"sanitizer_platform.h\"\n#if SANITIZER_WINDOWS\n#include \"sanitizer_internal_defs.h\"\n\nnamespace __sanitizer {\n// Check based on flags if we should handle the exception.\nbool IsHandledDeadlyException(DWORD exceptionCode);\n}  // namespace __sanitizer\n\n#endif  // SANITIZER_WINDOWS\n#endif  // SANITIZER_WIN_H\n"
  },
  {
    "path": "runtime/sanitizer_common/sanitizer_win_defs.h",
    "content": "//===-- sanitizer_win_defs.h ------------------------------------*- C++ -*-===//\n//\n// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n// See https://llvm.org/LICENSE.txt for license information.\n// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n//\n//===----------------------------------------------------------------------===//\n//\n// Common definitions for Windows-specific code.\n//\n//===----------------------------------------------------------------------===//\n#ifndef SANITIZER_WIN_DEFS_H\n#define SANITIZER_WIN_DEFS_H\n\n#include \"sanitizer_platform.h\"\n#if SANITIZER_WINDOWS\n\n#ifndef WINAPI\n#if defined(_M_IX86) || defined(__i386__)\n#define WINAPI __stdcall\n#else\n#define WINAPI\n#endif\n#endif\n\n#if defined(_M_IX86) || defined(__i386__)\n#define WIN_SYM_PREFIX \"_\"\n#else\n#define WIN_SYM_PREFIX\n#endif\n\n// For MinGW, the /export: directives contain undecorated symbols, contrary to\n// link/lld-link. The GNU linker doesn't support /alternatename and /include\n// though, thus lld-link in MinGW mode interprets them in the same way as\n// in the default mode.\n#ifdef __MINGW32__\n#define WIN_EXPORT_PREFIX\n#else\n#define WIN_EXPORT_PREFIX WIN_SYM_PREFIX\n#endif\n\n// Intermediate macro to ensure the parameter is expanded before stringified.\n#define STRINGIFY_(A) #A\n#define STRINGIFY(A) STRINGIFY_(A)\n\n#if !SANITIZER_GO\n\n// ----------------- A workaround for the absence of weak symbols --------------\n// We don't have a direct equivalent of weak symbols when using MSVC, but we can\n// use the /alternatename directive to tell the linker to default a specific\n// symbol to a specific value.\n// Take into account that this is a pragma directive for the linker, so it will\n// be ignored by the compiler and the function will be marked as UNDEF in the\n// symbol table of the resulting object file. 
The linker won't find the default\n// implementation until it links with that object file.\n// So, suppose we provide a default implementation \"fundef\" for \"fun\", and this\n// is compiled into the object file \"test.obj\" including the pragma directive.\n// If we have some code with references to \"fun\" and we link that code with\n// \"test.obj\", it will work because the linker always link object files.\n// But, if \"test.obj\" is included in a static library, like \"test.lib\", then the\n// liker will only link to \"test.obj\" if necessary. If we only included the\n// definition of \"fun\", it won't link to \"test.obj\" (from test.lib) because\n// \"fun\" appears as UNDEF, so it doesn't resolve the symbol \"fun\", and will\n// result in a link error (the linker doesn't find the pragma directive).\n// So, a workaround is to force linkage with the modules that include weak\n// definitions, with the following macro: WIN_FORCE_LINK()\n\n#define WIN_WEAK_ALIAS(Name, Default)                                          \\\n  __pragma(comment(linker, \"/alternatename:\" WIN_SYM_PREFIX STRINGIFY(Name) \"=\"\\\n                                             WIN_SYM_PREFIX STRINGIFY(Default)))\n\n#define WIN_FORCE_LINK(Name)                                                   \\\n  __pragma(comment(linker, \"/include:\" WIN_SYM_PREFIX STRINGIFY(Name)))\n\n#define WIN_EXPORT(ExportedName, Name)                                         \\\n  __pragma(comment(linker, \"/export:\" WIN_EXPORT_PREFIX STRINGIFY(ExportedName)\\\n                                  \"=\" WIN_EXPORT_PREFIX STRINGIFY(Name)))\n\n// We cannot define weak functions on Windows, but we can use WIN_WEAK_ALIAS()\n// which defines an alias to a default implementation, and only works when\n// linking statically.\n// So, to define a weak function \"fun\", we define a default implementation with\n// a different name \"fun__def\" and we create a \"weak alias\" fun = fun__def.\n// Then, users can override it just 
defining \"fun\".\n// We impose \"extern \"C\"\" because otherwise WIN_WEAK_ALIAS() will fail because\n// of name mangling.\n\n// Dummy name for default implementation of weak function.\n# define WEAK_DEFAULT_NAME(Name) Name##__def\n// Name for exported implementation of weak function.\n# define WEAK_EXPORT_NAME(Name) Name##__dll\n\n// Use this macro when you need to define and export a weak function from a\n// library. For example:\n//   WIN_WEAK_EXPORT_DEF(bool, compare, int a, int b) { return a > b; }\n# define WIN_WEAK_EXPORT_DEF(ReturnType, Name, ...)                            \\\n  WIN_WEAK_ALIAS(Name, WEAK_DEFAULT_NAME(Name))                                \\\n  WIN_EXPORT(WEAK_EXPORT_NAME(Name), Name)                                     \\\n  extern \"C\" ReturnType Name(__VA_ARGS__);                                     \\\n  extern \"C\" ReturnType WEAK_DEFAULT_NAME(Name)(__VA_ARGS__)\n\n// Use this macro when you need to import a weak function from a library. It\n// defines a weak alias to the imported function from the dll. For example:\n//   WIN_WEAK_IMPORT_DEF(compare)\n# define WIN_WEAK_IMPORT_DEF(Name)                                             \\\n  WIN_WEAK_ALIAS(Name, WEAK_EXPORT_NAME(Name))\n\n// So, for Windows we provide something similar to weak symbols in Linux, with\n// some differences:\n// + A default implementation must always be provided.\n//\n// + When linking statically it works quite similarly. For example:\n//\n//   // libExample.cc\n//   WIN_WEAK_EXPORT_DEF(bool, compare, int a, int b) { return a > b; }\n//\n//   // client.cc\n//   // We can use the default implementation from the library:\n//   compare(1, 2);\n//   // Or we can override it:\n//   extern \"C\" bool compare (int a, int b) { return a >= b; }\n//\n//  And it will work fine. 
If we don't override the function, we need to ensure\n//  that the linker includes the object file with the default implementation.\n//  We can do so with the linker option \"-wholearchive:\".\n//\n// + When linking dynamically with a library (dll), weak functions are exported\n//  with \"__dll\" suffix. Clients can use the macro WIN_WEAK_IMPORT_DEF(fun)\n//  which defines a \"weak alias\" fun = fun__dll.\n//\n//   // libExample.cc\n//   WIN_WEAK_EXPORT_DEF(bool, compare, int a, int b) { return a > b; }\n//\n//   // client.cc\n//   WIN_WEAK_IMPORT_DEF(compare)\n//   // We can use the default implementation from the library:\n//   compare(1, 2);\n//   // Or we can override it:\n//   extern \"C\" bool compare (int a, int b) { return a >= b; }\n//\n//  But if we override the function, the dlls don't have access to it (which\n//  is different in linux). If that is desired, the strong definition must be\n//  exported and interception can be used from the rest of the dlls.\n//\n//   // libExample.cc\n//   WIN_WEAK_EXPORT_DEF(bool, compare, int a, int b) { return a > b; }\n//   // When initialized, check if the main executable defined \"compare\".\n//   int libExample_init() {\n//     uptr fnptr = __interception::InternalGetProcAddress(\n//         (void *)GetModuleHandleA(0), \"compare\");\n//     if (fnptr && !__interception::OverrideFunction((uptr)compare, fnptr, 0))\n//       abort();\n//     return 0;\n//   }\n//\n//   // client.cc\n//   WIN_WEAK_IMPORT_DEF(compare)\n//   // We override and export compare:\n//   extern \"C\" __declspec(dllexport) bool compare (int a, int b) {\n//     return a >= b;\n//   }\n//\n\n#else // SANITIZER_GO\n\n// Go neither needs nor wants weak references.\n// The shenanigans above don't work for gcc.\n# define WIN_WEAK_EXPORT_DEF(ReturnType, Name, ...)                            \\\n  extern \"C\" ReturnType Name(__VA_ARGS__)\n\n#endif // SANITIZER_GO\n\n#endif // SANITIZER_WINDOWS\n#endif // SANITIZER_WIN_DEFS_H\n"
  },
  {
    "path": "runtime/sanitizer_common/sanitizer_win_dll_thunk.cpp",
    "content": "//===-- sanitizer_win_dll_thunk.cpp ---------------------------------------===//\n//\n// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n// See https://llvm.org/LICENSE.txt for license information.\n// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n//\n//===----------------------------------------------------------------------===//\n// This file defines a family of thunks that should be statically linked into\n// the DLLs that have instrumentation in order to delegate the calls to the\n// shared runtime that lives in the main binary.\n// See https://github.com/google/sanitizers/issues/209 for the details.\n//===----------------------------------------------------------------------===//\n\n#ifdef SANITIZER_DLL_THUNK\n#include \"sanitizer_win_defs.h\"\n#include \"sanitizer_win_dll_thunk.h\"\n#include \"interception/interception.h\"\n\nextern \"C\" {\nvoid *WINAPI GetModuleHandleA(const char *module_name);\nvoid abort();\n}\n\nnamespace __sanitizer {\nuptr dllThunkGetRealAddrOrDie(const char *name) {\n  uptr ret =\n      __interception::InternalGetProcAddress((void *)GetModuleHandleA(0), name);\n  if (!ret)\n    abort();\n  return ret;\n}\n\nint dllThunkIntercept(const char* main_function, uptr dll_function) {\n  uptr wrapper = dllThunkGetRealAddrOrDie(main_function);\n  if (!__interception::OverrideFunction(dll_function, wrapper, 0))\n    abort();\n  return 0;\n}\n\nint dllThunkInterceptWhenPossible(const char* main_function,\n    const char* default_function, uptr dll_function) {\n  uptr wrapper = __interception::InternalGetProcAddress(\n    (void *)GetModuleHandleA(0), main_function);\n  if (!wrapper)\n    wrapper = dllThunkGetRealAddrOrDie(default_function);\n  if (!__interception::OverrideFunction(dll_function, wrapper, 0))\n    abort();\n  return 0;\n}\n} // namespace __sanitizer\n\n// Include Sanitizer Common interface.\n#define INTERFACE_FUNCTION(Name) INTERCEPT_SANITIZER_FUNCTION(Name)\n#define 
INTERFACE_WEAK_FUNCTION(Name) INTERCEPT_SANITIZER_WEAK_FUNCTION(Name)\n#include \"sanitizer_common_interface.inc\"\n\n#pragma section(\".DLLTH$A\", read)\n#pragma section(\".DLLTH$Z\", read)\n\ntypedef void (*DllThunkCB)();\nextern \"C\" {\n__declspec(allocate(\".DLLTH$A\")) DllThunkCB __start_dll_thunk;\n__declspec(allocate(\".DLLTH$Z\")) DllThunkCB __stop_dll_thunk;\n}\n\n// Disable compiler warnings that show up if we declare our own version\n// of a compiler intrinsic (e.g. strlen).\n#pragma warning(disable: 4391)\n#pragma warning(disable: 4392)\n\nextern \"C\" int __dll_thunk_init() {\n  static bool flag = false;\n  // __dll_thunk_init is expected to be called by only one thread.\n  if (flag) return 0;\n  flag = true;\n\n  for (DllThunkCB *it = &__start_dll_thunk; it < &__stop_dll_thunk; ++it)\n    if (*it)\n      (*it)();\n\n  // In DLLs, the callbacks are expected to return 0,\n  // otherwise CRT initialization fails.\n  return 0;\n}\n\n// We want to call dll_thunk_init before C/C++ initializers / constructors are\n// executed, otherwise functions like memset might be invoked.\n#pragma section(\".CRT$XIB\", long, read)\n__declspec(allocate(\".CRT$XIB\")) int (*__dll_thunk_preinit)() =\n    __dll_thunk_init;\n\nstatic void WINAPI dll_thunk_thread_init(void *mod, unsigned long reason,\n                                         void *reserved) {\n  if (reason == /*DLL_PROCESS_ATTACH=*/1) __dll_thunk_init();\n}\n\n#pragma section(\".CRT$XLAB\", long, read)\n__declspec(allocate(\".CRT$XLAB\")) void (WINAPI *__dll_thunk_tls_init)(void *,\n    unsigned long, void *) = dll_thunk_thread_init;\n\n#endif // SANITIZER_DLL_THUNK\n"
  },
  {
    "path": "runtime/sanitizer_common/sanitizer_win_dll_thunk.h",
    "content": "//===-- sanitizer_win_dll_thunk.h -----------------------------------------===//\n//\n// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n// See https://llvm.org/LICENSE.txt for license information.\n// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n//\n//===----------------------------------------------------------------------===//\n// This header provide helper macros to delegate calls to the shared runtime\n// that lives in the main executable. It should be included to dll_thunks that\n// will be linked to the dlls, when the sanitizer is a static library included\n// in the main executable.\n//===----------------------------------------------------------------------===//\n#ifndef SANITIZER_WIN_DLL_THUNK_H\n#define SANITIZER_WIN_DLL_THUNK_H\n#include \"sanitizer_internal_defs.h\"\n\nnamespace __sanitizer {\nuptr dllThunkGetRealAddrOrDie(const char *name);\n\nint dllThunkIntercept(const char* main_function, uptr dll_function);\n\nint dllThunkInterceptWhenPossible(const char* main_function,\n    const char* default_function, uptr dll_function);\n}\n\nextern \"C\" int __dll_thunk_init();\n\n// ----------------- Function interception helper macros -------------------- //\n// Override dll_function with main_function from main executable.\n#define INTERCEPT_OR_DIE(main_function, dll_function)                          \\\n  static int intercept_##dll_function() {                                      \\\n    return __sanitizer::dllThunkIntercept(main_function, (__sanitizer::uptr)   \\\n        dll_function);                                                         \\\n  }                                                                            \\\n  __pragma(section(\".DLLTH$M\", long, read))                                    \\\n  __declspec(allocate(\".DLLTH$M\")) int (*__dll_thunk_##dll_function)() =       \\\n    intercept_##dll_function;\n\n// Try to override dll_function with main_function from main 
executable.\n// If main_function is not present, override dll_function with default_function.\n#define INTERCEPT_WHEN_POSSIBLE(main_function, default_function, dll_function) \\\n  static int intercept_##dll_function() {                                      \\\n    return __sanitizer::dllThunkInterceptWhenPossible(main_function,           \\\n        default_function, (__sanitizer::uptr)dll_function);                    \\\n  }                                                                            \\\n  __pragma(section(\".DLLTH$M\", long, read))                                    \\\n  __declspec(allocate(\".DLLTH$M\")) int (*__dll_thunk_##dll_function)() =       \\\n    intercept_##dll_function;\n\n// -------------------- Function interception macros ------------------------ //\n// Special case of hooks -- ASan own interface functions.  Those are only called\n// after __asan_init, thus an empty implementation is sufficient.\n#define INTERCEPT_SANITIZER_FUNCTION(name)                                     \\\n  extern \"C\" __declspec(noinline) void name() {                                \\\n    volatile int prevent_icf = (__LINE__ << 8) ^ __COUNTER__;                  \\\n    static const char function_name[] = #name;                                 \\\n    for (const char* ptr = &function_name[0]; *ptr; ++ptr)                     \\\n      prevent_icf ^= *ptr;                                                     \\\n    (void)prevent_icf;                                                         \\\n    __debugbreak();                                                            \\\n  }                                                                            \\\n  INTERCEPT_OR_DIE(#name, name)\n\n// Special case of hooks -- Weak functions, could be redefined in the main\n// executable, but that is not necessary, so we shouldn't die if we can not find\n// a reference. 
Instead, when the function is not present in the main executable\n// we consider the default impl provided by asan library.\n#define INTERCEPT_SANITIZER_WEAK_FUNCTION(name)                                \\\n  extern \"C\" __declspec(noinline) void name() {                                \\\n    volatile int prevent_icf = (__LINE__ << 8) ^ __COUNTER__;                  \\\n    static const char function_name[] = #name;                                 \\\n    for (const char* ptr = &function_name[0]; *ptr; ++ptr)                     \\\n      prevent_icf ^= *ptr;                                                     \\\n    (void)prevent_icf;                                                         \\\n    __debugbreak();                                                            \\\n  }                                                                            \\\n  INTERCEPT_WHEN_POSSIBLE(#name, STRINGIFY(WEAK_EXPORT_NAME(name)), name)\n\n// We can't define our own version of strlen etc. because that would lead to\n// link-time or even type mismatch errors.  Instead, we can declare a function\n// just to be able to get its address.  
Me may miss the first few calls to the\n// functions since it can be called before __dll_thunk_init, but that would lead\n// to false negatives in the startup code before user's global initializers,\n// which isn't a big deal.\n#define INTERCEPT_LIBRARY_FUNCTION(name)                                       \\\n  extern \"C\" void name();                                                      \\\n  INTERCEPT_OR_DIE(WRAPPER_NAME(name), name)\n\n// Use these macros for functions that could be called before __dll_thunk_init()\n// is executed and don't lead to errors if defined (free, malloc, etc).\n#define INTERCEPT_WRAP_V_V(name)                                               \\\n  extern \"C\" void name() {                                                     \\\n    typedef decltype(name) *fntype;                                            \\\n    static fntype fn = (fntype)__sanitizer::dllThunkGetRealAddrOrDie(#name);   \\\n    fn();                                                                      \\\n  }                                                                            \\\n  INTERCEPT_OR_DIE(#name, name);\n\n#define INTERCEPT_WRAP_V_W(name)                                               \\\n  extern \"C\" void name(void *arg) {                                            \\\n    typedef decltype(name) *fntype;                                            \\\n    static fntype fn = (fntype)__sanitizer::dllThunkGetRealAddrOrDie(#name);   \\\n    fn(arg);                                                                   \\\n  }                                                                            \\\n  INTERCEPT_OR_DIE(#name, name);\n\n#define INTERCEPT_WRAP_V_WW(name)                                              \\\n  extern \"C\" void name(void *arg1, void *arg2) {                               \\\n    typedef decltype(name) *fntype;                                            \\\n    static fntype fn = (fntype)__sanitizer::dllThunkGetRealAddrOrDie(#name);  
 \\\n    fn(arg1, arg2);                                                            \\\n  }                                                                            \\\n  INTERCEPT_OR_DIE(#name, name);\n\n#define INTERCEPT_WRAP_V_WWW(name)                                             \\\n  extern \"C\" void name(void *arg1, void *arg2, void *arg3) {                   \\\n    typedef decltype(name) *fntype;                                            \\\n    static fntype fn = (fntype)__sanitizer::dllThunkGetRealAddrOrDie(#name);   \\\n    fn(arg1, arg2, arg3);                                                      \\\n  }                                                                            \\\n  INTERCEPT_OR_DIE(#name, name);\n\n#define INTERCEPT_WRAP_W_V(name)                                               \\\n  extern \"C\" void *name() {                                                    \\\n    typedef decltype(name) *fntype;                                            \\\n    static fntype fn = (fntype)__sanitizer::dllThunkGetRealAddrOrDie(#name);   \\\n    return fn();                                                               \\\n  }                                                                            \\\n  INTERCEPT_OR_DIE(#name, name);\n\n#define INTERCEPT_WRAP_W_W(name)                                               \\\n  extern \"C\" void *name(void *arg) {                                           \\\n    typedef decltype(name) *fntype;                                            \\\n    static fntype fn = (fntype)__sanitizer::dllThunkGetRealAddrOrDie(#name);   \\\n    return fn(arg);                                                            \\\n  }                                                                            \\\n  INTERCEPT_OR_DIE(#name, name);\n\n#define INTERCEPT_WRAP_W_WW(name)                                              \\\n  extern \"C\" void *name(void *arg1, void *arg2) {                              \\\n    typedef 
decltype(name) *fntype;                                            \\\n    static fntype fn = (fntype)__sanitizer::dllThunkGetRealAddrOrDie(#name);   \\\n    return fn(arg1, arg2);                                                     \\\n  }                                                                            \\\n  INTERCEPT_OR_DIE(#name, name);\n\n#define INTERCEPT_WRAP_W_WWW(name)                                             \\\n  extern \"C\" void *name(void *arg1, void *arg2, void *arg3) {                  \\\n    typedef decltype(name) *fntype;                                            \\\n    static fntype fn = (fntype)__sanitizer::dllThunkGetRealAddrOrDie(#name);   \\\n    return fn(arg1, arg2, arg3);                                               \\\n  }                                                                            \\\n  INTERCEPT_OR_DIE(#name, name);\n\n#define INTERCEPT_WRAP_W_WWWW(name)                                            \\\n  extern \"C\" void *name(void *arg1, void *arg2, void *arg3, void *arg4) {      \\\n    typedef decltype(name) *fntype;                                            \\\n    static fntype fn = (fntype)__sanitizer::dllThunkGetRealAddrOrDie(#name);   \\\n    return fn(arg1, arg2, arg3, arg4);                                         \\\n  }                                                                            \\\n  INTERCEPT_OR_DIE(#name, name);\n\n#define INTERCEPT_WRAP_W_WWWWW(name)                                           \\\n  extern \"C\" void *name(void *arg1, void *arg2, void *arg3, void *arg4,        \\\n                        void *arg5) {                                          \\\n    typedef decltype(name) *fntype;                                            \\\n    static fntype fn = (fntype)__sanitizer::dllThunkGetRealAddrOrDie(#name);   \\\n    return fn(arg1, arg2, arg3, arg4, arg5);                                   \\\n  }                                                                     
       \\\n  INTERCEPT_OR_DIE(#name, name);\n\n#define INTERCEPT_WRAP_W_WWWWWW(name)                                          \\\n  extern \"C\" void *name(void *arg1, void *arg2, void *arg3, void *arg4,        \\\n                        void *arg5, void *arg6) {                              \\\n    typedef decltype(name) *fntype;                                            \\\n    static fntype fn = (fntype)__sanitizer::dllThunkGetRealAddrOrDie(#name);   \\\n    return fn(arg1, arg2, arg3, arg4, arg5, arg6);                             \\\n  }                                                                            \\\n  INTERCEPT_OR_DIE(#name, name);\n\n#endif // SANITIZER_WIN_DLL_THUNK_H\n"
  },
  {
    "path": "runtime/sanitizer_common/sanitizer_win_dynamic_runtime_thunk.cpp",
    "content": "//===-- santizer_win_dynamic_runtime_thunk.cpp ----------------------------===//\n//\n// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n// See https://llvm.org/LICENSE.txt for license information.\n// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n//\n//===----------------------------------------------------------------------===//\n//\n// This file defines things that need to be present in the application modules\n// to interact with Sanitizer Common, when it is included in a dll.\n//\n//===----------------------------------------------------------------------===//\n#ifdef SANITIZER_DYNAMIC_RUNTIME_THUNK\n#define SANITIZER_IMPORT_INTERFACE 1\n#include \"sanitizer_win_defs.h\"\n// Define weak alias for all weak functions imported from sanitizer common.\n#define INTERFACE_FUNCTION(Name)\n#define INTERFACE_WEAK_FUNCTION(Name) WIN_WEAK_IMPORT_DEF(Name)\n#include \"sanitizer_common_interface.inc\"\n#endif // SANITIZER_DYNAMIC_RUNTIME_THUNK\n\nnamespace __sanitizer {\n// Add one, otherwise unused, external symbol to this object file so that the\n// Visual C++ linker includes it and reads the .drective section.\nvoid ForceWholeArchiveIncludeForSanitizerCommon() {}\n}\n"
  },
  {
    "path": "runtime/sanitizer_common/sanitizer_win_weak_interception.cpp",
    "content": "//===-- sanitizer_win_weak_interception.cpp -------------------------------===//\n//\n// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n// See https://llvm.org/LICENSE.txt for license information.\n// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n//\n//===----------------------------------------------------------------------===//\n// This module should be included in the sanitizer when it is implemented as a\n// shared library on Windows (dll), in order to delegate the calls of weak\n// functions to the implementation in the main executable when a strong\n// definition is provided.\n//===----------------------------------------------------------------------===//\n\n#include \"sanitizer_platform.h\"\n#if SANITIZER_WINDOWS && SANITIZER_DYNAMIC\n#include \"sanitizer_win_weak_interception.h\"\n#include \"sanitizer_allocator_interface.h\"\n#include \"sanitizer_interface_internal.h\"\n#include \"sanitizer_win_defs.h\"\n#include \"interception/interception.h\"\n\nextern \"C\" {\nvoid *WINAPI GetModuleHandleA(const char *module_name);\nvoid abort();\n}\n\nnamespace __sanitizer {\n// Try to get a pointer to real_function in the main module and override\n// dll_function with that pointer. 
If the function isn't found, nothing changes.\nint interceptWhenPossible(uptr dll_function, const char *real_function) {\n  uptr real = __interception::InternalGetProcAddress(\n      (void *)GetModuleHandleA(0), real_function);\n  if (real && !__interception::OverrideFunction((uptr)dll_function, real, 0))\n    abort();\n  return 0;\n}\n} // namespace __sanitizer\n\n// Declare weak hooks.\nextern \"C\" {\nvoid __sanitizer_on_print(const char *str);\nvoid __sanitizer_weak_hook_memcmp(uptr called_pc, const void *s1,\n                                  const void *s2, uptr n, int result);\nvoid __sanitizer_weak_hook_strcmp(uptr called_pc, const char *s1,\n                                  const char *s2, int result);\nvoid __sanitizer_weak_hook_strncmp(uptr called_pc, const char *s1,\n                                   const char *s2, uptr n, int result);\nvoid __sanitizer_weak_hook_strstr(uptr called_pc, const char *s1,\n                                  const char *s2, char *result);\n}\n\n// Include Sanitizer Common interface.\n#define INTERFACE_FUNCTION(Name)\n#define INTERFACE_WEAK_FUNCTION(Name) INTERCEPT_SANITIZER_WEAK_FUNCTION(Name)\n#include \"sanitizer_common_interface.inc\"\n\n#pragma section(\".WEAK$A\", read)\n#pragma section(\".WEAK$Z\", read)\n\ntypedef void (*InterceptCB)();\nextern \"C\" {\n__declspec(allocate(\".WEAK$A\")) InterceptCB __start_weak_list;\n__declspec(allocate(\".WEAK$Z\")) InterceptCB __stop_weak_list;\n}\n\nstatic int weak_intercept_init() {\n  static bool flag = false;\n  // weak_interception_init is expected to be called by only one thread.\n  if (flag) return 0;\n  flag = true;\n\n  for (InterceptCB *it = &__start_weak_list; it < &__stop_weak_list; ++it)\n    if (*it)\n      (*it)();\n\n  // In DLLs, the callbacks are expected to return 0,\n  // otherwise CRT initialization fails.\n  return 0;\n}\n\n#pragma section(\".CRT$XIB\", long, read)\n__declspec(allocate(\".CRT$XIB\")) int (*__weak_intercept_preinit)() =\n    
weak_intercept_init;\n\nstatic void WINAPI weak_intercept_thread_init(void *mod, unsigned long reason,\n                                              void *reserved) {\n  if (reason == /*DLL_PROCESS_ATTACH=*/1) weak_intercept_init();\n}\n\n#pragma section(\".CRT$XLAB\", long, read)\n__declspec(allocate(\".CRT$XLAB\")) void(WINAPI *__weak_intercept_tls_init)(\n    void *, unsigned long, void *) = weak_intercept_thread_init;\n\n#endif // SANITIZER_WINDOWS && SANITIZER_DYNAMIC\n"
  },
  {
    "path": "runtime/sanitizer_common/sanitizer_win_weak_interception.h",
    "content": "//===-- sanitizer_win_weak_interception.h ---------------------------------===//\n//\n// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n// See https://llvm.org/LICENSE.txt for license information.\n// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n//\n//===----------------------------------------------------------------------===//\n// This header provide helper macros to delegate calls of weak functions to the\n// implementation in the main executable when a strong definition is present.\n//===----------------------------------------------------------------------===//\n#ifndef SANITIZER_WIN_WEAK_INTERCEPTION_H\n#define SANITIZER_WIN_WEAK_INTERCEPTION_H\n#include \"sanitizer_internal_defs.h\"\n\nnamespace __sanitizer {\nint interceptWhenPossible(uptr dll_function, const char *real_function);\n}\n\n// ----------------- Function interception helper macros -------------------- //\n// Weak functions, could be redefined in the main executable, but that is not\n// necessary, so we shouldn't die if we can not find a reference.\n#define INTERCEPT_WEAK(Name) interceptWhenPossible((uptr) Name, #Name);\n\n#define INTERCEPT_SANITIZER_WEAK_FUNCTION(Name)                                \\\n  static int intercept_##Name() {                                              \\\n    return __sanitizer::interceptWhenPossible((__sanitizer::uptr) Name, #Name);\\\n  }                                                                            \\\n  __pragma(section(\".WEAK$M\", long, read))                                     \\\n  __declspec(allocate(\".WEAK$M\")) int (*__weak_intercept_##Name)() =           \\\n      intercept_##Name;\n\n#endif // SANITIZER_WIN_WEAK_INTERCEPTION_H\n"
  },
  {
    "path": "runtime/sanitizer_common/scripts/check_lint.sh",
    "content": "#!/bin/sh\n\nSCRIPT_DIR=\"$(cd \"$(dirname \"$0\")\" && pwd)\"\n\nif [ \"${COMPILER_RT}\" = \"\" ]; then\n  COMPILER_RT=$(readlink -f $SCRIPT_DIR/../../..)\nfi\n\n# python tools setup\nCPPLINT=${SCRIPT_DIR}/cpplint.py\nLITLINT=${SCRIPT_DIR}/litlint.py\nif [ \"${PYTHON_EXECUTABLE}\" != \"\" ]; then\n  CPPLINT=\"${PYTHON_EXECUTABLE} ${CPPLINT}\"\n  LITLINT=\"${PYTHON_EXECUTABLE} ${LITLINT}\"\nfi\n\n# Filters\n# TODO: remove some of these filters\nCOMMON_LINT_FILTER=-build/include,-build/header_guard,-legal/copyright,-whitespace/comments,-readability/casting,\\\n-build/namespaces,-build/c++11,-runtime/int,-runtime/references,-readability/todo,-whitespace/parens\n\nCOMMON_LIT_TEST_LINT_FILTER=-whitespace/indent,-whitespace/line_length,-runtime/arrays,-readability/braces\n\nASAN_RTL_LINT_FILTER=${COMMON_LINT_FILTER}\nASAN_TEST_LINT_FILTER=${COMMON_LINT_FILTER},-runtime/printf,-runtime/threadsafe_fn\nASAN_LIT_TEST_LINT_FILTER=${ASAN_TEST_LINT_FILTER},${COMMON_LIT_TEST_LINT_FILTER}\n\nTSAN_RTL_LINT_FILTER=${COMMON_LINT_FILTER},-readability/braces\nTSAN_TEST_LINT_FILTER=${TSAN_RTL_LINT_FILTER},-runtime/threadsafe_fn\nTSAN_LIT_TEST_LINT_FILTER=${TSAN_TEST_LINT_FILTER},${COMMON_LIT_TEST_LINT_FILTER}\n\nMSAN_RTL_LINT_FILTER=${COMMON_LINT_FILTER}\n\nLSAN_RTL_LINT_FILTER=${COMMON_LINT_FILTER}\nLSAN_LIT_TEST_LINT_FILTER=${LSAN_RTL_LINT_FILTER},${COMMON_LIT_TEST_LINT_FILTER}\n\nDFSAN_RTL_LINT_FILTER=${COMMON_LINT_FILTER}\nSCUDO_RTL_LINT_FILTER=${COMMON_LINT_FILTER}\n\nCOMMON_RTL_INC_LINT_FILTER=${COMMON_LINT_FILTER}\n\nSANITIZER_INCLUDES_LINT_FILTER=${COMMON_LINT_FILTER}\n\nMKTEMP_DIR=$(mktemp -qd /tmp/check_lint.XXXXXXXXXX)\nMKTEMP=\"mktemp -q ${MKTEMP_DIR}/tmp.XXXXXXXXXX\"\ncleanup() {\n  rm -rf $MKTEMP_DIR\n}\ntrap cleanup EXIT\n\nEXITSTATUS=0\nERROR_LOG=$(${MKTEMP})\n\nrun_lint() {\n  FILTER=$1\n  shift\n  TASK_LOG=$(${MKTEMP})\n  ${CPPLINT} --filter=${FILTER} \"$@\" > $TASK_LOG 2>&1\n  if [ \"$?\" != \"0\" ]; then\n    cat $TASK_LOG | grep -v \"Done 
processing\" | grep -v \"Total errors found\" \\\n      | grep -v \"Skipping input\" >> $ERROR_LOG\n  fi\n  if [ \"${SILENT}\" != \"1\" ]; then\n    cat $TASK_LOG\n  fi\n  ${LITLINT} \"$@\" 2>>$ERROR_LOG\n}\n\nLIT_TESTS=${COMPILER_RT}/test\n# Headers\nSANITIZER_INCLUDES=${COMPILER_RT}/include/sanitizer\nFUZZER_INCLUDES=${COMPILER_RT}/include/fuzzer\nrun_lint ${SANITIZER_INCLUDES_LINT_FILTER} ${SANITIZER_INCLUDES}/*.h \\\n                                           ${FUZZER_INCLUDES}/*.h &\n\n# Sanitizer_common\nCOMMON_RTL=${COMPILER_RT}/lib/sanitizer_common\nrun_lint ${COMMON_RTL_INC_LINT_FILTER} ${COMMON_RTL}/*.cpp \\\n                                       ${COMMON_RTL}/*.h \\\n                                       ${COMMON_RTL}/tests/*.cpp &\n\n# Interception\nINTERCEPTION=${COMPILER_RT}/lib/interception\nrun_lint ${ASAN_RTL_LINT_FILTER} ${INTERCEPTION}/*.cpp \\\n                                 ${INTERCEPTION}/*.h &\n\n# ASan\nASAN_RTL=${COMPILER_RT}/lib/asan\nrun_lint ${ASAN_RTL_LINT_FILTER} ${ASAN_RTL}/*.cpp \\\n                                 ${ASAN_RTL}/*.h &\nrun_lint ${ASAN_TEST_LINT_FILTER} ${ASAN_RTL}/tests/*.cpp \\\n                                  ${ASAN_RTL}/tests/*.h &\nrun_lint ${ASAN_LIT_TEST_LINT_FILTER} ${LIT_TESTS}/asan/*/*.cpp &\n\n# TSan\nTSAN_RTL=${COMPILER_RT}/lib/tsan\nrun_lint ${TSAN_RTL_LINT_FILTER} ${TSAN_RTL}/rtl/*.cpp \\\n                                 ${TSAN_RTL}/rtl/*.h &\nrun_lint ${TSAN_TEST_LINT_FILTER} ${TSAN_RTL}/tests/rtl/*.cpp \\\n                                  ${TSAN_RTL}/tests/rtl/*.h \\\n                                  ${TSAN_RTL}/tests/unit/*.cpp &\nrun_lint ${TSAN_LIT_TEST_LINT_FILTER} ${LIT_TESTS}/tsan/*.cpp &\n\n# MSan\nMSAN_RTL=${COMPILER_RT}/lib/msan\nrun_lint ${MSAN_RTL_LINT_FILTER} ${MSAN_RTL}/*.cpp \\\n                                 ${MSAN_RTL}/*.h &\n\n# LSan\nLSAN_RTL=${COMPILER_RT}/lib/lsan\nrun_lint ${LSAN_RTL_LINT_FILTER} ${LSAN_RTL}/*.cpp \\\n                                 ${LSAN_RTL}/*.h 
&\nrun_lint ${LSAN_LIT_TEST_LINT_FILTER} ${LIT_TESTS}/lsan/*/*.cpp &\n\n# DFSan\nDFSAN_RTL=${COMPILER_RT}/lib/dfsan\nrun_lint ${DFSAN_RTL_LINT_FILTER} ${DFSAN_RTL}/*.cpp \\\n                                  ${DFSAN_RTL}/*.h &\n${DFSAN_RTL}/scripts/check_custom_wrappers.sh >> $ERROR_LOG\n\n# Scudo\nSCUDO_RTL=${COMPILER_RT}/lib/scudo\nrun_lint ${SCUDO_RTL_LINT_FILTER} ${SCUDO_RTL}/*.cpp \\\n                                  ${SCUDO_RTL}/*.h &\n\n# Misc files\n(\nrsync -a --prune-empty-dirs --exclude='*/profile/*' --exclude='*/builtins/*' --exclude='*/xray/*' --include='*/' --include='*.inc' --exclude='*' \"${COMPILER_RT}/\" \"${MKTEMP_DIR}/\"\nfind ${MKTEMP_DIR} -type f -name '*.inc' -exec mv {} {}.cpp \\;\n( ERROR_LOG=${ERROR_LOG}.inc run_lint ${COMMON_RTL_INC_LINT_FILTER} $(find ${MKTEMP_DIR} -type f -name '*.inc.cpp') )\nsed \"s|${MKTEMP_DIR}|${COMPILER_RT}|g\" ${ERROR_LOG}.inc | sed \"s|.inc.cpp|.inc|g\" >> ${ERROR_LOG}\n) &\n\nwait\n\nif [ -s $ERROR_LOG ]; then\n  cat $ERROR_LOG\n  exit 1\nfi\n\nexit 0\n"
  },
  {
    "path": "runtime/sanitizer_common/scripts/cpplint.py",
    "content": "#!/usr/bin/env python\n#\n# Copyright (c) 2009 Google Inc. All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are\n# met:\n#\n#    * Redistributions of source code must retain the above copyright\n# notice, this list of conditions and the following disclaimer.\n#    * Redistributions in binary form must reproduce the above\n# copyright notice, this list of conditions and the following disclaimer\n# in the documentation and/or other materials provided with the\n# distribution.\n#    * Neither the name of Google Inc. nor the names of its\n# contributors may be used to endorse or promote products derived from\n# this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n# \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\n# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n\"\"\"Does google-lint on c++ files.\n\nThe goal of this script is to identify places in the code that *may*\nbe in non-compliance with google style.  It does not attempt to fix\nup these problems -- the point is to educate.  
It does also not\nattempt to find all problems, or to ensure that everything it does\nfind is legitimately a problem.\n\nIn particular, we can get very confused by /* and // inside strings!\nWe do a small hack, which is to ignore //'s with \"'s after them on the\nsame line, but it is far from perfect (in either direction).\n\"\"\"\n\nimport codecs\nimport copy\nimport getopt\nimport math  # for log\nimport os\nimport re\nimport sre_compile\nimport string\nimport sys\nimport unicodedata\nimport sysconfig\n\ntry:\n  xrange          # Python 2\nexcept NameError:\n  xrange = range  # Python 3\n\n\n_USAGE = \"\"\"\nSyntax: cpplint.py [--verbose=#] [--output=vs7] [--filter=-x,+y,...]\n                   [--counting=total|toplevel|detailed] [--root=subdir]\n                   [--linelength=digits] [--headers=x,y,...]\n                   [--quiet]\n        <file> [file] ...\n\n  The style guidelines this tries to follow are those in\n    https://google-styleguide.googlecode.com/svn/trunk/cppguide.xml\n\n  Every problem is given a confidence score from 1-5, with 5 meaning we are\n  certain of the problem, and 1 meaning it could be a legitimate construct.\n  This will miss some errors, and is not a substitute for a code review.\n\n  To suppress false-positive errors of a certain category, add a\n  'NOLINT(category)' comment to the line.  NOLINT or NOLINT(*)\n  suppresses errors of all categories on that line.\n\n  The files passed in will be linted; at least one file must be provided.\n  Default linted extensions are .cc, .cpp, .cu, .cuh and .h.  Change the\n  extensions with the --extensions flag.\n\n  Flags:\n\n    output=vs7\n      By default, the output is formatted to ease emacs parsing.  Visual Studio\n      compatible output (vs7) may also be used.  
Other formats are unsupported.\n\n    verbose=#\n      Specify a number 0-5 to restrict errors to certain verbosity levels.\n\n    quiet\n      Don't print anything if no errors are found.\n\n    filter=-x,+y,...\n      Specify a comma-separated list of category-filters to apply: only\n      error messages whose category names pass the filters will be printed.\n      (Category names are printed with the message and look like\n      \"[whitespace/indent]\".)  Filters are evaluated left to right.\n      \"-FOO\" and \"FOO\" means \"do not print categories that start with FOO\".\n      \"+FOO\" means \"do print categories that start with FOO\".\n\n      Examples: --filter=-whitespace,+whitespace/braces\n                --filter=whitespace,runtime/printf,+runtime/printf_format\n                --filter=-,+build/include_what_you_use\n\n      To see a list of all the categories used in cpplint, pass no arg:\n         --filter=\n\n    counting=total|toplevel|detailed\n      The total number of errors found is always printed. If\n      'toplevel' is provided, then the count of errors in each of\n      the top-level categories like 'build' and 'whitespace' will\n      also be printed. If 'detailed' is provided, then a count\n      is provided for each category like 'build/class'.\n\n    root=subdir\n      The root directory used for deriving header guard CPP variable.\n      By default, the header guard CPP variable is calculated as the relative\n      path to the directory that contains .git, .hg, or .svn.  When this flag\n      is specified, the relative path is calculated from the specified\n      directory. 
If the specified directory does not exist, this flag is\n      ignored.\n\n      Examples:\n        Assuming that top/src/.git exists (and cwd=top/src), the header guard\n        CPP variables for top/src/chrome/browser/ui/browser.h are:\n\n        No flag => CHROME_BROWSER_UI_BROWSER_H_\n        --root=chrome => BROWSER_UI_BROWSER_H_\n        --root=chrome/browser => UI_BROWSER_H_\n        --root=.. => SRC_CHROME_BROWSER_UI_BROWSER_H_\n\n    linelength=digits\n      This is the allowed line length for the project. The default value is\n      80 characters.\n\n      Examples:\n        --linelength=120\n\n    extensions=extension,extension,...\n      The allowed file extensions that cpplint will check\n\n      Examples:\n        --extensions=hpp,cpp\n\n    headers=x,y,...\n      The header extensions that cpplint will treat as .h in checks. Values are\n      automatically added to --extensions list.\n\n      Examples:\n        --headers=hpp,hxx\n        --headers=hpp\n\n    cpplint.py supports per-directory configurations specified in CPPLINT.cfg\n    files. CPPLINT.cfg file can contain a number of key=value pairs.\n    Currently the following options are supported:\n\n      set noparent\n      filter=+filter1,-filter2,...\n      exclude_files=regex\n      linelength=80\n      root=subdir\n      headers=x,y,...\n\n    \"set noparent\" option prevents cpplint from traversing directory tree\n    upwards looking for more .cfg files in parent directories. This option\n    is usually placed in the top-level project directory.\n\n    The \"filter\" option is similar in function to --filter flag. It specifies\n    message filters in addition to the |_DEFAULT_FILTERS| and those specified\n    through --filter command-line flag.\n\n    \"exclude_files\" allows to specify a regular expression to be matched against\n    a file name. 
If the expression matches, the file is skipped and not run\n    through liner.\n\n    \"linelength\" allows to specify the allowed line length for the project.\n\n    The \"root\" option is similar in function to the --root flag (see example\n    above). Paths are relative to the directory of the CPPLINT.cfg.\n\n    The \"headers\" option is similar in function to the --headers flag\n    (see example above).\n\n    CPPLINT.cfg has an effect on files in the same directory and all\n    sub-directories, unless overridden by a nested configuration file.\n\n      Example file:\n        filter=-build/include_order,+build/include_alpha\n        exclude_files=.*\\.cc\n\n    The above example disables build/include_order warning and enables\n    build/include_alpha as well as excludes all .cc from being\n    processed by linter, in the current directory (where the .cfg\n    file is located) and all sub-directories.\n\"\"\"\n\n# We categorize each error message we print.  Here are the categories.\n# We want an explicit list so we can list them all in cpplint --filter=.\n# If you add a new error message with a new category, add it to the list\n# here!  
cpplint_unittest.py should tell you if you forget to do this.\n_ERROR_CATEGORIES = [\n    'build/class',\n    'build/c++11',\n    'build/c++14',\n    'build/c++tr1',\n    'build/deprecated',\n    'build/endif_comment',\n    'build/explicit_make_pair',\n    'build/forward_decl',\n    'build/header_guard',\n    'build/include',\n    'build/include_alpha',\n    'build/include_order',\n    'build/include_what_you_use',\n    'build/namespaces',\n    'build/printf_format',\n    'build/storage_class',\n    'legal/copyright',\n    'readability/alt_tokens',\n    'readability/braces',\n    'readability/casting',\n    'readability/check',\n    'readability/constructors',\n    'readability/fn_size',\n    'readability/inheritance',\n    'readability/multiline_comment',\n    'readability/multiline_string',\n    'readability/namespace',\n    'readability/nolint',\n    'readability/nul',\n    'readability/strings',\n    'readability/todo',\n    'readability/utf8',\n    'runtime/arrays',\n    'runtime/casting',\n    'runtime/explicit',\n    'runtime/int',\n    'runtime/init',\n    'runtime/invalid_increment',\n    'runtime/member_string_references',\n    'runtime/memset',\n    'runtime/indentation_namespace',\n    'runtime/operator',\n    'runtime/printf',\n    'runtime/printf_format',\n    'runtime/references',\n    'runtime/string',\n    'runtime/threadsafe_fn',\n    'runtime/vlog',\n    'whitespace/blank_line',\n    'whitespace/braces',\n    'whitespace/comma',\n    'whitespace/comments',\n    'whitespace/empty_conditional_body',\n    'whitespace/empty_if_body',\n    'whitespace/empty_loop_body',\n    'whitespace/end_of_line',\n    'whitespace/ending_newline',\n    'whitespace/forcolon',\n    'whitespace/indent',\n    'whitespace/line_length',\n    'whitespace/newline',\n    'whitespace/operators',\n    'whitespace/parens',\n    'whitespace/semicolon',\n    'whitespace/tab',\n    'whitespace/todo',\n    ]\n\n# These error categories are no longer enforced by cpplint, but for 
backwards-\n# compatibility they may still appear in NOLINT comments.\n_LEGACY_ERROR_CATEGORIES = [\n    'readability/streams',\n    'readability/function',\n    ]\n\n# The default state of the category filter. This is overridden by the --filter=\n# flag. By default all errors are on, so only add here categories that should be\n# off by default (i.e., categories that must be enabled by the --filter= flags).\n# All entries here should start with a '-' or '+', as in the --filter= flag.\n_DEFAULT_FILTERS = ['-build/include_alpha']\n\n# The default list of categories suppressed for C (not C++) files.\n_DEFAULT_C_SUPPRESSED_CATEGORIES = [\n    'readability/casting',\n    ]\n\n# The default list of categories suppressed for Linux Kernel files.\n_DEFAULT_KERNEL_SUPPRESSED_CATEGORIES = [\n    'whitespace/tab',\n    ]\n\n# We used to check for high-bit characters, but after much discussion we\n# decided those were OK, as long as they were in UTF-8 and didn't represent\n# hard-coded international strings, which belong in a separate i18n file.\n\n# C++ headers\n_CPP_HEADERS = frozenset([\n    # Legacy\n    'algobase.h',\n    'algo.h',\n    'alloc.h',\n    'builtinbuf.h',\n    'bvector.h',\n    'complex.h',\n    'defalloc.h',\n    'deque.h',\n    'editbuf.h',\n    'fstream.h',\n    'function.h',\n    'hash_map',\n    'hash_map.h',\n    'hash_set',\n    'hash_set.h',\n    'hashtable.h',\n    'heap.h',\n    'indstream.h',\n    'iomanip.h',\n    'iostream.h',\n    'istream.h',\n    'iterator.h',\n    'list.h',\n    'map.h',\n    'multimap.h',\n    'multiset.h',\n    'ostream.h',\n    'pair.h',\n    'parsestream.h',\n    'pfstream.h',\n    'procbuf.h',\n    'pthread_alloc',\n    'pthread_alloc.h',\n    'rope',\n    'rope.h',\n    'ropeimpl.h',\n    'set.h',\n    'slist',\n    'slist.h',\n    'stack.h',\n    'stdiostream.h',\n    'stl_alloc.h',\n    'stl_relops.h',\n    'streambuf.h',\n    'stream.h',\n    'strfile.h',\n    'strstream.h',\n    'tempbuf.h',\n    'tree.h',\n    
'type_traits.h',\n    'vector.h',\n    # 17.6.1.2 C++ library headers\n    'algorithm',\n    'array',\n    'atomic',\n    'bitset',\n    'chrono',\n    'codecvt',\n    'complex',\n    'condition_variable',\n    'deque',\n    'exception',\n    'forward_list',\n    'fstream',\n    'functional',\n    'future',\n    'initializer_list',\n    'iomanip',\n    'ios',\n    'iosfwd',\n    'iostream',\n    'istream',\n    'iterator',\n    'limits',\n    'list',\n    'locale',\n    'map',\n    'memory',\n    'mutex',\n    'new',\n    'numeric',\n    'ostream',\n    'queue',\n    'random',\n    'ratio',\n    'regex',\n    'scoped_allocator',\n    'set',\n    'sstream',\n    'stack',\n    'stdexcept',\n    'streambuf',\n    'string',\n    'strstream',\n    'system_error',\n    'thread',\n    'tuple',\n    'typeindex',\n    'typeinfo',\n    'type_traits',\n    'unordered_map',\n    'unordered_set',\n    'utility',\n    'valarray',\n    'vector',\n    # 17.6.1.2 C++ headers for C library facilities\n    'cassert',\n    'ccomplex',\n    'cctype',\n    'cerrno',\n    'cfenv',\n    'cfloat',\n    'cinttypes',\n    'ciso646',\n    'climits',\n    'clocale',\n    'cmath',\n    'csetjmp',\n    'csignal',\n    'cstdalign',\n    'cstdarg',\n    'cstdbool',\n    'cstddef',\n    'cstdint',\n    'cstdio',\n    'cstdlib',\n    'cstring',\n    'ctgmath',\n    'ctime',\n    'cuchar',\n    'cwchar',\n    'cwctype',\n    ])\n\n# Type names\n_TYPES = re.compile(\n    r'^(?:'\n    # [dcl.type.simple]\n    r'(char(16_t|32_t)?)|wchar_t|'\n    r'bool|short|int|long|signed|unsigned|float|double|'\n    # [support.types]\n    r'(ptrdiff_t|size_t|max_align_t|nullptr_t)|'\n    # [cstdint.syn]\n    r'(u?int(_fast|_least)?(8|16|32|64)_t)|'\n    r'(u?int(max|ptr)_t)|'\n    r')$')\n\n\n# These headers are excluded from [build/include] and [build/include_order]\n# checks:\n# - Anything not following google file name conventions (containing an\n#   uppercase character, such as Python.h or nsStringAPI.h, for 
example).\n# - Lua headers.\n_THIRD_PARTY_HEADERS_PATTERN = re.compile(\n    r'^(?:[^/]*[A-Z][^/]*\\.h|lua\\.h|lauxlib\\.h|lualib\\.h)$')\n\n# Pattern for matching FileInfo.BaseName() against test file name\n_TEST_FILE_SUFFIX = r'(_test|_unittest|_regtest)$'\n\n# Pattern that matches only complete whitespace, possibly across multiple lines.\n_EMPTY_CONDITIONAL_BODY_PATTERN = re.compile(r'^\\s*$', re.DOTALL)\n\n# Assertion macros.  These are defined in base/logging.h and\n# testing/base/public/gunit.h.\n_CHECK_MACROS = [\n    'DCHECK', 'CHECK',\n    'EXPECT_TRUE', 'ASSERT_TRUE',\n    'EXPECT_FALSE', 'ASSERT_FALSE',\n    ]\n\n# Replacement macros for CHECK/DCHECK/EXPECT_TRUE/EXPECT_FALSE\n_CHECK_REPLACEMENT = dict([(m, {}) for m in _CHECK_MACROS])\n\nfor op, replacement in [('==', 'EQ'), ('!=', 'NE'),\n                        ('>=', 'GE'), ('>', 'GT'),\n                        ('<=', 'LE'), ('<', 'LT')]:\n  _CHECK_REPLACEMENT['DCHECK'][op] = 'DCHECK_%s' % replacement\n  _CHECK_REPLACEMENT['CHECK'][op] = 'CHECK_%s' % replacement\n  _CHECK_REPLACEMENT['EXPECT_TRUE'][op] = 'EXPECT_%s' % replacement\n  _CHECK_REPLACEMENT['ASSERT_TRUE'][op] = 'ASSERT_%s' % replacement\n\nfor op, inv_replacement in [('==', 'NE'), ('!=', 'EQ'),\n                            ('>=', 'LT'), ('>', 'LE'),\n                            ('<=', 'GT'), ('<', 'GE')]:\n  _CHECK_REPLACEMENT['EXPECT_FALSE'][op] = 'EXPECT_%s' % inv_replacement\n  _CHECK_REPLACEMENT['ASSERT_FALSE'][op] = 'ASSERT_%s' % inv_replacement\n\n# Alternative tokens and their replacements.  
For full list, see section 2.5\n# Alternative tokens [lex.digraph] in the C++ standard.\n#\n# Digraphs (such as '%:') are not included here since it's a mess to\n# match those on a word boundary.\n_ALT_TOKEN_REPLACEMENT = {\n    'and': '&&',\n    'bitor': '|',\n    'or': '||',\n    'xor': '^',\n    'compl': '~',\n    'bitand': '&',\n    'and_eq': '&=',\n    'or_eq': '|=',\n    'xor_eq': '^=',\n    'not': '!',\n    'not_eq': '!='\n    }\n\n# Compile regular expression that matches all the above keywords.  The \"[ =()]\"\n# bit is meant to avoid matching these keywords outside of boolean expressions.\n#\n# False positives include C-style multi-line comments and multi-line strings\n# but those have always been troublesome for cpplint.\n_ALT_TOKEN_REPLACEMENT_PATTERN = re.compile(\n    r'[ =()](' + ('|'.join(_ALT_TOKEN_REPLACEMENT.keys())) + r')(?=[ (]|$)')\n\n\n# These constants define types of headers for use with\n# _IncludeState.CheckNextIncludeOrder().\n_C_SYS_HEADER = 1\n_CPP_SYS_HEADER = 2\n_LIKELY_MY_HEADER = 3\n_POSSIBLE_MY_HEADER = 4\n_OTHER_HEADER = 5\n\n# These constants define the current inline assembly state\n_NO_ASM = 0       # Outside of inline assembly block\n_INSIDE_ASM = 1   # Inside inline assembly block\n_END_ASM = 2      # Last line of inline assembly block\n_BLOCK_ASM = 3    # The whole block is an inline assembly block\n\n# Match start of assembly blocks\n_MATCH_ASM = re.compile(r'^\\s*(?:asm|_asm|__asm|__asm__)'\n                        r'(?:\\s+(volatile|__volatile__))?'\n                        r'\\s*[{(]')\n\n# Match strings that indicate we're working on a C (not C++) file.\n_SEARCH_C_FILE = re.compile(r'\\b(?:LINT_C_FILE|'\n                            r'vim?:\\s*.*(\\s*|:)filetype=c(\\s*|:|$))')\n\n# Match string that indicates we're working on a Linux Kernel file.\n_SEARCH_KERNEL_FILE = re.compile(r'\\b(?:LINT_KERNEL_FILE)')\n\n_regexp_compile_cache = {}\n\n# {str, set(int)}: a map from error categories to sets of linenumbers\n# on which 
those errors are expected and should be suppressed.\n_error_suppressions = {}\n\n# The root directory used for deriving header guard CPP variable.\n# This is set by --root flag.\n_root = None\n_root_debug = False\n\n# The allowed line length of files.\n# This is set by --linelength flag.\n_line_length = 80\n\n# The allowed extensions for file names\n# This is set by --extensions flag.\n_valid_extensions = set(['cc', 'h', 'cpp', 'cu', 'cuh'])\n\n# Treat all headers starting with 'h' equally: .h, .hpp, .hxx etc.\n# This is set by --headers flag.\n_hpp_headers = set(['h'])\n\n# {str, bool}: a map from error categories to booleans which indicate if the\n# category should be suppressed for every line.\n_global_error_suppressions = {}\n\ndef ProcessHppHeadersOption(val):\n  global _hpp_headers\n  try:\n    _hpp_headers = set(val.split(','))\n    # Automatically append to extensions list so it does not have to be set 2 times\n    _valid_extensions.update(_hpp_headers)\n  except ValueError:\n    PrintUsage('Header extensions must be comma separated list.')\n\ndef IsHeaderExtension(file_extension):\n  return file_extension in _hpp_headers\n\ndef ParseNolintSuppressions(filename, raw_line, linenum, error):\n  \"\"\"Updates the global list of line error-suppressions.\n\n  Parses any NOLINT comments on the current line, updating the global\n  error_suppressions store.  
Reports an error if the NOLINT comment\n  was malformed.\n\n  Args:\n    filename: str, the name of the input file.\n    raw_line: str, the line of input text, with comments.\n    linenum: int, the number of the current line.\n    error: function, an error handler.\n  \"\"\"\n  matched = Search(r'\\bNOLINT(NEXTLINE)?\\b(\\([^)]+\\))?', raw_line)\n  if matched:\n    if matched.group(1):\n      suppressed_line = linenum + 1\n    else:\n      suppressed_line = linenum\n    category = matched.group(2)\n    if category in (None, '(*)'):  # => \"suppress all\"\n      _error_suppressions.setdefault(None, set()).add(suppressed_line)\n    else:\n      if category.startswith('(') and category.endswith(')'):\n        category = category[1:-1]\n        if category in _ERROR_CATEGORIES:\n          _error_suppressions.setdefault(category, set()).add(suppressed_line)\n        elif category not in _LEGACY_ERROR_CATEGORIES:\n          error(filename, linenum, 'readability/nolint', 5,\n                'Unknown NOLINT error category: %s' % category)\n\n\ndef ProcessGlobalSuppresions(lines):\n  \"\"\"Updates the list of global error suppressions.\n\n  Parses any lint directives in the file that have global effect.\n\n  Args:\n    lines: An array of strings, each representing a line of the file, with the\n           last element being empty if the file is terminated with a newline.\n  \"\"\"\n  for line in lines:\n    if _SEARCH_C_FILE.search(line):\n      for category in _DEFAULT_C_SUPPRESSED_CATEGORIES:\n        _global_error_suppressions[category] = True\n    if _SEARCH_KERNEL_FILE.search(line):\n      for category in _DEFAULT_KERNEL_SUPPRESSED_CATEGORIES:\n        _global_error_suppressions[category] = True\n\n\ndef ResetNolintSuppressions():\n  \"\"\"Resets the set of NOLINT suppressions to empty.\"\"\"\n  _error_suppressions.clear()\n  _global_error_suppressions.clear()\n\n\ndef IsErrorSuppressedByNolint(category, linenum):\n  \"\"\"Returns true if the specified error category is 
suppressed on this line.\n\n  Consults the global error_suppressions map populated by\n  ParseNolintSuppressions/ProcessGlobalSuppresions/ResetNolintSuppressions.\n\n  Args:\n    category: str, the category of the error.\n    linenum: int, the current line number.\n  Returns:\n    bool, True iff the error should be suppressed due to a NOLINT comment or\n    global suppression.\n  \"\"\"\n  return (_global_error_suppressions.get(category, False) or\n          linenum in _error_suppressions.get(category, set()) or\n          linenum in _error_suppressions.get(None, set()))\n\n\ndef Match(pattern, s):\n  \"\"\"Matches the string with the pattern, caching the compiled regexp.\"\"\"\n  # The regexp compilation caching is inlined in both Match and Search for\n  # performance reasons; factoring it out into a separate function turns out\n  # to be noticeably expensive.\n  if pattern not in _regexp_compile_cache:\n    _regexp_compile_cache[pattern] = sre_compile.compile(pattern)\n  return _regexp_compile_cache[pattern].match(s)\n\n\ndef ReplaceAll(pattern, rep, s):\n  \"\"\"Replaces instances of pattern in a string with a replacement.\n\n  The compiled regex is kept in a cache shared by Match and Search.\n\n  Args:\n    pattern: regex pattern\n    rep: replacement text\n    s: search string\n\n  Returns:\n    string with replacements made (or original string if no replacements)\n  \"\"\"\n  if pattern not in _regexp_compile_cache:\n    _regexp_compile_cache[pattern] = sre_compile.compile(pattern)\n  return _regexp_compile_cache[pattern].sub(rep, s)\n\n\ndef Search(pattern, s):\n  \"\"\"Searches the string for the pattern, caching the compiled regexp.\"\"\"\n  if pattern not in _regexp_compile_cache:\n    _regexp_compile_cache[pattern] = sre_compile.compile(pattern)\n  return _regexp_compile_cache[pattern].search(s)\n\n\ndef _IsSourceExtension(s):\n  \"\"\"File extension (excluding dot) matches a source file extension.\"\"\"\n  return s in ('c', 'cc', 'cpp', 
'cxx')\n\n\nclass _IncludeState(object):\n  \"\"\"Tracks line numbers for includes, and the order in which includes appear.\n\n  include_list contains list of lists of (header, line number) pairs.\n  It's a lists of lists rather than just one flat list to make it\n  easier to update across preprocessor boundaries.\n\n  Call CheckNextIncludeOrder() once for each header in the file, passing\n  in the type constants defined above. Calls in an illegal order will\n  raise an _IncludeError with an appropriate error message.\n\n  \"\"\"\n  # self._section will move monotonically through this set. If it ever\n  # needs to move backwards, CheckNextIncludeOrder will raise an error.\n  _INITIAL_SECTION = 0\n  _MY_H_SECTION = 1\n  _C_SECTION = 2\n  _CPP_SECTION = 3\n  _OTHER_H_SECTION = 4\n\n  _TYPE_NAMES = {\n      _C_SYS_HEADER: 'C system header',\n      _CPP_SYS_HEADER: 'C++ system header',\n      _LIKELY_MY_HEADER: 'header this file implements',\n      _POSSIBLE_MY_HEADER: 'header this file may implement',\n      _OTHER_HEADER: 'other header',\n      }\n  _SECTION_NAMES = {\n      _INITIAL_SECTION: \"... nothing. (This can't be an error.)\",\n      _MY_H_SECTION: 'a header this file implements',\n      _C_SECTION: 'C system header',\n      _CPP_SECTION: 'C++ system header',\n      _OTHER_H_SECTION: 'other header',\n      }\n\n  def __init__(self):\n    self.include_list = [[]]\n    self.ResetSection('')\n\n  def FindHeader(self, header):\n    \"\"\"Check if a header has already been included.\n\n    Args:\n      header: header to check.\n    Returns:\n      Line number of previous occurrence, or -1 if the header has not\n      been seen before.\n    \"\"\"\n    for section_list in self.include_list:\n      for f in section_list:\n        if f[0] == header:\n          return f[1]\n    return -1\n\n  def ResetSection(self, directive):\n    \"\"\"Reset section checking for preprocessor directive.\n\n    Args:\n      directive: preprocessor directive (e.g. 
\"if\", \"else\").\n    \"\"\"\n    # The name of the current section.\n    self._section = self._INITIAL_SECTION\n    # The path of last found header.\n    self._last_header = ''\n\n    # Update list of includes.  Note that we never pop from the\n    # include list.\n    if directive in ('if', 'ifdef', 'ifndef'):\n      self.include_list.append([])\n    elif directive in ('else', 'elif'):\n      self.include_list[-1] = []\n\n  def SetLastHeader(self, header_path):\n    self._last_header = header_path\n\n  def CanonicalizeAlphabeticalOrder(self, header_path):\n    \"\"\"Returns a path canonicalized for alphabetical comparison.\n\n    - replaces \"-\" with \"_\" so they both cmp the same.\n    - removes '-inl' since we don't require them to be after the main header.\n    - lowercase everything, just in case.\n\n    Args:\n      header_path: Path to be canonicalized.\n\n    Returns:\n      Canonicalized path.\n    \"\"\"\n    return header_path.replace('-inl.h', '.h').replace('-', '_').lower()\n\n  def IsInAlphabeticalOrder(self, clean_lines, linenum, header_path):\n    \"\"\"Check if a header is in alphabetical order with the previous header.\n\n    Args:\n      clean_lines: A CleansedLines instance containing the file.\n      linenum: The number of the line to check.\n      header_path: Canonicalized header to be checked.\n\n    Returns:\n      Returns true if the header is in alphabetical order.\n    \"\"\"\n    # If previous section is different from current section, _last_header will\n    # be reset to empty string, so it's always less than current header.\n    #\n    # If previous line was a blank line, assume that the headers are\n    # intentionally sorted the way they are.\n    if (self._last_header > header_path and\n        Match(r'^\\s*#\\s*include\\b', clean_lines.elided[linenum - 1])):\n      return False\n    return True\n\n  def CheckNextIncludeOrder(self, header_type):\n    \"\"\"Returns a non-empty error message if the next header is out of 
order.\n\n    This function also updates the internal state to be ready to check\n    the next include.\n\n    Args:\n      header_type: One of the _XXX_HEADER constants defined above.\n\n    Returns:\n      The empty string if the header is in the right order, or an\n      error message describing what's wrong.\n\n    \"\"\"\n    error_message = ('Found %s after %s' %\n                     (self._TYPE_NAMES[header_type],\n                      self._SECTION_NAMES[self._section]))\n\n    last_section = self._section\n\n    if header_type == _C_SYS_HEADER:\n      if self._section <= self._C_SECTION:\n        self._section = self._C_SECTION\n      else:\n        self._last_header = ''\n        return error_message\n    elif header_type == _CPP_SYS_HEADER:\n      if self._section <= self._CPP_SECTION:\n        self._section = self._CPP_SECTION\n      else:\n        self._last_header = ''\n        return error_message\n    elif header_type == _LIKELY_MY_HEADER:\n      if self._section <= self._MY_H_SECTION:\n        self._section = self._MY_H_SECTION\n      else:\n        self._section = self._OTHER_H_SECTION\n    elif header_type == _POSSIBLE_MY_HEADER:\n      if self._section <= self._MY_H_SECTION:\n        self._section = self._MY_H_SECTION\n      else:\n        # This will always be the fallback because we're not sure\n        # enough that the header is associated with this file.\n        self._section = self._OTHER_H_SECTION\n    else:\n      assert header_type == _OTHER_HEADER\n      self._section = self._OTHER_H_SECTION\n\n    if last_section != self._section:\n      self._last_header = ''\n\n    return ''\n\n\nclass _CppLintState(object):\n  \"\"\"Maintains module-wide state..\"\"\"\n\n  def __init__(self):\n    self.verbose_level = 1  # global setting.\n    self.error_count = 0    # global count of reported errors\n    # filters to apply when emitting error messages\n    self.filters = _DEFAULT_FILTERS[:]\n    # backup of filter list. 
Used to restore the state after each file.\n    self._filters_backup = self.filters[:]\n    self.counting = 'total'  # In what way are we counting errors?\n    self.errors_by_category = {}  # string to int dict storing error counts\n    self.quiet = False  # Suppress non-error messagess?\n\n    # output format:\n    # \"emacs\" - format that emacs can parse (default)\n    # \"vs7\" - format that Microsoft Visual Studio 7 can parse\n    self.output_format = 'emacs'\n\n  def SetOutputFormat(self, output_format):\n    \"\"\"Sets the output format for errors.\"\"\"\n    self.output_format = output_format\n\n  def SetQuiet(self, quiet):\n    \"\"\"Sets the module's quiet settings, and returns the previous setting.\"\"\"\n    last_quiet = self.quiet\n    self.quiet = quiet\n    return last_quiet\n\n  def SetVerboseLevel(self, level):\n    \"\"\"Sets the module's verbosity, and returns the previous setting.\"\"\"\n    last_verbose_level = self.verbose_level\n    self.verbose_level = level\n    return last_verbose_level\n\n  def SetCountingStyle(self, counting_style):\n    \"\"\"Sets the module's counting options.\"\"\"\n    self.counting = counting_style\n\n  def SetFilters(self, filters):\n    \"\"\"Sets the error-message filters.\n\n    These filters are applied when deciding whether to emit a given\n    error message.\n\n    Args:\n      filters: A string of comma-separated filters (eg \"+whitespace/indent\").\n               Each filter should start with + or -; else we die.\n\n    Raises:\n      ValueError: The comma-separated filters did not all start with '+' or '-'.\n                  E.g. \"-,+whitespace,-whitespace/indent,whitespace/badfilter\"\n    \"\"\"\n    # Default filters always have less priority than the flag ones.\n    self.filters = _DEFAULT_FILTERS[:]\n    self.AddFilters(filters)\n\n  def AddFilters(self, filters):\n    \"\"\" Adds more filters to the existing list of error-message filters. 
\"\"\"\n    for filt in filters.split(','):\n      clean_filt = filt.strip()\n      if clean_filt:\n        self.filters.append(clean_filt)\n    for filt in self.filters:\n      if not (filt.startswith('+') or filt.startswith('-')):\n        raise ValueError('Every filter in --filters must start with + or -'\n                         ' (%s does not)' % filt)\n\n  def BackupFilters(self):\n    \"\"\" Saves the current filter list to backup storage.\"\"\"\n    self._filters_backup = self.filters[:]\n\n  def RestoreFilters(self):\n    \"\"\" Restores filters previously backed up.\"\"\"\n    self.filters = self._filters_backup[:]\n\n  def ResetErrorCounts(self):\n    \"\"\"Sets the module's error statistic back to zero.\"\"\"\n    self.error_count = 0\n    self.errors_by_category = {}\n\n  def IncrementErrorCount(self, category):\n    \"\"\"Bumps the module's error statistic.\"\"\"\n    self.error_count += 1\n    if self.counting in ('toplevel', 'detailed'):\n      if self.counting != 'detailed':\n        category = category.split('/')[0]\n      if category not in self.errors_by_category:\n        self.errors_by_category[category] = 0\n      self.errors_by_category[category] += 1\n\n  def PrintErrorCounts(self):\n    \"\"\"Print a summary of errors by category, and the total.\"\"\"\n    for category, count in self.errors_by_category.iteritems():\n      sys.stderr.write('Category \\'%s\\' errors found: %d\\n' %\n                       (category, count))\n    sys.stdout.write('Total errors found: %d\\n' % self.error_count)\n\n_cpplint_state = _CppLintState()\n\n\ndef _OutputFormat():\n  \"\"\"Gets the module's output format.\"\"\"\n  return _cpplint_state.output_format\n\n\ndef _SetOutputFormat(output_format):\n  \"\"\"Sets the module's output format.\"\"\"\n  _cpplint_state.SetOutputFormat(output_format)\n\ndef _Quiet():\n  \"\"\"Return's the module's quiet setting.\"\"\"\n  return _cpplint_state.quiet\n\ndef _SetQuiet(quiet):\n  \"\"\"Set the module's quiet status, and 
return previous setting.\"\"\"\n  return _cpplint_state.SetQuiet(quiet)\n\n\ndef _VerboseLevel():\n  \"\"\"Returns the module's verbosity setting.\"\"\"\n  return _cpplint_state.verbose_level\n\n\ndef _SetVerboseLevel(level):\n  \"\"\"Sets the module's verbosity, and returns the previous setting.\"\"\"\n  return _cpplint_state.SetVerboseLevel(level)\n\n\ndef _SetCountingStyle(level):\n  \"\"\"Sets the module's counting options.\"\"\"\n  _cpplint_state.SetCountingStyle(level)\n\n\ndef _Filters():\n  \"\"\"Returns the module's list of output filters, as a list.\"\"\"\n  return _cpplint_state.filters\n\n\ndef _SetFilters(filters):\n  \"\"\"Sets the module's error-message filters.\n\n  These filters are applied when deciding whether to emit a given\n  error message.\n\n  Args:\n    filters: A string of comma-separated filters (eg \"whitespace/indent\").\n             Each filter should start with + or -; else we die.\n  \"\"\"\n  _cpplint_state.SetFilters(filters)\n\ndef _AddFilters(filters):\n  \"\"\"Adds more filter overrides.\n\n  Unlike _SetFilters, this function does not reset the current list of filters\n  available.\n\n  Args:\n    filters: A string of comma-separated filters (eg \"whitespace/indent\").\n             Each filter should start with + or -; else we die.\n  \"\"\"\n  _cpplint_state.AddFilters(filters)\n\ndef _BackupFilters():\n  \"\"\" Saves the current filter list to backup storage.\"\"\"\n  _cpplint_state.BackupFilters()\n\ndef _RestoreFilters():\n  \"\"\" Restores filters previously backed up.\"\"\"\n  _cpplint_state.RestoreFilters()\n\nclass _FunctionState(object):\n  \"\"\"Tracks current function name and the number of lines in its body.\"\"\"\n\n  _NORMAL_TRIGGER = 250  # for --v=0, 500 for --v=1, etc.\n  _TEST_TRIGGER = 400    # about 50% more than _NORMAL_TRIGGER.\n\n  def __init__(self):\n    self.in_a_function = False\n    self.lines_in_function = 0\n    self.current_function = ''\n\n  def Begin(self, function_name):\n    \"\"\"Start 
analyzing function body.\n\n    Args:\n      function_name: The name of the function being tracked.\n    \"\"\"\n    self.in_a_function = True\n    self.lines_in_function = 0\n    self.current_function = function_name\n\n  def Count(self):\n    \"\"\"Count line in current function body.\"\"\"\n    if self.in_a_function:\n      self.lines_in_function += 1\n\n  def Check(self, error, filename, linenum):\n    \"\"\"Report if too many lines in function body.\n\n    Args:\n      error: The function to call with any errors found.\n      filename: The name of the current file.\n      linenum: The number of the line to check.\n    \"\"\"\n    if not self.in_a_function:\n      return\n\n    if Match(r'T(EST|est)', self.current_function):\n      base_trigger = self._TEST_TRIGGER\n    else:\n      base_trigger = self._NORMAL_TRIGGER\n    trigger = base_trigger * 2**_VerboseLevel()\n\n    if self.lines_in_function > trigger:\n      error_level = int(math.log(self.lines_in_function / base_trigger, 2))\n      # 50 => 0, 100 => 1, 200 => 2, 400 => 3, 800 => 4, 1600 => 5, ...\n      if error_level > 5:\n        error_level = 5\n      error(filename, linenum, 'readability/fn_size', error_level,\n            'Small and focused functions are preferred:'\n            ' %s has %d non-comment lines'\n            ' (error triggered by exceeding %d lines).'  
% (\n                self.current_function, self.lines_in_function, trigger))\n\n  def End(self):\n    \"\"\"Stop analyzing function body.\"\"\"\n    self.in_a_function = False\n\n\nclass _IncludeError(Exception):\n  \"\"\"Indicates a problem with the include order in a file.\"\"\"\n  pass\n\n\nclass FileInfo(object):\n  \"\"\"Provides utility functions for filenames.\n\n  FileInfo provides easy access to the components of a file's path\n  relative to the project root.\n  \"\"\"\n\n  def __init__(self, filename):\n    self._filename = filename\n\n  def FullName(self):\n    \"\"\"Make Windows paths like Unix.\"\"\"\n    return os.path.abspath(self._filename).replace('\\\\', '/')\n\n  def RepositoryName(self):\n    \"\"\"FullName after removing the local path to the repository.\n\n    If we have a real absolute path name here we can try to do something smart:\n    detecting the root of the checkout and truncating /path/to/checkout from\n    the name so that we get header guards that don't include things like\n    \"C:\\Documents and Settings\\...\" or \"/home/username/...\" in them and thus\n    people on different computers who have checked the source out to different\n    locations won't see bogus errors.\n    \"\"\"\n    fullname = self.FullName()\n\n    if os.path.exists(fullname):\n      project_dir = os.path.dirname(fullname)\n\n      if os.path.exists(os.path.join(project_dir, \".svn\")):\n        # If there's a .svn file in the current directory, we recursively look\n        # up the directory tree for the top of the SVN checkout\n        root_dir = project_dir\n        one_up_dir = os.path.dirname(root_dir)\n        while os.path.exists(os.path.join(one_up_dir, \".svn\")):\n          root_dir = os.path.dirname(root_dir)\n          one_up_dir = os.path.dirname(one_up_dir)\n\n        prefix = os.path.commonprefix([root_dir, project_dir])\n        return fullname[len(prefix) + 1:]\n\n      # Not SVN <= 1.6? 
Try to find a git, hg, or svn top level directory by\n      # searching up from the current path.\n      root_dir = current_dir = os.path.dirname(fullname)\n      while current_dir != os.path.dirname(current_dir):\n        if (os.path.exists(os.path.join(current_dir, \".git\")) or\n            os.path.exists(os.path.join(current_dir, \".hg\")) or\n            os.path.exists(os.path.join(current_dir, \".svn\"))):\n          root_dir = current_dir\n        current_dir = os.path.dirname(current_dir)\n\n      if (os.path.exists(os.path.join(root_dir, \".git\")) or\n          os.path.exists(os.path.join(root_dir, \".hg\")) or\n          os.path.exists(os.path.join(root_dir, \".svn\"))):\n        prefix = os.path.commonprefix([root_dir, project_dir])\n        return fullname[len(prefix) + 1:]\n\n    # Don't know what to do; header guard warnings may be wrong...\n    return fullname\n\n  def Split(self):\n    \"\"\"Splits the file into the directory, basename, and extension.\n\n    For 'chrome/browser/browser.cc', Split() would\n    return ('chrome/browser', 'browser', '.cc')\n\n    Returns:\n      A tuple of (directory, basename, extension).\n    \"\"\"\n\n    googlename = self.RepositoryName()\n    project, rest = os.path.split(googlename)\n    return (project,) + os.path.splitext(rest)\n\n  def BaseName(self):\n    \"\"\"File base name - text after the final slash, before the final period.\"\"\"\n    return self.Split()[1]\n\n  def Extension(self):\n    \"\"\"File extension - text following the final period.\"\"\"\n    return self.Split()[2]\n\n  def NoExtension(self):\n    \"\"\"File has no source file extension.\"\"\"\n    return '/'.join(self.Split()[0:2])\n\n  def IsSource(self):\n    \"\"\"File has a source file extension.\"\"\"\n    return _IsSourceExtension(self.Extension()[1:])\n\n\ndef _ShouldPrintError(category, confidence, linenum):\n  \"\"\"If confidence >= verbose, category passes filter and is not suppressed.\"\"\"\n\n  # There are three ways we might 
decide not to print an error message:\n  # a \"NOLINT(category)\" comment appears in the source,\n  # the verbosity level isn't high enough, or the filters filter it out.\n  if IsErrorSuppressedByNolint(category, linenum):\n    return False\n\n  if confidence < _cpplint_state.verbose_level:\n    return False\n\n  is_filtered = False\n  for one_filter in _Filters():\n    if one_filter.startswith('-'):\n      if category.startswith(one_filter[1:]):\n        is_filtered = True\n    elif one_filter.startswith('+'):\n      if category.startswith(one_filter[1:]):\n        is_filtered = False\n    else:\n      assert False  # should have been checked for in SetFilter.\n  if is_filtered:\n    return False\n\n  return True\n\n\ndef Error(filename, linenum, category, confidence, message):\n  \"\"\"Logs the fact we've found a lint error.\n\n  We log where the error was found, and also our confidence in the error,\n  that is, how certain we are this is a legitimate style regression, and\n  not a misidentification or a use that's sometimes justified.\n\n  False positives can be suppressed by the use of\n  \"cpplint(category)\"  comments on the offending line.  These are\n  parsed into _error_suppressions.\n\n  Args:\n    filename: The name of the file containing the error.\n    linenum: The number of the line containing the error.\n    category: A string used to describe the \"category\" this bug\n      falls under: \"whitespace\", say, or \"runtime\".  
Categories\n      may have a hierarchy separated by slashes: \"whitespace/indent\".\n    confidence: A number from 1-5 representing a confidence score for\n      the error, with 5 meaning that we are certain of the problem,\n      and 1 meaning that it could be a legitimate construct.\n    message: The error message.\n  \"\"\"\n  if _ShouldPrintError(category, confidence, linenum):\n    _cpplint_state.IncrementErrorCount(category)\n    if _cpplint_state.output_format == 'vs7':\n      sys.stderr.write('%s(%s): error cpplint: [%s] %s [%d]\\n' % (\n          filename, linenum, category, message, confidence))\n    elif _cpplint_state.output_format == 'eclipse':\n      sys.stderr.write('%s:%s: warning: %s  [%s] [%d]\\n' % (\n          filename, linenum, message, category, confidence))\n    else:\n      sys.stderr.write('%s:%s:  %s  [%s] [%d]\\n' % (\n          filename, linenum, message, category, confidence))\n\n\n# Matches standard C++ escape sequences per 2.13.2.3 of the C++ standard.\n_RE_PATTERN_CLEANSE_LINE_ESCAPES = re.compile(\n    r'\\\\([abfnrtv?\"\\\\\\']|\\d+|x[0-9a-fA-F]+)')\n# Match a single C style comment on the same line.\n_RE_PATTERN_C_COMMENTS = r'/\\*(?:[^*]|\\*(?!/))*\\*/'\n# Matches multi-line C style comments.\n# This RE is a little bit more complicated than one might expect, because we\n# have to take care of space removals tools so we can handle comments inside\n# statements better.\n# The current rule is: We only clear spaces from both sides when we're at the\n# end of the line. 
Otherwise, we try to remove spaces from the right side,\n# if this doesn't work we try on left side but only if there's a non-character\n# on the right.\n_RE_PATTERN_CLEANSE_LINE_C_COMMENTS = re.compile(\n    r'(\\s*' + _RE_PATTERN_C_COMMENTS + r'\\s*$|' +\n    _RE_PATTERN_C_COMMENTS + r'\\s+|' +\n    r'\\s+' + _RE_PATTERN_C_COMMENTS + r'(?=\\W)|' +\n    _RE_PATTERN_C_COMMENTS + r')')\n\n\ndef IsCppString(line):\n  \"\"\"Does line terminate so, that the next symbol is in string constant.\n\n  This function does not consider single-line nor multi-line comments.\n\n  Args:\n    line: is a partial line of code starting from the 0..n.\n\n  Returns:\n    True, if next character appended to 'line' is inside a\n    string constant.\n  \"\"\"\n\n  line = line.replace(r'\\\\', 'XX')  # after this, \\\\\" does not match to \\\"\n  return ((line.count('\"') - line.count(r'\\\"') - line.count(\"'\\\"'\")) & 1) == 1\n\n\ndef CleanseRawStrings(raw_lines):\n  \"\"\"Removes C++11 raw strings from lines.\n\n    Before:\n      static const char kData[] = R\"(\n          multi-line string\n          )\";\n\n    After:\n      static const char kData[] = \"\"\n          (replaced by blank line)\n          \"\";\n\n  Args:\n    raw_lines: list of raw lines.\n\n  Returns:\n    list of lines with C++11 raw strings replaced by empty strings.\n  \"\"\"\n\n  delimiter = None\n  lines_without_raw_strings = []\n  for line in raw_lines:\n    if delimiter:\n      # Inside a raw string, look for the end\n      end = line.find(delimiter)\n      if end >= 0:\n        # Found the end of the string, match leading space for this\n        # line and resume copying the original lines, and also insert\n        # a \"\" on the last line.\n        leading_space = Match(r'^(\\s*)\\S', line)\n        line = leading_space.group(1) + '\"\"' + line[end + len(delimiter):]\n        delimiter = None\n      else:\n        # Haven't found the end yet, append a blank line.\n        line = '\"\"'\n\n    # Look for 
beginning of a raw string, and replace them with\n    # empty strings.  This is done in a loop to handle multiple raw\n    # strings on the same line.\n    while delimiter is None:\n      # Look for beginning of a raw string.\n      # See 2.14.15 [lex.string] for syntax.\n      #\n      # Once we have matched a raw string, we check the prefix of the\n      # line to make sure that the line is not part of a single line\n      # comment.  It's done this way because we remove raw strings\n      # before removing comments as opposed to removing comments\n      # before removing raw strings.  This is because there are some\n      # cpplint checks that requires the comments to be preserved, but\n      # we don't want to check comments that are inside raw strings.\n      matched = Match(r'^(.*?)\\b(?:R|u8R|uR|UR|LR)\"([^\\s\\\\()]*)\\((.*)$', line)\n      if (matched and\n          not Match(r'^([^\\'\"]|\\'(\\\\.|[^\\'])*\\'|\"(\\\\.|[^\"])*\")*//',\n                    matched.group(1))):\n        delimiter = ')' + matched.group(2) + '\"'\n\n        end = matched.group(3).find(delimiter)\n        if end >= 0:\n          # Raw string ended on same line\n          line = (matched.group(1) + '\"\"' +\n                  matched.group(3)[end + len(delimiter):])\n          delimiter = None\n        else:\n          # Start of a multi-line raw string\n          line = matched.group(1) + '\"\"'\n      else:\n        break\n\n    lines_without_raw_strings.append(line)\n\n  # TODO(unknown): if delimiter is not None here, we might want to\n  # emit a warning for unterminated string.\n  return lines_without_raw_strings\n\n\ndef FindNextMultiLineCommentStart(lines, lineix):\n  \"\"\"Find the beginning marker for a multiline comment.\"\"\"\n  while lineix < len(lines):\n    if lines[lineix].strip().startswith('/*'):\n      # Only return this marker if the comment goes beyond this line\n      if lines[lineix].strip().find('*/', 2) < 0:\n        return lineix\n    lineix += 1\n  return 
len(lines)\n\n\ndef FindNextMultiLineCommentEnd(lines, lineix):\n  \"\"\"We are inside a comment, find the end marker.\"\"\"\n  while lineix < len(lines):\n    if lines[lineix].strip().endswith('*/'):\n      return lineix\n    lineix += 1\n  return len(lines)\n\n\ndef RemoveMultiLineCommentsFromRange(lines, begin, end):\n  \"\"\"Clears a range of lines for multi-line comments.\"\"\"\n  # Having // dummy comments makes the lines non-empty, so we will not get\n  # unnecessary blank line warnings later in the code.\n  for i in range(begin, end):\n    lines[i] = '/**/'\n\n\ndef RemoveMultiLineComments(filename, lines, error):\n  \"\"\"Removes multiline (c-style) comments from lines.\"\"\"\n  lineix = 0\n  while lineix < len(lines):\n    lineix_begin = FindNextMultiLineCommentStart(lines, lineix)\n    if lineix_begin >= len(lines):\n      return\n    lineix_end = FindNextMultiLineCommentEnd(lines, lineix_begin)\n    if lineix_end >= len(lines):\n      error(filename, lineix_begin + 1, 'readability/multiline_comment', 5,\n            'Could not find end of multi-line comment')\n      return\n    RemoveMultiLineCommentsFromRange(lines, lineix_begin, lineix_end + 1)\n    lineix = lineix_end + 1\n\n\ndef CleanseComments(line):\n  \"\"\"Removes //-comments and single-line C-style /* */ comments.\n\n  Args:\n    line: A line of C++ source.\n\n  Returns:\n    The line with single-line comments removed.\n  \"\"\"\n  commentpos = line.find('//')\n  if commentpos != -1 and not IsCppString(line[:commentpos]):\n    line = line[:commentpos].rstrip()\n  # get rid of /* ... 
*/\n  return _RE_PATTERN_CLEANSE_LINE_C_COMMENTS.sub('', line)\n\n\nclass CleansedLines(object):\n  \"\"\"Holds 4 copies of all lines with different preprocessing applied to them.\n\n  1) elided member contains lines without strings and comments.\n  2) lines member contains lines without comments.\n  3) raw_lines member contains all the lines without processing.\n  4) lines_without_raw_strings member is same as raw_lines, but with C++11 raw\n     strings removed.\n  All these members are of <type 'list'>, and of the same length.\n  \"\"\"\n\n  def __init__(self, lines):\n    self.elided = []\n    self.lines = []\n    self.raw_lines = lines\n    self.num_lines = len(lines)\n    self.lines_without_raw_strings = CleanseRawStrings(lines)\n    for linenum in range(len(self.lines_without_raw_strings)):\n      self.lines.append(CleanseComments(\n          self.lines_without_raw_strings[linenum]))\n      elided = self._CollapseStrings(self.lines_without_raw_strings[linenum])\n      self.elided.append(CleanseComments(elided))\n\n  def NumLines(self):\n    \"\"\"Returns the number of lines represented.\"\"\"\n    return self.num_lines\n\n  @staticmethod\n  def _CollapseStrings(elided):\n    \"\"\"Collapses strings and chars on a line to simple \"\" or '' blocks.\n\n    We nix strings first so we're not fooled by text like '\"http://\"'\n\n    Args:\n      elided: The line being processed.\n\n    Returns:\n      The line with collapsed strings.\n    \"\"\"\n    if _RE_PATTERN_INCLUDE.match(elided):\n      return elided\n\n    # Remove escaped characters first to make quote/single quote collapsing\n    # basic.  Things that look like escaped characters shouldn't occur\n    # outside of strings and chars.\n    elided = _RE_PATTERN_CLEANSE_LINE_ESCAPES.sub('', elided)\n\n    # Replace quoted strings and digit separators.  
Both single quotes\n    # and double quotes are processed in the same loop, otherwise\n    # nested quotes wouldn't work.\n    collapsed = ''\n    while True:\n      # Find the first quote character\n      match = Match(r'^([^\\'\"]*)([\\'\"])(.*)$', elided)\n      if not match:\n        collapsed += elided\n        break\n      head, quote, tail = match.groups()\n\n      if quote == '\"':\n        # Collapse double quoted strings\n        second_quote = tail.find('\"')\n        if second_quote >= 0:\n          collapsed += head + '\"\"'\n          elided = tail[second_quote + 1:]\n        else:\n          # Unmatched double quote, don't bother processing the rest\n          # of the line since this is probably a multiline string.\n          collapsed += elided\n          break\n      else:\n        # Found single quote, check nearby text to eliminate digit separators.\n        #\n        # There is no special handling for floating point here, because\n        # the integer/fractional/exponent parts would all be parsed\n        # correctly as long as there are digits on both sides of the\n        # separator.  
So we are fine as long as we don't see something\n        # like \"0.'3\" (gcc 4.9.0 will not allow this literal).\n        if Search(r'\\b(?:0[bBxX]?|[1-9])[0-9a-fA-F]*$', head):\n          match_literal = Match(r'^((?:\\'?[0-9a-zA-Z_])*)(.*)$', \"'\" + tail)\n          collapsed += head + match_literal.group(1).replace(\"'\", '')\n          elided = match_literal.group(2)\n        else:\n          second_quote = tail.find('\\'')\n          if second_quote >= 0:\n            collapsed += head + \"''\"\n            elided = tail[second_quote + 1:]\n          else:\n            # Unmatched single quote\n            collapsed += elided\n            break\n\n    return collapsed\n\n\ndef FindEndOfExpressionInLine(line, startpos, stack):\n  \"\"\"Find the position just after the end of current parenthesized expression.\n\n  Args:\n    line: a CleansedLines line.\n    startpos: start searching at this position.\n    stack: nesting stack at startpos.\n\n  Returns:\n    On finding matching end: (index just after matching end, None)\n    On finding an unclosed expression: (-1, None)\n    Otherwise: (-1, new stack at end of this line)\n  \"\"\"\n  for i in xrange(startpos, len(line)):\n    char = line[i]\n    if char in '([{':\n      # Found start of parenthesized expression, push to expression stack\n      stack.append(char)\n    elif char == '<':\n      # Found potential start of template argument list\n      if i > 0 and line[i - 1] == '<':\n        # Left shift operator\n        if stack and stack[-1] == '<':\n          stack.pop()\n          if not stack:\n            return (-1, None)\n      elif i > 0 and Search(r'\\boperator\\s*$', line[0:i]):\n        # operator<, don't add to stack\n        continue\n      else:\n        # Tentative start of template argument list\n        stack.append('<')\n    elif char in ')]}':\n      # Found end of parenthesized expression.\n      #\n      # If we are currently expecting a matching '>', the pending '<'\n      # must have been 
an operator.  Remove them from expression stack.\n      while stack and stack[-1] == '<':\n        stack.pop()\n      if not stack:\n        return (-1, None)\n      if ((stack[-1] == '(' and char == ')') or\n          (stack[-1] == '[' and char == ']') or\n          (stack[-1] == '{' and char == '}')):\n        stack.pop()\n        if not stack:\n          return (i + 1, None)\n      else:\n        # Mismatched parentheses\n        return (-1, None)\n    elif char == '>':\n      # Found potential end of template argument list.\n\n      # Ignore \"->\" and operator functions\n      if (i > 0 and\n          (line[i - 1] == '-' or Search(r'\\boperator\\s*$', line[0:i - 1]))):\n        continue\n\n      # Pop the stack if there is a matching '<'.  Otherwise, ignore\n      # this '>' since it must be an operator.\n      if stack:\n        if stack[-1] == '<':\n          stack.pop()\n          if not stack:\n            return (i + 1, None)\n    elif char == ';':\n      # Found something that look like end of statements.  
If we are currently\n      # expecting a '>', the matching '<' must have been an operator, since\n      # template argument list should not contain statements.\n      while stack and stack[-1] == '<':\n        stack.pop()\n      if not stack:\n        return (-1, None)\n\n  # Did not find end of expression or unbalanced parentheses on this line\n  return (-1, stack)\n\n\ndef CloseExpression(clean_lines, linenum, pos):\n  \"\"\"If input points to ( or { or [ or <, finds the position that closes it.\n\n  If lines[linenum][pos] points to a '(' or '{' or '[' or '<', finds the\n  linenum/pos that correspond to the closing of the expression.\n\n  TODO(unknown): cpplint spends a fair bit of time matching parentheses.\n  Ideally we would want to index all opening and closing parentheses once\n  and have CloseExpression be just a simple lookup, but due to preprocessor\n  tricks, this is not so easy.\n\n  Args:\n    clean_lines: A CleansedLines instance containing the file.\n    linenum: The number of the line to check.\n    pos: A position on the line.\n\n  Returns:\n    A tuple (line, linenum, pos) pointer *past* the closing brace, or\n    (line, len(lines), -1) if we never find a close.  
Note we ignore\n    strings and comments when matching; and the line we return is the\n    'cleansed' line at linenum.\n  \"\"\"\n\n  line = clean_lines.elided[linenum]\n  if (line[pos] not in '({[<') or Match(r'<[<=]', line[pos:]):\n    return (line, clean_lines.NumLines(), -1)\n\n  # Check first line\n  (end_pos, stack) = FindEndOfExpressionInLine(line, pos, [])\n  if end_pos > -1:\n    return (line, linenum, end_pos)\n\n  # Continue scanning forward\n  while stack and linenum < clean_lines.NumLines() - 1:\n    linenum += 1\n    line = clean_lines.elided[linenum]\n    (end_pos, stack) = FindEndOfExpressionInLine(line, 0, stack)\n    if end_pos > -1:\n      return (line, linenum, end_pos)\n\n  # Did not find end of expression before end of file, give up\n  return (line, clean_lines.NumLines(), -1)\n\n\ndef FindStartOfExpressionInLine(line, endpos, stack):\n  \"\"\"Find position at the matching start of current expression.\n\n  This is almost the reverse of FindEndOfExpressionInLine, but note\n  that the input position and returned position differs by 1.\n\n  Args:\n    line: a CleansedLines line.\n    endpos: start searching at this position.\n    stack: nesting stack at endpos.\n\n  Returns:\n    On finding matching start: (index at matching start, None)\n    On finding an unclosed expression: (-1, None)\n    Otherwise: (-1, new stack at beginning of this line)\n  \"\"\"\n  i = endpos\n  while i >= 0:\n    char = line[i]\n    if char in ')]}':\n      # Found end of expression, push to expression stack\n      stack.append(char)\n    elif char == '>':\n      # Found potential end of template argument list.\n      #\n      # Ignore it if it's a \"->\" or \">=\" or \"operator>\"\n      if (i > 0 and\n          (line[i - 1] == '-' or\n           Match(r'\\s>=\\s', line[i - 1:]) or\n           Search(r'\\boperator\\s*$', line[0:i]))):\n        i -= 1\n      else:\n        stack.append('>')\n    elif char == '<':\n      # Found potential start of template argument 
list\n      if i > 0 and line[i - 1] == '<':\n        # Left shift operator\n        i -= 1\n      else:\n        # If there is a matching '>', we can pop the expression stack.\n        # Otherwise, ignore this '<' since it must be an operator.\n        if stack and stack[-1] == '>':\n          stack.pop()\n          if not stack:\n            return (i, None)\n    elif char in '([{':\n      # Found start of expression.\n      #\n      # If there are any unmatched '>' on the stack, they must be\n      # operators.  Remove those.\n      while stack and stack[-1] == '>':\n        stack.pop()\n      if not stack:\n        return (-1, None)\n      if ((char == '(' and stack[-1] == ')') or\n          (char == '[' and stack[-1] == ']') or\n          (char == '{' and stack[-1] == '}')):\n        stack.pop()\n        if not stack:\n          return (i, None)\n      else:\n        # Mismatched parentheses\n        return (-1, None)\n    elif char == ';':\n      # Found something that look like end of statements.  If we are currently\n      # expecting a '<', the matching '>' must have been an operator, since\n      # template argument list should not contain statements.\n      while stack and stack[-1] == '>':\n        stack.pop()\n      if not stack:\n        return (-1, None)\n\n    i -= 1\n\n  return (-1, stack)\n\n\ndef ReverseCloseExpression(clean_lines, linenum, pos):\n  \"\"\"If input points to ) or } or ] or >, finds the position that opens it.\n\n  If lines[linenum][pos] points to a ')' or '}' or ']' or '>', finds the\n  linenum/pos that correspond to the opening of the expression.\n\n  Args:\n    clean_lines: A CleansedLines instance containing the file.\n    linenum: The number of the line to check.\n    pos: A position on the line.\n\n  Returns:\n    A tuple (line, linenum, pos) pointer *at* the opening brace, or\n    (line, 0, -1) if we never find the matching opening brace.  
Note\n    we ignore strings and comments when matching; and the line we\n    return is the 'cleansed' line at linenum.\n  \"\"\"\n  line = clean_lines.elided[linenum]\n  if line[pos] not in ')}]>':\n    return (line, 0, -1)\n\n  # Check last line\n  (start_pos, stack) = FindStartOfExpressionInLine(line, pos, [])\n  if start_pos > -1:\n    return (line, linenum, start_pos)\n\n  # Continue scanning backward\n  while stack and linenum > 0:\n    linenum -= 1\n    line = clean_lines.elided[linenum]\n    (start_pos, stack) = FindStartOfExpressionInLine(line, len(line) - 1, stack)\n    if start_pos > -1:\n      return (line, linenum, start_pos)\n\n  # Did not find start of expression before beginning of file, give up\n  return (line, 0, -1)\n\n\ndef CheckForCopyright(filename, lines, error):\n  \"\"\"Logs an error if no Copyright message appears at the top of the file.\"\"\"\n\n  # We'll say it should occur by line 10. Don't forget there's a\n  # dummy line at the front.\n  for line in xrange(1, min(len(lines), 11)):\n    if re.search(r'Copyright', lines[line], re.I): break\n  else:                       # means no copyright line was found\n    error(filename, 0, 'legal/copyright', 5,\n          'No copyright message found.  '\n          'You should have a line: \"Copyright [year] <Copyright Owner>\"')\n\n\ndef GetIndentLevel(line):\n  \"\"\"Return the number of leading spaces in line.\n\n  Args:\n    line: A string to check.\n\n  Returns:\n    An integer count of leading spaces, possibly zero.\n  \"\"\"\n  indent = Match(r'^( *)\\S', line)\n  if indent:\n    return len(indent.group(1))\n  else:\n    return 0\n\ndef PathSplitToList(path):\n  \"\"\"Returns the path split into a list by the separator.\n\n  Args:\n    path: An absolute or relative path (e.g. '/a/b/c/' or '../a')\n\n  Returns:\n    A list of path components (e.g. 
['a', 'b', 'c]).\n  \"\"\"\n  lst = []\n  while True:\n    (head, tail) = os.path.split(path)\n    if head == path: # absolute paths end\n      lst.append(head)\n      break\n    if tail == path: # relative paths end\n      lst.append(tail)\n      break\n\n    path = head\n    lst.append(tail)\n\n  lst.reverse()\n  return lst\n\ndef GetHeaderGuardCPPVariable(filename):\n  \"\"\"Returns the CPP variable that should be used as a header guard.\n\n  Args:\n    filename: The name of a C++ header file.\n\n  Returns:\n    The CPP variable that should be used as a header guard in the\n    named file.\n\n  \"\"\"\n\n  # Restores original filename in case that cpplint is invoked from Emacs's\n  # flymake.\n  filename = re.sub(r'_flymake\\.h$', '.h', filename)\n  filename = re.sub(r'/\\.flymake/([^/]*)$', r'/\\1', filename)\n  # Replace 'c++' with 'cpp'.\n  filename = filename.replace('C++', 'cpp').replace('c++', 'cpp')\n\n  fileinfo = FileInfo(filename)\n  file_path_from_root = fileinfo.RepositoryName()\n\n  def FixupPathFromRoot():\n    if _root_debug:\n      sys.stderr.write(\"\\n_root fixup, _root = '%s', repository name = '%s'\\n\"\n          %(_root, fileinfo.RepositoryName()))\n\n    # Process the file path with the --root flag if it was set.\n    if not _root:\n      if _root_debug:\n        sys.stderr.write(\"_root unspecified\\n\")\n      return file_path_from_root\n\n    def StripListPrefix(lst, prefix):\n      # f(['x', 'y'], ['w, z']) -> None  (not a valid prefix)\n      if lst[:len(prefix)] != prefix:\n        return None\n      # f(['a, 'b', 'c', 'd'], ['a', 'b']) -> ['c', 'd']\n      return lst[(len(prefix)):]\n\n    # root behavior:\n    #   --root=subdir , lstrips subdir from the header guard\n    maybe_path = StripListPrefix(PathSplitToList(file_path_from_root),\n                                 PathSplitToList(_root))\n\n    if _root_debug:\n      sys.stderr.write((\"_root lstrip (maybe_path=%s, file_path_from_root=%s,\" +\n          \" _root=%s)\\n\") 
%(maybe_path, file_path_from_root, _root))\n\n    if maybe_path:\n      return os.path.join(*maybe_path)\n\n    #   --root=.. , will prepend the outer directory to the header guard\n    full_path = fileinfo.FullName()\n    root_abspath = os.path.abspath(_root)\n\n    maybe_path = StripListPrefix(PathSplitToList(full_path),\n                                 PathSplitToList(root_abspath))\n\n    if _root_debug:\n      sys.stderr.write((\"_root prepend (maybe_path=%s, full_path=%s, \" +\n          \"root_abspath=%s)\\n\") %(maybe_path, full_path, root_abspath))\n\n    if maybe_path:\n      return os.path.join(*maybe_path)\n\n    if _root_debug:\n      sys.stderr.write(\"_root ignore, returning %s\\n\" %(file_path_from_root))\n\n    #   --root=FAKE_DIR is ignored\n    return file_path_from_root\n\n  file_path_from_root = FixupPathFromRoot()\n  return re.sub(r'[^a-zA-Z0-9]', '_', file_path_from_root).upper() + '_'\n\n\ndef CheckForHeaderGuard(filename, clean_lines, error):\n  \"\"\"Checks that the file contains a header guard.\n\n  Logs an error if no #ifndef header guard is present.  
For other\n  headers, checks that the full pathname is used.\n\n  Args:\n    filename: The name of the C++ header file.\n    clean_lines: A CleansedLines instance containing the file.\n    error: The function to call with any errors found.\n  \"\"\"\n\n  # Don't check for header guards if there are error suppression\n  # comments somewhere in this file.\n  #\n  # Because this is silencing a warning for a nonexistent line, we\n  # only support the very specific NOLINT(build/header_guard) syntax,\n  # and not the general NOLINT or NOLINT(*) syntax.\n  raw_lines = clean_lines.lines_without_raw_strings\n  for i in raw_lines:\n    if Search(r'//\\s*NOLINT\\(build/header_guard\\)', i):\n      return\n\n  cppvar = GetHeaderGuardCPPVariable(filename)\n\n  ifndef = ''\n  ifndef_linenum = 0\n  define = ''\n  endif = ''\n  endif_linenum = 0\n  for linenum, line in enumerate(raw_lines):\n    linesplit = line.split()\n    if len(linesplit) >= 2:\n      # find the first occurrence of #ifndef and #define, save arg\n      if not ifndef and linesplit[0] == '#ifndef':\n        # set ifndef to the header guard presented on the #ifndef line.\n        ifndef = linesplit[1]\n        ifndef_linenum = linenum\n      if not define and linesplit[0] == '#define':\n        define = linesplit[1]\n    # find the last occurrence of #endif, save entire line\n    if line.startswith('#endif'):\n      endif = line\n      endif_linenum = linenum\n\n  if not ifndef or not define or ifndef != define:\n    error(filename, 0, 'build/header_guard', 5,\n          'No #ifndef header guard found, suggested CPP variable is: %s' %\n          cppvar)\n    return\n\n  # The guard should be PATH_FILE_H_, but we also allow PATH_FILE_H__\n  # for backward compatibility.\n  if ifndef != cppvar:\n    error_level = 0\n    if ifndef != cppvar + '_':\n      error_level = 5\n\n    ParseNolintSuppressions(filename, raw_lines[ifndef_linenum], ifndef_linenum,\n                            error)\n    error(filename, 
ifndef_linenum, 'build/header_guard', error_level,\n          '#ifndef header guard has wrong style, please use: %s' % cppvar)\n\n  # Check for \"//\" comments on endif line.\n  ParseNolintSuppressions(filename, raw_lines[endif_linenum], endif_linenum,\n                          error)\n  match = Match(r'#endif\\s*//\\s*' + cppvar + r'(_)?\\b', endif)\n  if match:\n    if match.group(1) == '_':\n      # Issue low severity warning for deprecated double trailing underscore\n      error(filename, endif_linenum, 'build/header_guard', 0,\n            '#endif line should be \"#endif  // %s\"' % cppvar)\n    return\n\n  # Didn't find the corresponding \"//\" comment.  If this file does not\n  # contain any \"//\" comments at all, it could be that the compiler\n  # only wants \"/**/\" comments, look for those instead.\n  no_single_line_comments = True\n  for i in xrange(1, len(raw_lines) - 1):\n    line = raw_lines[i]\n    if Match(r'^(?:(?:\\'(?:\\.|[^\\'])*\\')|(?:\"(?:\\.|[^\"])*\")|[^\\'\"])*//', line):\n      no_single_line_comments = False\n      break\n\n  if no_single_line_comments:\n    match = Match(r'#endif\\s*/\\*\\s*' + cppvar + r'(_)?\\s*\\*/', endif)\n    if match:\n      if match.group(1) == '_':\n        # Low severity warning for double trailing underscore\n        error(filename, endif_linenum, 'build/header_guard', 0,\n              '#endif line should be \"#endif  /* %s */\"' % cppvar)\n      return\n\n  # Didn't find anything\n  error(filename, endif_linenum, 'build/header_guard', 5,\n        '#endif line should be \"#endif  // %s\"' % cppvar)\n\n\ndef CheckHeaderFileIncluded(filename, include_state, error):\n  \"\"\"Logs an error if a .cc file does not include its header.\"\"\"\n\n  # Do not check test files\n  fileinfo = FileInfo(filename)\n  if Search(_TEST_FILE_SUFFIX, fileinfo.BaseName()):\n    return\n\n  headerfile = filename[0:len(filename) - len(fileinfo.Extension())] + '.h'\n  if not os.path.exists(headerfile):\n    return\n  headername = 
def CheckForBadCharacters(filename, lines, error):
  """Logs an error for each line containing bad characters.

  Two kinds of bad characters are detected:

  1. Unicode replacement characters: these indicate that either the file
     contained invalid UTF-8 (likely) or Unicode replacement characters
     (which it shouldn't).  Note that it's possible for this to throw off
     line numbering if the invalid UTF-8 occurred adjacent to a newline.

  2. NUL bytes, which are problematic for some tools.

  Args:
    filename: The name of the current file.
    lines: An array of strings, each representing a line of the file.
    error: The function to call with any errors found.
  """
  for num, text in enumerate(lines):
    if u'\ufffd' in text:
      error(filename, num, 'readability/utf8', 5,
            'Line contains invalid UTF-8 (or Unicode replacement character).')
    if '\0' in text:
      error(filename, num, 'readability/nul', 5, 'Line contains NUL byte.')


def CheckForNewlineAtEOF(filename, lines, error):
  """Logs an error if there is no newline char at the end of the file.

  Args:
    filename: The name of the current file.
    lines: An array of strings, each representing a line of the file.
    error: The function to call with any errors found.
  """
  # The lines array was created by appending two newlines to the original
  # file text and then splitting on \n, so a file that ends with a newline
  # yields an empty last-but-two element.
  ends_with_newline = len(lines) >= 3 and not lines[-2]
  if not ends_with_newline:
    error(filename, len(lines) - 2, 'whitespace/ending_newline', 5,
          'Could not find a newline character at the end of the file.')


def CheckForMultilineCommentsAndStrings(filename, clean_lines, linenum, error):
  """Logs an error if we see /* ... */ or "..." that extend past one line.

  /* ... */ comments are legit inside macros, for one line.
  Otherwise, we prefer // comments, so it's ok to warn about the
  other.  Likewise, it's ok for strings to extend across multiple
  lines, as long as a line continuation character (backslash)
  terminates each line. Although not currently prohibited by the C++
  style guide, it's ugly and unnecessary. We don't do well with either
  in this lint program, so we warn about both.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    error: The function to call with any errors found.
  """
  line = clean_lines.elided[linenum]

  # Strip escaped backslashes first: they are legal, and the second
  # (escaped) backslash could otherwise make the \" detection below misfire.
  line = line.replace('\\\\', '')

  has_unterminated_comment = line.count('/*') > line.count('*/')
  if has_unterminated_comment:
    error(filename, linenum, 'readability/multiline_comment', 5,
          'Complex multi-line /*...*/-style comment found. '
          'Lint may give bogus warnings.  '
          'Consider replacing these with //-style comments, '
          'with #if 0...#endif, '
          'or with more clearly structured multi-line comments.')

  unescaped_quotes = line.count('"') - line.count('\\"')
  if unescaped_quotes % 2:
    error(filename, linenum, 'readability/multiline_string', 5,
          'Multi-line string ("...") found.  This lint script doesn\'t '
          'do well with such strings, and may give bogus warnings.  '
          'Use C++11 raw strings or concatenation instead.')


# (non-threadsafe name, thread-safe alternative, validation pattern)
#
# The validation pattern is used to eliminate false positives such as:
#  _rand();               // false positive due to substring match.
#  ->rand();              // some member function rand().
#  ACMRandom rand(seed);  // some variable named rand.
#  ISAACRandom rand();    // another variable named rand.
#
# Basically we require the return value of these functions to be used
# in some expression context on the same line by matching on some
# operator before the function name.  This eliminates constructors and
# member function calls.
_UNSAFE_FUNC_PREFIX = r'(?:[-+*/=%^&|(<]\s*|>\s+)'
_THREADING_LIST = (
    ('asctime(', 'asctime_r(', _UNSAFE_FUNC_PREFIX + r'asctime\([^)]+\)'),
    ('ctime(', 'ctime_r(', _UNSAFE_FUNC_PREFIX + r'ctime\([^)]+\)'),
    ('getgrgid(', 'getgrgid_r(', _UNSAFE_FUNC_PREFIX + r'getgrgid\([^)]+\)'),
    ('getgrnam(', 'getgrnam_r(', _UNSAFE_FUNC_PREFIX + r'getgrnam\([^)]+\)'),
    ('getlogin(', 'getlogin_r(', _UNSAFE_FUNC_PREFIX + r'getlogin\(\)'),
    ('getpwnam(', 'getpwnam_r(', _UNSAFE_FUNC_PREFIX + r'getpwnam\([^)]+\)'),
    ('getpwuid(', 'getpwuid_r(', _UNSAFE_FUNC_PREFIX + r'getpwuid\([^)]+\)'),
    ('gmtime(', 'gmtime_r(', _UNSAFE_FUNC_PREFIX + r'gmtime\([^)]+\)'),
    ('localtime(', 'localtime_r(', _UNSAFE_FUNC_PREFIX + r'localtime\([^)]+\)'),
    ('rand(', 'rand_r(', _UNSAFE_FUNC_PREFIX + r'rand\(\)'),
    ('strtok(', 'strtok_r(', _UNSAFE_FUNC_PREFIX + r'strtok\([^)]+\)'),
    ('ttyname(', 'ttyname_r(', _UNSAFE_FUNC_PREFIX + r'ttyname\([^)]+\)'),
    )


def CheckPosixThreading(filename, clean_lines, linenum, error):
  """Checks for calls to thread-unsafe functions.

  Much code has been originally written without consideration of
  multi-threading.  Also, engineers are relying on their old experience;
  they have learned posix before threading extensions were added. These
  tests guide the engineers to use thread-safe functions (when using
  posix directly).

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    error: The function to call with any errors found.
  """
  line = clean_lines.elided[linenum]
  for unsafe_func, safe_func, validation_pattern in _THREADING_LIST:
    # The pattern requires an operator before the call so that we only
    # flag expression-context uses of the unsafe function.
    if Search(validation_pattern, line):
      error(filename, linenum, 'runtime/threadsafe_fn', 2,
            'Consider using ' + safe_func +
            '...) instead of ' + unsafe_func +
            '...) for improved thread safety.')
def CheckVlogArguments(filename, clean_lines, linenum, error):
  """Checks that VLOG() is only used for defining a logging level.

  For example, VLOG(2) is correct. VLOG(INFO), VLOG(WARNING), VLOG(ERROR), and
  VLOG(FATAL) are not.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    error: The function to call with any errors found.
  """
  line = clean_lines.elided[linenum]
  symbolic_level = Search(r'\bVLOG\((INFO|ERROR|WARNING|DFATAL|FATAL)\)', line)
  if symbolic_level:
    error(filename, linenum, 'runtime/vlog', 5,
          'VLOG() should be used with numeric verbosity level.  '
          'Use LOG() if you want symbolic severity levels.')

# Matches invalid increment: *count++, which moves pointer instead of
# incrementing a value.
_RE_PATTERN_INVALID_INCREMENT = re.compile(
    r'^\s*\*\w+(\+\+|--);')


def CheckInvalidIncrement(filename, clean_lines, linenum, error):
  """Checks for invalid increment *count++.

  For example following function:
  void increment_counter(int* count) {
    *count++;
  }
  is invalid, because it effectively does count++, moving pointer, and should
  be replaced with ++*count, (*count)++ or *count += 1.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    error: The function to call with any errors found.
  """
  if _RE_PATTERN_INVALID_INCREMENT.match(clean_lines.elided[linenum]):
    error(filename, linenum, 'runtime/invalid_increment', 5,
          'Changing pointer instead of value (or unused value of operator*).')


def IsMacroDefinition(clean_lines, linenum):
  """Returns True if this line defines (or continues) a preprocessor macro."""
  if Search(r'^#define', clean_lines[linenum]):
    return True
  # A line is also part of a macro when the previous line ends with a
  # continuation backslash.
  return linenum > 0 and bool(Search(r'\\$', clean_lines[linenum - 1]))


def IsForwardClassDeclaration(clean_lines, linenum):
  """Returns a match when this line is a (possibly templated) forward decl."""
  line = clean_lines[linenum]
  return Match(r'^\s*(\btemplate\b)*.*class\s+\w+;\s*$', line)


class _BlockInfo(object):
  """Stores information about a generic block of code."""

  def __init__(self, linenum, seen_open_brace):
    # Line on which this block started.
    self.starting_linenum = linenum
    # Whether the opening brace for this block has been consumed yet.
    self.seen_open_brace = seen_open_brace
    # Running count of unbalanced parentheses inside this block.
    self.open_parentheses = 0
    # Inline-assembly parsing state for this block.
    self.inline_asm = _NO_ASM
    # Whether namespace-indentation checks apply inside this block.
    self.check_namespace_indentation = False

  def CheckBegin(self, filename, clean_lines, linenum, error):
    """Run checks that applies to text up to the opening brace.

    This is mostly for checking the text after the class identifier
    and the "{", usually where the base class is specified.  For other
    blocks, there isn't much to check, so we always pass.

    Args:
      filename: The name of the current file.
      clean_lines: A CleansedLines instance containing the file.
      linenum: The number of the line to check.
      error: The function to call with any errors found.
    """
    pass

  def CheckEnd(self, filename, clean_lines, linenum, error):
    """Run checks that applies to text after the closing brace.

    This is mostly used for checking end of namespace comments.

    Args:
      filename: The name of the current file.
      clean_lines: A CleansedLines instance containing the file.
      linenum: The number of the line to check.
      error: The function to call with any errors found.
    """
    pass

  def IsBlockInfo(self):
    """Returns true if this block is a _BlockInfo.

    This is convenient for verifying that an object is an instance of
    a _BlockInfo, but not an instance of any of the derived classes.

    Returns:
      True for this class, False for derived classes.
    """
    return self.__class__ == _BlockInfo
class _ExternCInfo(_BlockInfo):
  """Stores information about an 'extern "C"' block."""

  def __init__(self, linenum):
    _BlockInfo.__init__(self, linenum, True)


class _ClassInfo(_BlockInfo):
  """Stores information about a class."""

  def __init__(self, name, class_or_struct, clean_lines, linenum):
    _BlockInfo.__init__(self, linenum, False)
    self.name = name
    self.is_derived = False
    self.check_namespace_indentation = True
    if class_or_struct == 'struct':
      self.access = 'public'
      self.is_struct = True
    else:
      self.access = 'private'
      self.is_struct = False

    # Remember initial indentation level for this class.  Using raw_lines here
    # instead of elided to account for leading comments.
    self.class_indent = GetIndentLevel(clean_lines.raw_lines[linenum])

    # Try to find the end of the class.  This will be confused by things like:
    #   class A {
    #   } *x = { ...
    #
    # But it's still good enough for CheckSectionSpacing.
    self.last_line = 0
    depth = 0
    for i in range(linenum, clean_lines.NumLines()):
      line = clean_lines.elided[i]
      depth += line.count('{') - line.count('}')
      if not depth:
        self.last_line = i
        break

  def CheckBegin(self, filename, clean_lines, linenum, error):
    """Mark the class as derived when its head contains a bare ':'."""
    # Look for a bare ':'
    if Search('(^|[^:]):($|[^:])', clean_lines.elided[linenum]):
      self.is_derived = True

  def CheckEnd(self, filename, clean_lines, linenum, error):
    """Run checks on the closing line of the class body."""
    # If there is a DISALLOW macro, it should appear near the end of
    # the class.
    seen_last_thing_in_class = False
    # BUG FIX: was xrange(), which does not exist in Python 3; the rest of
    # this file (including __init__ above) already uses range().
    for i in range(linenum - 1, self.starting_linenum, -1):
      match = Search(
          r'\b(DISALLOW_COPY_AND_ASSIGN|DISALLOW_IMPLICIT_CONSTRUCTORS)\(' +
          self.name + r'\)',
          clean_lines.elided[i])
      if match:
        if seen_last_thing_in_class:
          error(filename, i, 'readability/constructors', 3,
                match.group(1) + ' should be the last thing in the class')
        break

      if not Match(r'^\s*$', clean_lines.elided[i]):
        seen_last_thing_in_class = True

    # Check that closing brace is aligned with beginning of the class.
    # Only do this if the closing brace is indented by only whitespaces.
    # This means we will not check single-line class definitions.
    indent = Match(r'^( *)\}', clean_lines.elided[linenum])
    if indent and len(indent.group(1)) != self.class_indent:
      if self.is_struct:
        parent = 'struct ' + self.name
      else:
        parent = 'class ' + self.name
      error(filename, linenum, 'whitespace/indent', 3,
            'Closing brace should be aligned with beginning of %s' % parent)


class _NamespaceInfo(_BlockInfo):
  """Stores information about a namespace."""

  def __init__(self, name, linenum):
    _BlockInfo.__init__(self, linenum, False)
    self.name = name or ''
    self.check_namespace_indentation = True

  def CheckEnd(self, filename, clean_lines, linenum, error):
    """Check end of namespace comments."""
    line = clean_lines.raw_lines[linenum]

    # Check how many lines is enclosed in this namespace.  Don't issue
    # warning for missing namespace comments if there aren't enough
    # lines.  However, do apply checks if there is already an end of
    # namespace comment and it's incorrect.
    #
    # TODO(unknown): We always want to check end of namespace comments
    # if a namespace is large, but sometimes we also want to apply the
    # check if a short namespace contained nontrivial things (something
    # other than forward declarations).  There is currently no logic on
    # deciding what these nontrivial things are, so this check is
    # triggered by namespace size only, which works most of the time.
    if (linenum - self.starting_linenum < 10
        and not Match(r'^\s*};*\s*(//|/\*).*\bnamespace\b', line)):
      return

    # Look for matching comment at end of namespace.
    #
    # Note that we accept C style "/* */" comments for terminating
    # namespaces, so that code that terminate namespaces inside
    # preprocessor macros can be cpplint clean.
    #
    # We also accept stuff like "// end of namespace <name>." with the
    # period at the end.
    #
    # Besides these, we don't accept anything else, otherwise we might
    # get false negatives when existing comment is a substring of the
    # expected namespace.
    if self.name:
      # Named namespace
      if not Match((r'^\s*};*\s*(//|/\*).*\bnamespace\s+' +
                    re.escape(self.name) + r'[\*/\.\\\s]*$'),
                   line):
        error(filename, linenum, 'readability/namespace', 5,
              'Namespace should be terminated with "// namespace %s"' %
              self.name)
    else:
      # Anonymous namespace
      if not Match(r'^\s*};*\s*(//|/\*).*\bnamespace[\*/\.\\\s]*$', line):
        # If "// namespace anonymous" or "// anonymous namespace (more text)",
        # mention "// anonymous namespace" as an acceptable form
        if Match(r'^\s*}.*\b(namespace anonymous|anonymous namespace)\b', line):
          error(filename, linenum, 'readability/namespace', 5,
                'Anonymous namespace should be terminated with "// namespace"'
                ' or "// anonymous namespace"')
        else:
          error(filename, linenum, 'readability/namespace', 5,
                'Anonymous namespace should be terminated with "// namespace"')
class _PreprocessorInfo(object):
  """Stores checkpoints of nesting stacks when #if/#else is seen."""

  def __init__(self, stack_before_if):
    # The entire nesting stack before #if
    self.stack_before_if = stack_before_if

    # The entire nesting stack up to #else
    self.stack_before_else = []

    # Whether we have already seen #else or #elif
    self.seen_else = False


class NestingState(object):
  """Holds states related to parsing braces."""

  def __init__(self):
    # Stack for tracking all braces.  An object is pushed whenever we
    # see a "{", and popped when we see a "}".  Only 3 types of
    # objects are possible:
    # - _ClassInfo: a class or struct.
    # - _NamespaceInfo: a namespace.
    # - _BlockInfo: some other type of block.
    self.stack = []

    # Top of the previous stack before each Update().
    #
    # Because the nesting_stack is updated at the end of each line, we
    # had to do some convoluted checks to find out what is the current
    # scope at the beginning of the line.  This check is simplified by
    # saving the previous top of nesting stack.
    #
    # We could save the full stack, but we only need the top.  Copying
    # the full nesting stack would slow down cpplint by ~10%.
    self.previous_stack_top = []

    # Stack of _PreprocessorInfo objects.
    self.pp_stack = []

  def SeenOpenBrace(self):
    """Check if we have seen the opening brace for the innermost block.

    Returns:
      True if we have seen the opening brace, False if the innermost
      block is still expecting an opening brace.
    """
    return (not self.stack) or self.stack[-1].seen_open_brace

  def InNamespaceBody(self):
    """Check if we are currently one level inside a namespace body.

    Returns:
      True if top of the stack is a namespace block, False otherwise.
    """
    return self.stack and isinstance(self.stack[-1], _NamespaceInfo)

  def InExternC(self):
    """Check if we are currently one level inside an 'extern "C"' block.

    Returns:
      True if top of the stack is an extern block, False otherwise.
    """
    return self.stack and isinstance(self.stack[-1], _ExternCInfo)

  def InClassDeclaration(self):
    """Check if we are currently one level inside a class or struct declaration.

    Returns:
      True if top of the stack is a class/struct, False otherwise.
    """
    return self.stack and isinstance(self.stack[-1], _ClassInfo)

  def InAsmBlock(self):
    """Check if we are currently one level inside an inline ASM block.

    Returns:
      True if the top of the stack is a block containing inline ASM.
    """
    return self.stack and self.stack[-1].inline_asm != _NO_ASM

  def InTemplateArgumentList(self, clean_lines, linenum, pos):
    """Check if current position is inside template argument list.

    Scans forward from (linenum, pos), character class by character
    class, until it finds a token that decides the question either way.

    Args:
      clean_lines: A CleansedLines instance containing the file.
      linenum: The number of the line to check.
      pos: position just after the suspected template argument.
    Returns:
      True if (linenum, pos) is inside template arguments.
    """
    while linenum < clean_lines.NumLines():
      # Find the earliest character that might indicate a template argument
      line = clean_lines.elided[linenum]
      match = Match(r'^[^{};=\[\]\.<>]*(.)', line[pos:])
      if not match:
        # Nothing decisive on this line; continue from the start of the next.
        linenum += 1
        pos = 0
        continue
      token = match.group(1)
      pos += len(match.group(0))

      # These things do not look like template argument list:
      #   class Suspect {
      #   class Suspect x; }
      if token in ('{', '}', ';'): return False

      # These things look like template argument list:
      #   template <class Suspect>
      #   template <class Suspect = default_value>
      #   template <class Suspect[]>
      #   template <class Suspect...>
      if token in ('>', '=', '[', ']', '.'): return True

      # Check if token is an unmatched '<'.
      # If not, move on to the next character.
      if token != '<':
        pos += 1
        if pos >= len(line):
          linenum += 1
          pos = 0
        continue

      # We can't be sure if we just find a single '<', and need to
      # find the matching '>'.
      (_, end_line, end_pos) = CloseExpression(clean_lines, linenum, pos - 1)
      if end_pos < 0:
        # Not sure if template argument list or syntax error in file
        return False
      # Resume the scan just past the matching '>'.
      linenum = end_line
      pos = end_pos
    return False

  def UpdatePreprocessor(self, line):
    """Update preprocessor stack.

    We need to handle preprocessors due to classes like this:
      #ifdef SWIG
      struct ResultDetailsPageElementExtensionPoint {
      #else
      struct ResultDetailsPageElementExtensionPoint : public Extension {
      #endif

    We make the following assumptions (good enough for most files):
    - Preprocessor condition evaluates to true from #if up to first
      #else/#elif/#endif.

    - Preprocessor condition evaluates to false from #else/#elif up
      to #endif.  We still perform lint checks on these lines, but
      these do not affect nesting stack.

    Args:
      line: current line to check.
    """
    if Match(r'^\s*#\s*(if|ifdef|ifndef)\b', line):
      # Beginning of #if block, save the nesting stack here.  The saved
      # stack will allow us to restore the parsing state in the #else case.
      self.pp_stack.append(_PreprocessorInfo(copy.deepcopy(self.stack)))
    elif Match(r'^\s*#\s*(else|elif)\b', line):
      # Beginning of #else block
      if self.pp_stack:
        if not self.pp_stack[-1].seen_else:
          # This is the first #else or #elif block.  Remember the
          # whole nesting stack up to this point.  This is what we
          # keep after the #endif.
          self.pp_stack[-1].seen_else = True
          self.pp_stack[-1].stack_before_else = copy.deepcopy(self.stack)

        # Restore the stack to how it was before the #if
        self.stack = copy.deepcopy(self.pp_stack[-1].stack_before_if)
      else:
        # TODO(unknown): unexpected #else, issue warning?
        pass
    elif Match(r'^\s*#\s*endif\b', line):
      # End of #if or #else blocks.
      if self.pp_stack:
        # If we saw an #else, we will need to restore the nesting
        # stack to its former state before the #else, otherwise we
        # will just continue from where we left off.
        if self.pp_stack[-1].seen_else:
          # Here we can just use a shallow copy since we are the last
          # reference to it.
          self.stack = self.pp_stack[-1].stack_before_else
        # Drop the corresponding #if
        self.pp_stack.pop()
      else:
        # TODO(unknown): unexpected #endif, issue warning?
        pass

  # TODO(unknown): Update() is too long, but we will refactor later.
  def Update(self, filename, clean_lines, linenum, error):
    """Update nesting state with current line.

    Note: 'line' is consumed progressively below — namespace heads, the
    class head, and then braces/semicolons are stripped off in turn.

    Args:
      filename: The name of the current file.
      clean_lines: A CleansedLines instance containing the file.
      linenum: The number of the line to check.
      error: The function to call with any errors found.
    """
    line = clean_lines.elided[linenum]

    # Remember top of the previous nesting stack.
    #
    # The stack is always pushed/popped and not modified in place, so
    # we can just do a shallow copy instead of copy.deepcopy.  Using
    # deepcopy would slow down cpplint by ~28%.
    if self.stack:
      self.previous_stack_top = self.stack[-1]
    else:
      self.previous_stack_top = None

    # Update pp_stack
    self.UpdatePreprocessor(line)

    # Count parentheses.  This is to avoid adding struct arguments to
    # the nesting stack.
    if self.stack:
      inner_block = self.stack[-1]
      depth_change = line.count('(') - line.count(')')
      inner_block.open_parentheses += depth_change

      # Also check if we are starting or ending an inline assembly block.
      if inner_block.inline_asm in (_NO_ASM, _END_ASM):
        if (depth_change != 0 and
            inner_block.open_parentheses == 1 and
            _MATCH_ASM.match(line)):
          # Enter assembly block
          inner_block.inline_asm = _INSIDE_ASM
        else:
          # Not entering assembly block.  If previous line was _END_ASM,
          # we will now shift to _NO_ASM state.
          inner_block.inline_asm = _NO_ASM
      elif (inner_block.inline_asm == _INSIDE_ASM and
            inner_block.open_parentheses == 0):
        # Exit assembly block
        inner_block.inline_asm = _END_ASM

    # Consume namespace declaration at the beginning of the line.  Do
    # this in a loop so that we catch same line declarations like this:
    #   namespace proto2 { namespace bridge { class MessageSet; } }
    while True:
      # Match start of namespace.  The "\b\s*" below catches namespace
      # declarations even if it weren't followed by a whitespace, this
      # is so that we don't confuse our namespace checker.  The
      # missing spaces will be flagged by CheckSpacing.
      namespace_decl_match = Match(r'^\s*namespace\b\s*([:\w]+)?(.*)$', line)
      if not namespace_decl_match:
        break

      new_namespace = _NamespaceInfo(namespace_decl_match.group(1), linenum)
      self.stack.append(new_namespace)

      line = namespace_decl_match.group(2)
      if line.find('{') != -1:
        new_namespace.seen_open_brace = True
        line = line[line.find('{') + 1:]

    # Look for a class declaration in whatever is left of the line
    # after parsing namespaces.  The regexp accounts for decorated classes
    # such as in:
    #   class LOCKABLE API Object {
    #   };
    class_decl_match = Match(
        r'^(\s*(?:template\s*<[\w\s<>,:]*>\s*)?'
        r'(class|struct)\s+(?:[A-Z_]+\s+)*(\w+(?:::\w+)*))'
        r'(.*)$', line)
    if (class_decl_match and
        (not self.stack or self.stack[-1].open_parentheses == 0)):
      # We do not want to accept classes that are actually template arguments:
      #   template <class Ignore1,
      #             class Ignore2 = Default<Args>,
      #             template <Args> class Ignore3>
      #   void Function() {};
      #
      # To avoid template argument cases, we scan forward and look for
      # an unmatched '>'.  If we see one, assume we are inside a
      # template argument list.
      end_declaration = len(class_decl_match.group(1))
      if not self.InTemplateArgumentList(clean_lines, linenum, end_declaration):
        self.stack.append(_ClassInfo(
            class_decl_match.group(3), class_decl_match.group(2),
            clean_lines, linenum))
        line = class_decl_match.group(4)

    # If we have not yet seen the opening brace for the innermost block,
    # run checks here.
    if not self.SeenOpenBrace():
      self.stack[-1].CheckBegin(filename, clean_lines, linenum, error)

    # Update access control if we are inside a class/struct
    if self.stack and isinstance(self.stack[-1], _ClassInfo):
      classinfo = self.stack[-1]
      access_match = Match(
          r'^(.*)\b(public|private|protected|signals)(\s+(?:slots\s*)?)?'
          r':(?:[^:]|$)',
          line)
      if access_match:
        classinfo.access = access_match.group(2)

        # Check that access keywords are indented +1 space.  Skip this
        # check if the keywords are not preceded by whitespaces.
        indent = access_match.group(1)
        if (len(indent) != classinfo.class_indent + 1 and
            Match(r'^\s*$', indent)):
          if classinfo.is_struct:
            parent = 'struct ' + classinfo.name
          else:
            parent = 'class ' + classinfo.name
          slots = ''
          if access_match.group(3):
            slots = access_match.group(3)
          error(filename, linenum, 'whitespace/indent', 3,
                '%s%s: should be indented +1 space inside %s' % (
                    access_match.group(2), slots, parent))

    # Consume braces or semicolons from what's left of the line
    while True:
      # Match first brace, semicolon, or closed parenthesis.
      matched = Match(r'^[^{;)}]*([{;)}])(.*)$', line)
      if not matched:
        break

      token = matched.group(1)
      if token == '{':
        # If namespace or class hasn't seen a opening brace yet, mark
        # namespace/class head as complete.  Push a new block onto the
        # stack otherwise.
        if not self.SeenOpenBrace():
          self.stack[-1].seen_open_brace = True
        elif Match(r'^extern\s*"[^"]*"\s*\{', line):
          self.stack.append(_ExternCInfo(linenum))
        else:
          self.stack.append(_BlockInfo(linenum, True))
          if _MATCH_ASM.match(line):
            self.stack[-1].inline_asm = _BLOCK_ASM

      elif token == ';' or token == ')':
        # If we haven't seen an opening brace yet, but we already saw
        # a semicolon, this is probably a forward declaration.  Pop
        # the stack for these.
        #
        # Similarly, if we haven't seen an opening brace yet, but we
        # already saw a closing parenthesis, then these are probably
        # function arguments with extra "class" or "struct" keywords.
        # Also pop these stack for these.
        if not self.SeenOpenBrace():
          self.stack.pop()
      else:  # token == '}'
        # Perform end of block checks and pop the stack.
        if self.stack:
          self.stack[-1].CheckEnd(filename, clean_lines, linenum, error)
          self.stack.pop()
      line = matched.group(2)

  def InnermostClass(self):
    """Get class info on the top of the stack.

    Returns:
      A _ClassInfo object if we are inside a class, or None otherwise.
    """
    # Walk the stack from innermost to outermost looking for a class.
    for i in range(len(self.stack), 0, -1):
      classinfo = self.stack[i - 1]
      if isinstance(classinfo, _ClassInfo):
        return classinfo
    return None

  def CheckCompletedBlocks(self, filename, error):
    """Checks that all classes and namespaces have been completely parsed.

    Call this when all lines in a file have been processed.
    Args:
      filename: The name of the current file.
      error: The function to call with any errors found.
    """
    # Note: This test can result in false positives if #ifdef constructs
    # get in the way of brace matching.  See the testBuildClass test in
    # cpplint_unittest.py for an example of this.
    for obj in self.stack:
      if isinstance(obj, _ClassInfo):
        error(filename, obj.starting_linenum, 'build/class', 5,
              'Failed to find complete declaration of class %s' %
              obj.name)
      elif isinstance(obj, _NamespaceInfo):
        error(filename, obj.starting_linenum, 'build/namespaces', 5,
              'Failed to find complete declaration of namespace %s' %
              obj.name)
def CheckForNonStandardConstructs(filename, clean_lines, linenum,
                                  nesting_state, error):
  r"""Logs an error if we see certain non-ANSI constructs ignored by gcc-2.

  Complain about several constructs which gcc-2 accepts, but which are
  not standard C++.  Warning about these in lint is one way to ease the
  transition to new compilers.
  - put storage class first (e.g. "static const" instead of "const static").
  - "%lld" instead of %qd" in printf-type functions.
  - "%1$d" is non-standard in printf-type functions.
  - "\%" is an undefined character escape sequence.
  - text after #endif is not allowed.
  - invalid inner-style forward declaration.
  - >? and <? operators, and their >?= and <?= cousins.

  Additionally, check for constructor/destructor style violations and reference
  members, as it is very convenient to do so while checking for
  gcc-2 compliance.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    nesting_state: A NestingState instance which maintains information about
                   the current stack of nested blocks being parsed.
    error: A callable to which errors are reported, which takes 4 arguments:
           filename, line number, error level, and message
  """

  # Remove comments from the line, but leave in strings for now.
  line = clean_lines.lines[linenum]

  if Search(r'printf\s*\(.*".*%[-+ ]?\d*q', line):
    error(filename, linenum, 'runtime/printf_format', 3,
          '%q in format strings is deprecated.  Use %ll instead.')

  if Search(r'printf\s*\(.*".*%\d+\$', line):
    error(filename, linenum, 'runtime/printf_format', 2,
          '%N$ formats are unconventional.  Try rewriting to avoid them.')

  # Remove escaped backslashes before looking for undefined escapes.
  line = line.replace('\\\\', '')

  if Search(r'("|\').*\\(%|\[|\(|{)', line):
    error(filename, linenum, 'build/printf_format', 3,
          '%, [, (, and { are undefined character escapes.  Unescape them.')

  # For the rest, work with both comments and strings removed.
  line = clean_lines.elided[linenum]

  if Search(r'\b(const|volatile|void|char|short|int|long'
            r'|float|double|signed|unsigned'
            r'|schar|u?int8|u?int16|u?int32|u?int64)'
            r'\s+(register|static|extern|typedef)\b',
            line):
    error(filename, linenum, 'build/storage_class', 5,
          'Storage-class specifier (static, extern, typedef, etc) should be '
          'at the beginning of the declaration.')

  if Match(r'\s*#\s*endif\s*[^/\s]+', line):
    error(filename, linenum, 'build/endif_comment', 5,
          'Uncommented text after #endif is non-standard.  Use a comment.')

  if Match(r'\s*class\s+(\w+\s*::\s*)+\w+\s*;', line):
    error(filename, linenum, 'build/forward_decl', 5,
          'Inner-style forward declarations are invalid.  Remove this line.')

  if Search(r'(\w+|[+-]?\d+(\.\d*)?)\s*(<|>)\?=?\s*(\w+|[+-]?\d+)(\.\d*)?',
            line):
    error(filename, linenum, 'build/deprecated', 3,
          '>? and <? (max and min) operators are non-standard and deprecated.')

  if Search(r'^\s*const\s*string\s*&\s*\w+\s*;', line):
    # TODO(unknown): Could it be expanded safely to arbitrary references,
    # without triggering too many false positives? The first
    # attempt triggered 5 warnings for mostly benign code in the regtest, hence
    # the restriction.
    # Here's the original regexp, for the reference:
    # type_name = r'\w+((\s*::\s*\w+)|(\s*<\s*\w+?\s*>))?'
    # r'\s*const\s*' + type_name + '\s*&\s*\w+\s*;'
    error(filename, linenum, 'runtime/member_string_references', 2,
          'const string& members are dangerous.  It is much better to use '
          'alternatives, such as pointers or simple constants.')

  # Everything else in this function operates on class declarations.
  # Return early if the top of the nesting stack is not a class, or if
  # the class head is not completed yet.
  classinfo = nesting_state.InnermostClass()
  if not classinfo or not classinfo.seen_open_brace:
    return

  # The class may have been declared with namespace or classname qualifiers.
  # The constructor and destructor will not have those qualifiers.
  base_classname = classinfo.name.split('::')[-1]

  # Look for single-argument constructors that aren't marked explicit.
  # Technically a valid construct, but against style.
  # Group 1 captures an optional 'explicit', group 2 the argument list.
  explicit_constructor_match = Match(
      r'\s+(?:(?:inline|constexpr)\s+)*(explicit\s+)?'
      r'(?:(?:inline|constexpr)\s+)*%s\s*'
      r'\(((?:[^()]|\([^()]*\))*)\)'
      % re.escape(base_classname),
      line)

  if explicit_constructor_match:
    is_marked_explicit = explicit_constructor_match.group(1)

    if not explicit_constructor_match.group(2):
      constructor_args = []
    else:
      constructor_args = explicit_constructor_match.group(2).split(',')

    # collapse arguments so that commas in template parameter lists and function
    # argument parameter lists don't split arguments in two
    # NOTE(review): assumes '<'/'(' are balanced across the whole argument
    # list; a malformed declaration could exhaust constructor_args here.
    i = 0
    while i < len(constructor_args):
      constructor_arg = constructor_args[i]
      while (constructor_arg.count('<') > constructor_arg.count('>') or
             constructor_arg.count('(') > constructor_arg.count(')')):
        constructor_arg += ',' + constructor_args[i + 1]
        del constructor_args[i + 1]
      constructor_args[i] = constructor_arg
      i += 1

    defaulted_args = [arg for arg in constructor_args if '=' in arg]
    noarg_constructor = (not constructor_args or  # empty arg list
                         # 'void' arg specifier
                         (len(constructor_args) == 1 and
                          constructor_args[0].strip() == 'void'))
    onearg_constructor = ((len(constructor_args) == 1 and  # exactly one arg
                           not noarg_constructor) or
                          # all but at most one arg defaulted
                          (len(constructor_args) >= 1 and
                           not noarg_constructor and
                           len(defaulted_args) >= len(constructor_args) - 1))
    initializer_list_constructor = bool(
        onearg_constructor and
        Search(r'\bstd\s*::\s*initializer_list\b', constructor_args[0]))
    copy_constructor = bool(
        onearg_constructor and
        Match(r'(const\s+)?%s(\s*<[^>]*>)?(\s+const)?\s*(?:<\w+>\s*)?&'
              % re.escape(base_classname), constructor_args[0].strip()))

    if (not is_marked_explicit and
        onearg_constructor and
        not initializer_list_constructor and
        not copy_constructor):
      if defaulted_args:
        error(filename, linenum, 'runtime/explicit', 5,
              'Constructors callable with one argument '
              'should be marked explicit.')
      else:
        error(filename, linenum, 'runtime/explicit', 5,
              'Single-parameter constructors should be marked explicit.')
    elif is_marked_explicit and not onearg_constructor:
      if noarg_constructor:
        error(filename, linenum, 'runtime/explicit', 5,
              'Zero-parameter constructors should not be marked explicit.')
if/for/while/switch\n  # expressions - which have their own, more liberal conventions - we\n  # first see if we should be looking inside such an expression for a\n  # function call, to which we can apply more strict standards.\n  fncall = line    # if there's no control flow construct, look at whole line\n  for pattern in (r'\\bif\\s*\\((.*)\\)\\s*{',\n                  r'\\bfor\\s*\\((.*)\\)\\s*{',\n                  r'\\bwhile\\s*\\((.*)\\)\\s*[{;]',\n                  r'\\bswitch\\s*\\((.*)\\)\\s*{'):\n    match = Search(pattern, line)\n    if match:\n      fncall = match.group(1)    # look inside the parens for function calls\n      break\n\n  # Except in if/for/while/switch, there should never be space\n  # immediately inside parens (eg \"f( 3, 4 )\").  We make an exception\n  # for nested parens ( (a+b) + c ).  Likewise, there should never be\n  # a space before a ( when it's a function argument.  I assume it's a\n  # function argument when the char before the whitespace is legal in\n  # a function name (alnum + _) and we're not starting a macro. 
Also ignore\n  # pointers and references to arrays and functions coz they're too tricky:\n  # we use a very simple way to recognize these:\n  # \" (something)(maybe-something)\" or\n  # \" (something)(maybe-something,\" or\n  # \" (something)[something]\"\n  # Note that we assume the contents of [] to be short enough that\n  # they'll never need to wrap.\n  if (  # Ignore control structures.\n      not Search(r'\\b(if|for|while|switch|return|new|delete|catch|sizeof)\\b',\n                 fncall) and\n      # Ignore pointers/references to functions.\n      not Search(r' \\([^)]+\\)\\([^)]*(\\)|,$)', fncall) and\n      # Ignore pointers/references to arrays.\n      not Search(r' \\([^)]+\\)\\[[^\\]]+\\]', fncall)):\n    if Search(r'\\w\\s*\\(\\s(?!\\s*\\\\$)', fncall):      # a ( used for a fn call\n      error(filename, linenum, 'whitespace/parens', 4,\n            'Extra space after ( in function call')\n    elif Search(r'\\(\\s+(?!(\\s*\\\\)|\\()', fncall):\n      error(filename, linenum, 'whitespace/parens', 2,\n            'Extra space after (')\n    if (Search(r'\\w\\s+\\(', fncall) and\n        not Search(r'_{0,2}asm_{0,2}\\s+_{0,2}volatile_{0,2}\\s+\\(', fncall) and\n        not Search(r'#\\s*define|typedef|using\\s+\\w+\\s*=', fncall) and\n        not Search(r'\\w\\s+\\((\\w+::)*\\*\\w+\\)\\(', fncall) and\n        not Search(r'\\bcase\\s+\\(', fncall)):\n      # TODO(unknown): Space after an operator function seem to be a common\n      # error, silence those for now by restricting them to highest verbosity.\n      if Search(r'\\boperator_*\\b', line):\n        error(filename, linenum, 'whitespace/parens', 0,\n              'Extra space before ( in function call')\n      else:\n        error(filename, linenum, 'whitespace/parens', 4,\n              'Extra space before ( in function call')\n    # If the ) is followed only by a newline or a { + newline, assume it's\n    # part of a control statement (if/while/etc), and don't complain\n    if 
Search(r'[^)]\\s+\\)\\s*[^{\\s]', fncall):\n      # If the closing parenthesis is preceded by only whitespaces,\n      # try to give a more descriptive error message.\n      if Search(r'^\\s+\\)', fncall):\n        error(filename, linenum, 'whitespace/parens', 2,\n              'Closing ) should be moved to the previous line')\n      else:\n        error(filename, linenum, 'whitespace/parens', 2,\n              'Extra space before )')\n\n\ndef IsBlankLine(line):\n  \"\"\"Returns true if the given line is blank.\n\n  We consider a line to be blank if the line is empty or consists of\n  only white spaces.\n\n  Args:\n    line: A line of a string.\n\n  Returns:\n    True, if the given line is blank.\n  \"\"\"\n  return not line or line.isspace()\n\n\ndef CheckForNamespaceIndentation(filename, nesting_state, clean_lines, line,\n                                 error):\n  is_namespace_indent_item = (\n      len(nesting_state.stack) > 1 and\n      nesting_state.stack[-1].check_namespace_indentation and\n      isinstance(nesting_state.previous_stack_top, _NamespaceInfo) and\n      nesting_state.previous_stack_top == nesting_state.stack[-2])\n\n  if ShouldCheckNamespaceIndentation(nesting_state, is_namespace_indent_item,\n                                     clean_lines.elided, line):\n    CheckItemIndentationInNamespace(filename, clean_lines.elided,\n                                    line, error)\n\n\ndef CheckForFunctionLengths(filename, clean_lines, linenum,\n                            function_state, error):\n  \"\"\"Reports for long function bodies.\n\n  For an overview why this is done, see:\n  https://google-styleguide.googlecode.com/svn/trunk/cppguide.xml#Write_Short_Functions\n\n  Uses a simplistic algorithm assuming other style guidelines\n  (especially spacing) are followed.\n  Only checks unindented functions, so class members are unchecked.\n  Trivial bodies are unchecked, so constructors with huge initializer lists\n  may be missed.\n  Blank/comment lines 
are not counted so as to avoid encouraging the removal\n  of vertical space and comments just to get through a lint check.\n  NOLINT *on the last line of a function* disables this check.\n\n  Args:\n    filename: The name of the current file.\n    clean_lines: A CleansedLines instance containing the file.\n    linenum: The number of the line to check.\n    function_state: Current function name and lines in body so far.\n    error: The function to call with any errors found.\n  \"\"\"\n  lines = clean_lines.lines\n  line = lines[linenum]\n  joined_line = ''\n\n  starting_func = False\n  regexp = r'(\\w(\\w|::|\\*|\\&|\\s)*)\\('  # decls * & space::name( ...\n  match_result = Match(regexp, line)\n  if match_result:\n    # If the name is all caps and underscores, figure it's a macro and\n    # ignore it, unless it's TEST or TEST_F.\n    function_name = match_result.group(1).split()[-1]\n    if function_name == 'TEST' or function_name == 'TEST_F' or (\n        not Match(r'[A-Z_]+$', function_name)):\n      starting_func = True\n\n  if starting_func:\n    body_found = False\n    for start_linenum in xrange(linenum, clean_lines.NumLines()):\n      start_line = lines[start_linenum]\n      joined_line += ' ' + start_line.lstrip()\n      if Search(r'(;|})', start_line):  # Declarations and trivial functions\n        body_found = True\n        break                              # ... ignore\n      elif Search(r'{', start_line):\n        body_found = True\n        function = Search(r'((\\w|:)*)\\(', line).group(1)\n        if Match(r'TEST', function):    # Handle TEST... 
macros\n          parameter_regexp = Search(r'(\\(.*\\))', joined_line)\n          if parameter_regexp:             # Ignore bad syntax\n            function += parameter_regexp.group(1)\n        else:\n          function += '()'\n        function_state.Begin(function)\n        break\n    if not body_found:\n      # No body for the function (or evidence of a non-function) was found.\n      error(filename, linenum, 'readability/fn_size', 5,\n            'Lint failed to find start of function body.')\n  elif Match(r'^\\}\\s*$', line):  # function end\n    function_state.Check(error, filename, linenum)\n    function_state.End()\n  elif not Match(r'^\\s*$', line):\n    function_state.Count()  # Count non-blank/non-comment lines.\n\n\n_RE_PATTERN_TODO = re.compile(r'^//(\\s*)TODO(\\(.+?\\))?:?(\\s|$)?')\n\n\ndef CheckComment(line, filename, linenum, next_line_start, error):\n  \"\"\"Checks for common mistakes in comments.\n\n  Args:\n    line: The line in question.\n    filename: The name of the current file.\n    linenum: The number of the line to check.\n    next_line_start: The first non-whitespace column of the next line.\n    error: The function to call with any errors found.\n  \"\"\"\n  commentpos = line.find('//')\n  if commentpos != -1:\n    # Check if the // may be in quotes.  
If so, ignore it\n    if re.sub(r'\\\\.', '', line[0:commentpos]).count('\"') % 2 == 0:\n      # Allow one space for new scopes, two spaces otherwise:\n      if (not (Match(r'^.*{ *//', line) and next_line_start == commentpos) and\n          ((commentpos >= 1 and\n            line[commentpos-1] not in string.whitespace) or\n           (commentpos >= 2 and\n            line[commentpos-2] not in string.whitespace))):\n        error(filename, linenum, 'whitespace/comments', 2,\n              'At least two spaces is best between code and comments')\n\n      # Checks for common mistakes in TODO comments.\n      comment = line[commentpos:]\n      match = _RE_PATTERN_TODO.match(comment)\n      if match:\n        # One whitespace is correct; zero whitespace is handled elsewhere.\n        leading_whitespace = match.group(1)\n        if len(leading_whitespace) > 1:\n          error(filename, linenum, 'whitespace/todo', 2,\n                'Too many spaces before TODO')\n\n        username = match.group(2)\n        if not username:\n          error(filename, linenum, 'readability/todo', 2,\n                'Missing username in TODO; it should look like '\n                '\"// TODO(my_username): Stuff.\"')\n\n        middle_whitespace = match.group(3)\n        # Comparisons made explicit for correctness -- pylint: disable=g-explicit-bool-comparison\n        if middle_whitespace != ' ' and middle_whitespace != '':\n          error(filename, linenum, 'whitespace/todo', 2,\n                'TODO(my_username) should be followed by a space')\n\n      # If the comment contains an alphanumeric character, there\n      # should be a space somewhere between it and the // unless\n      # it's a /// or //! 
Doxygen comment.\n      if (Match(r'//[^ ]*\\w', comment) and\n          not Match(r'(///|//\\!)(\\s+|$)', comment)):\n        error(filename, linenum, 'whitespace/comments', 4,\n              'Should have a space between // and comment')\n\n\ndef CheckSpacing(filename, clean_lines, linenum, nesting_state, error):\n  \"\"\"Checks for the correctness of various spacing issues in the code.\n\n  Things we check for: spaces around operators, spaces after\n  if/for/while/switch, no spaces around parens in function calls, two\n  spaces between code and comment, don't start a block with a blank\n  line, don't end a function with a blank line, don't add a blank line\n  after public/protected/private, don't have too many blank lines in a row.\n\n  Args:\n    filename: The name of the current file.\n    clean_lines: A CleansedLines instance containing the file.\n    linenum: The number of the line to check.\n    nesting_state: A NestingState instance which maintains information about\n                   the current stack of nested blocks being parsed.\n    error: The function to call with any errors found.\n  \"\"\"\n\n  # Don't use \"elided\" lines here, otherwise we can't check commented lines.\n  # Don't want to use \"raw\" either, because we don't want to check inside C++11\n  # raw strings,\n  raw = clean_lines.lines_without_raw_strings\n  line = raw[linenum]\n\n  # Before nixing comments, check if the line is blank for no good\n  # reason.  This includes the first line after a block is opened, and\n  # blank lines at the end of a function (ie, right before a line like '}'\n  #\n  # Skip all the blank line checks if we are immediately inside a\n  # namespace body.  
In other words, don't issue blank line warnings\n  # for this block:\n  #   namespace {\n  #\n  #   }\n  #\n  # A warning about missing end of namespace comments will be issued instead.\n  #\n  # Also skip blank line checks for 'extern \"C\"' blocks, which are formatted\n  # like namespaces.\n  if (IsBlankLine(line) and\n      not nesting_state.InNamespaceBody() and\n      not nesting_state.InExternC()):\n    elided = clean_lines.elided\n    prev_line = elided[linenum - 1]\n    prevbrace = prev_line.rfind('{')\n    # TODO(unknown): Don't complain if line before blank line, and line after,\n    #                both start with alnums and are indented the same amount.\n    #                This ignores whitespace at the start of a namespace block\n    #                because those are not usually indented.\n    if prevbrace != -1 and prev_line[prevbrace:].find('}') == -1:\n      # OK, we have a blank line at the start of a code block.  Before we\n      # complain, we check if it is an exception to the rule: The previous\n      # non-empty line has the parameters of a function header that are indented\n      # 4 spaces (because they did not fit in a 80 column line when placed on\n      # the same line as the function name).  
We also check for the case where\n      # the previous line is indented 6 spaces, which may happen when the\n      # initializers of a constructor do not fit into a 80 column line.\n      exception = False\n      if Match(r' {6}\\w', prev_line):  # Initializer list?\n        # We are looking for the opening column of initializer list, which\n        # should be indented 4 spaces to cause 6 space indentation afterwards.\n        search_position = linenum-2\n        while (search_position >= 0\n               and Match(r' {6}\\w', elided[search_position])):\n          search_position -= 1\n        exception = (search_position >= 0\n                     and elided[search_position][:5] == '    :')\n      else:\n        # Search for the function arguments or an initializer list.  We use a\n        # simple heuristic here: If the line is indented 4 spaces; and we have a\n        # closing paren, without the opening paren, followed by an opening brace\n        # or colon (for initializer lists) we assume that it is the last line of\n        # a function header.  
If we have a colon indented 4 spaces, it is an\n        # initializer list.\n        exception = (Match(r' {4}\\w[^\\(]*\\)\\s*(const\\s*)?(\\{\\s*$|:)',\n                           prev_line)\n                     or Match(r' {4}:', prev_line))\n\n      if not exception:\n        error(filename, linenum, 'whitespace/blank_line', 2,\n              'Redundant blank line at the start of a code block '\n              'should be deleted.')\n    # Ignore blank lines at the end of a block in a long if-else\n    # chain, like this:\n    #   if (condition1) {\n    #     // Something followed by a blank line\n    #\n    #   } else if (condition2) {\n    #     // Something else\n    #   }\n    if linenum + 1 < clean_lines.NumLines():\n      next_line = raw[linenum + 1]\n      if (next_line\n          and Match(r'\\s*}', next_line)\n          and next_line.find('} else ') == -1):\n        error(filename, linenum, 'whitespace/blank_line', 3,\n              'Redundant blank line at the end of a code block '\n              'should be deleted.')\n\n    matched = Match(r'\\s*(public|protected|private):', prev_line)\n    if matched:\n      error(filename, linenum, 'whitespace/blank_line', 3,\n            'Do not leave a blank line after \"%s:\"' % matched.group(1))\n\n  # Next, check comments\n  next_line_start = 0\n  if linenum + 1 < clean_lines.NumLines():\n    next_line = raw[linenum + 1]\n    next_line_start = len(next_line) - len(next_line.lstrip())\n  CheckComment(line, filename, linenum, next_line_start, error)\n\n  # get rid of comments and strings\n  line = clean_lines.elided[linenum]\n\n  # You shouldn't have spaces before your brackets, except maybe after\n  # 'delete []' or 'return []() {};'\n  if Search(r'\\w\\s+\\[', line) and not Search(r'(?:delete|return)\\s+\\[', line):\n    error(filename, linenum, 'whitespace/braces', 5,\n          'Extra space before [')\n\n  # In range-based for, we wanted spaces before and after the colon, but\n  # not around \"::\" tokens 
that might appear.\n  if (Search(r'for *\\(.*[^:]:[^: ]', line) or\n      Search(r'for *\\(.*[^: ]:[^:]', line)):\n    error(filename, linenum, 'whitespace/forcolon', 2,\n          'Missing space around colon in range-based for loop')\n\n\ndef CheckOperatorSpacing(filename, clean_lines, linenum, error):\n  \"\"\"Checks for horizontal spacing around operators.\n\n  Args:\n    filename: The name of the current file.\n    clean_lines: A CleansedLines instance containing the file.\n    linenum: The number of the line to check.\n    error: The function to call with any errors found.\n  \"\"\"\n  line = clean_lines.elided[linenum]\n\n  # Don't try to do spacing checks for operator methods.  Do this by\n  # replacing the troublesome characters with something else,\n  # preserving column position for all other characters.\n  #\n  # The replacement is done repeatedly to avoid false positives from\n  # operators that call operators.\n  while True:\n    match = Match(r'^(.*\\boperator\\b)(\\S+)(\\s*\\(.*)$', line)\n    if match:\n      line = match.group(1) + ('_' * len(match.group(2))) + match.group(3)\n    else:\n      break\n\n  # We allow no-spaces around = within an if: \"if ( (a=Foo()) == 0 )\".\n  # Otherwise not.  Note we only check for non-spaces on *both* sides;\n  # sometimes people put non-spaces on one side when aligning ='s among\n  # many lines (not that this is behavior that I approve of...)\n  if ((Search(r'[\\w.]=', line) or\n       Search(r'=[\\w.]', line))\n      and not Search(r'\\b(if|while|for) ', line)\n      # Operators taken from [lex.operators] in C++11 standard.\n      and not Search(r'(>=|<=|==|!=|&=|\\^=|\\|=|\\+=|\\*=|\\/=|\\%=)', line)\n      and not Search(r'operator=', line)):\n    error(filename, linenum, 'whitespace/operators', 4,\n          'Missing spaces around =')\n\n  # It's ok not to have spaces around binary operators like + - * /, but if\n  # there's too little whitespace, we get concerned.  
It's hard to tell,\n  # though, so we punt on this one for now.  TODO.\n\n  # You should always have whitespace around binary operators.\n  #\n  # Check <= and >= first to avoid false positives with < and >, then\n  # check non-include lines for spacing around < and >.\n  #\n  # If the operator is followed by a comma, assume it's be used in a\n  # macro context and don't do any checks.  This avoids false\n  # positives.\n  #\n  # Note that && is not included here.  This is because there are too\n  # many false positives due to RValue references.\n  match = Search(r'[^<>=!\\s](==|!=|<=|>=|\\|\\|)[^<>=!\\s,;\\)]', line)\n  if match:\n    error(filename, linenum, 'whitespace/operators', 3,\n          'Missing spaces around %s' % match.group(1))\n  elif not Match(r'#.*include', line):\n    # Look for < that is not surrounded by spaces.  This is only\n    # triggered if both sides are missing spaces, even though\n    # technically should should flag if at least one side is missing a\n    # space.  This is done to avoid some false positives with shifts.\n    match = Match(r'^(.*[^\\s<])<[^\\s=<,]', line)\n    if match:\n      (_, _, end_pos) = CloseExpression(\n          clean_lines, linenum, len(match.group(1)))\n      if end_pos <= -1:\n        error(filename, linenum, 'whitespace/operators', 3,\n              'Missing spaces around <')\n\n    # Look for > that is not surrounded by spaces.  
Similar to the\n    # above, we only trigger if both sides are missing spaces to avoid\n    # false positives with shifts.\n    match = Match(r'^(.*[^-\\s>])>[^\\s=>,]', line)\n    if match:\n      (_, _, start_pos) = ReverseCloseExpression(\n          clean_lines, linenum, len(match.group(1)))\n      if start_pos <= -1:\n        error(filename, linenum, 'whitespace/operators', 3,\n              'Missing spaces around >')\n\n  # We allow no-spaces around << when used like this: 10<<20, but\n  # not otherwise (particularly, not when used as streams)\n  #\n  # We also allow operators following an opening parenthesis, since\n  # those tend to be macros that deal with operators.\n  match = Search(r'(operator|[^\\s(<])(?:L|UL|LL|ULL|l|ul|ll|ull)?<<([^\\s,=<])', line)\n  if (match and not (match.group(1).isdigit() and match.group(2).isdigit()) and\n      not (match.group(1) == 'operator' and match.group(2) == ';')):\n    error(filename, linenum, 'whitespace/operators', 3,\n          'Missing spaces around <<')\n\n  # We allow no-spaces around >> for almost anything.  
This is because\n  # C++11 allows \">>\" to close nested templates, which accounts for\n  # most cases when \">>\" is not followed by a space.\n  #\n  # We still warn on \">>\" followed by alpha character, because that is\n  # likely due to \">>\" being used for right shifts, e.g.:\n  #   value >> alpha\n  #\n  # When \">>\" is used to close templates, the alphanumeric letter that\n  # follows would be part of an identifier, and there should still be\n  # a space separating the template type and the identifier.\n  #   type<type<type>> alpha\n  match = Search(r'>>[a-zA-Z_]', line)\n  if match:\n    error(filename, linenum, 'whitespace/operators', 3,\n          'Missing spaces around >>')\n\n  # There shouldn't be space around unary operators\n  match = Search(r'(!\\s|~\\s|[\\s]--[\\s;]|[\\s]\\+\\+[\\s;])', line)\n  if match:\n    error(filename, linenum, 'whitespace/operators', 4,\n          'Extra space for operator %s' % match.group(1))\n\n\ndef CheckParenthesisSpacing(filename, clean_lines, linenum, error):\n  \"\"\"Checks for horizontal spacing around parentheses.\n\n  Args:\n    filename: The name of the current file.\n    clean_lines: A CleansedLines instance containing the file.\n    linenum: The number of the line to check.\n    error: The function to call with any errors found.\n  \"\"\"\n  line = clean_lines.elided[linenum]\n\n  # No spaces after an if, while, switch, or for\n  match = Search(r' (if\\(|for\\(|while\\(|switch\\()', line)\n  if match:\n    error(filename, linenum, 'whitespace/parens', 5,\n          'Missing space before ( in %s' % match.group(1))\n\n  # For if/for/while/switch, the left and right parens should be\n  # consistent about how many spaces are inside the parens, and\n  # there should either be zero or one spaces inside the parens.\n  # We don't want: \"if ( foo)\" or \"if ( foo   )\".\n  # Exception: \"for ( ; foo; bar)\" and \"for (foo; bar; )\" are allowed.\n  match = Search(r'\\b(if|for|while|switch)\\s*'\n                 
r'\\(([ ]*)(.).*[^ ]+([ ]*)\\)\\s*{\\s*$',\n                 line)\n  if match:\n    if len(match.group(2)) != len(match.group(4)):\n      if not (match.group(3) == ';' and\n              len(match.group(2)) == 1 + len(match.group(4)) or\n              not match.group(2) and Search(r'\\bfor\\s*\\(.*; \\)', line)):\n        error(filename, linenum, 'whitespace/parens', 5,\n              'Mismatching spaces inside () in %s' % match.group(1))\n    if len(match.group(2)) not in [0, 1]:\n      error(filename, linenum, 'whitespace/parens', 5,\n            'Should have zero or one spaces inside ( and ) in %s' %\n            match.group(1))\n\n\ndef CheckCommaSpacing(filename, clean_lines, linenum, error):\n  \"\"\"Checks for horizontal spacing near commas and semicolons.\n\n  Args:\n    filename: The name of the current file.\n    clean_lines: A CleansedLines instance containing the file.\n    linenum: The number of the line to check.\n    error: The function to call with any errors found.\n  \"\"\"\n  raw = clean_lines.lines_without_raw_strings\n  line = clean_lines.elided[linenum]\n\n  # You should always have a space after a comma (either as fn arg or operator)\n  #\n  # This does not apply when the non-space character following the\n  # comma is another comma, since the only time when that happens is\n  # for empty macro arguments.\n  #\n  # We run this check in two passes: first pass on elided lines to\n  # verify that lines contain missing whitespaces, second pass on raw\n  # lines to confirm that those missing whitespaces are not due to\n  # elided comments.\n  if (Search(r',[^,\\s]', ReplaceAll(r'\\boperator\\s*,\\s*\\(', 'F(', line)) and\n      Search(r',[^,\\s]', raw[linenum])):\n    error(filename, linenum, 'whitespace/comma', 3,\n          'Missing space after ,')\n\n  # You should always have a space after a semicolon\n  # except for few corner cases\n  # TODO(unknown): clarify if 'if (1) { return 1;}' is requires one more\n  # space after ;\n  if 
Search(r';[^\\s};\\\\)/]', line):\n    error(filename, linenum, 'whitespace/semicolon', 3,\n          'Missing space after ;')\n\n\ndef _IsType(clean_lines, nesting_state, expr):\n  \"\"\"Check if expression looks like a type name, returns true if so.\n\n  Args:\n    clean_lines: A CleansedLines instance containing the file.\n    nesting_state: A NestingState instance which maintains information about\n                   the current stack of nested blocks being parsed.\n    expr: The expression to check.\n  Returns:\n    True, if token looks like a type.\n  \"\"\"\n  # Keep only the last token in the expression\n  last_word = Match(r'^.*(\\b\\S+)$', expr)\n  if last_word:\n    token = last_word.group(1)\n  else:\n    token = expr\n\n  # Match native types and stdint types\n  if _TYPES.match(token):\n    return True\n\n  # Try a bit harder to match templated types.  Walk up the nesting\n  # stack until we find something that resembles a typename\n  # declaration for what we are looking for.\n  typename_pattern = (r'\\b(?:typename|class|struct)\\s+' + re.escape(token) +\n                      r'\\b')\n  block_index = len(nesting_state.stack) - 1\n  while block_index >= 0:\n    if isinstance(nesting_state.stack[block_index], _NamespaceInfo):\n      return False\n\n    # Found where the opening brace is.  We want to scan from this\n    # line up to the beginning of the function, minus a few lines.\n    #   template <typename Type1,  // stop scanning here\n    #             ...>\n    #   class C\n    #     : public ... 
{  // start scanning here\n    last_line = nesting_state.stack[block_index].starting_linenum\n\n    next_block_start = 0\n    if block_index > 0:\n      next_block_start = nesting_state.stack[block_index - 1].starting_linenum\n    first_line = last_line\n    while first_line >= next_block_start:\n      if clean_lines.elided[first_line].find('template') >= 0:\n        break\n      first_line -= 1\n    if first_line < next_block_start:\n      # Didn't find any \"template\" keyword before reaching the next block,\n      # there are probably no template things to check for this block\n      block_index -= 1\n      continue\n\n    # Look for typename in the specified range\n    for i in xrange(first_line, last_line + 1, 1):\n      if Search(typename_pattern, clean_lines.elided[i]):\n        return True\n    block_index -= 1\n\n  return False\n\n\ndef CheckBracesSpacing(filename, clean_lines, linenum, nesting_state, error):\n  \"\"\"Checks for horizontal spacing near commas.\n\n  Args:\n    filename: The name of the current file.\n    clean_lines: A CleansedLines instance containing the file.\n    linenum: The number of the line to check.\n    nesting_state: A NestingState instance which maintains information about\n                   the current stack of nested blocks being parsed.\n    error: The function to call with any errors found.\n  \"\"\"\n  line = clean_lines.elided[linenum]\n\n  # Except after an opening paren, or after another opening brace (in case of\n  # an initializer list, for instance), you should have spaces before your\n  # braces when they are delimiting blocks, classes, namespaces etc.\n  # And since you should never have braces at the beginning of a line,\n  # this is an easy test.  Except that braces used for initialization don't\n  # follow the same rule; we often don't want spaces before those.\n  match = Match(r'^(.*[^ ({>]){', line)\n\n  if match:\n    # Try a bit harder to check for brace initialization.  
This\n    # happens in one of the following forms:\n    #   Constructor() : initializer_list_{} { ... }\n    #   Constructor{}.MemberFunction()\n    #   Type variable{};\n    #   FunctionCall(type{}, ...);\n    #   LastArgument(..., type{});\n    #   LOG(INFO) << type{} << \" ...\";\n    #   map_of_type[{...}] = ...;\n    #   ternary = expr ? new type{} : nullptr;\n    #   OuterTemplate<InnerTemplateConstructor<Type>{}>\n    #\n    # We check for the character following the closing brace, and\n    # silence the warning if it's one of those listed above, i.e.\n    # \"{.;,)<>]:\".\n    #\n    # To account for nested initializer list, we allow any number of\n    # closing braces up to \"{;,)<\".  We can't simply silence the\n    # warning on first sight of closing brace, because that would\n    # cause false negatives for things that are not initializer lists.\n    #   Silence this:         But not this:\n    #     Outer{                if (...) {\n    #       Inner{...}            if (...){  // Missing space before {\n    #     };                    }\n    #\n    # There is a false negative with this approach if people inserted\n    # spurious semicolons, e.g. 
\"if (cond){};\", but we will catch the\n    # spurious semicolon with a separate check.\n    leading_text = match.group(1)\n    (endline, endlinenum, endpos) = CloseExpression(\n        clean_lines, linenum, len(match.group(1)))\n    trailing_text = ''\n    if endpos > -1:\n      trailing_text = endline[endpos:]\n    for offset in xrange(endlinenum + 1,\n                         min(endlinenum + 3, clean_lines.NumLines() - 1)):\n      trailing_text += clean_lines.elided[offset]\n    # We also suppress warnings for `uint64_t{expression}` etc., as the style\n    # guide recommends brace initialization for integral types to avoid\n    # overflow/truncation.\n    if (not Match(r'^[\\s}]*[{.;,)<>\\]:]', trailing_text)\n        and not _IsType(clean_lines, nesting_state, leading_text)):\n      error(filename, linenum, 'whitespace/braces', 5,\n            'Missing space before {')\n\n  # Make sure '} else {' has spaces.\n  if Search(r'}else', line):\n    error(filename, linenum, 'whitespace/braces', 5,\n          'Missing space before else')\n\n  # You shouldn't have a space before a semicolon at the end of the line.\n  # There's a special case for \"for\" since the style guide allows space before\n  # the semicolon there.\n  if Search(r':\\s*;\\s*$', line):\n    error(filename, linenum, 'whitespace/semicolon', 5,\n          'Semicolon defining empty statement. Use {} instead.')\n  elif Search(r'^\\s*;\\s*$', line):\n    error(filename, linenum, 'whitespace/semicolon', 5,\n          'Line contains only semicolon. If this should be an empty statement, '\n          'use {} instead.')\n  elif (Search(r'\\s+;\\s*$', line) and\n        not Search(r'\\bfor\\b', line)):\n    error(filename, linenum, 'whitespace/semicolon', 5,\n          'Extra space before last semicolon. 
If this should be an empty '\n          'statement, use {} instead.')\n\n\ndef IsDecltype(clean_lines, linenum, column):\n  \"\"\"Check if the token ending on (linenum, column) is decltype().\n\n  Args:\n    clean_lines: A CleansedLines instance containing the file.\n    linenum: the number of the line to check.\n    column: end column of the token to check.\n  Returns:\n    True if this token is decltype() expression, False otherwise.\n  \"\"\"\n  (text, _, start_col) = ReverseCloseExpression(clean_lines, linenum, column)\n  if start_col < 0:\n    return False\n  if Search(r'\\bdecltype\\s*$', text[0:start_col]):\n    return True\n  return False\n\n\ndef CheckSectionSpacing(filename, clean_lines, class_info, linenum, error):\n  \"\"\"Checks for additional blank line issues related to sections.\n\n  Currently the only thing checked here is blank line before protected/private.\n\n  Args:\n    filename: The name of the current file.\n    clean_lines: A CleansedLines instance containing the file.\n    class_info: A _ClassInfo objects.\n    linenum: The number of the line to check.\n    error: The function to call with any errors found.\n  \"\"\"\n  # Skip checks if the class is small, where small means 25 lines or less.\n  # 25 lines seems like a good cutoff since that's the usual height of\n  # terminals, and any class that can't fit in one screen can't really\n  # be considered \"small\".\n  #\n  # Also skip checks if we are on the first line.  This accounts for\n  # classes that look like\n  #   class Foo { public: ... 
};\n  #\n  # If we didn't find the end of the class, last_line would be zero,\n  # and the check will be skipped by the first condition.\n  if (class_info.last_line - class_info.starting_linenum <= 24 or\n      linenum <= class_info.starting_linenum):\n    return\n\n  matched = Match(r'\\s*(public|protected|private):', clean_lines.lines[linenum])\n  if matched:\n    # Issue warning if the line before public/protected/private was\n    # not a blank line, but don't do this if the previous line contains\n    # \"class\" or \"struct\".  This can happen two ways:\n    #  - We are at the beginning of the class.\n    #  - We are forward-declaring an inner class that is semantically\n    #    private, but needed to be public for implementation reasons.\n    # Also ignores cases where the previous line ends with a backslash as can be\n    # common when defining classes in C macros.\n    prev_line = clean_lines.lines[linenum - 1]\n    if (not IsBlankLine(prev_line) and\n        not Search(r'\\b(class|struct)\\b', prev_line) and\n        not Search(r'\\\\$', prev_line)):\n      # Try a bit harder to find the beginning of the class.  This is to\n      # account for multi-line base-specifier lists, e.g.:\n      #   class Derived\n      #       : public Base {\n      end_class_head = class_info.starting_linenum\n      for i in range(class_info.starting_linenum, linenum):\n        if Search(r'\\{\\s*$', clean_lines.lines[i]):\n          end_class_head = i\n          break\n      if end_class_head < linenum - 1:\n        error(filename, linenum, 'whitespace/blank_line', 3,\n              '\"%s:\" should be preceded by a blank line' % matched.group(1))\n\n\ndef GetPreviousNonBlankLine(clean_lines, linenum):\n  \"\"\"Return the most recent non-blank line and its line number.\n\n  Args:\n    clean_lines: A CleansedLines instance containing the file contents.\n    linenum: The number of the line to check.\n\n  Returns:\n    A tuple with two elements.  
The first element is the contents of the last\n    non-blank line before the current line, or the empty string if this is the\n    first non-blank line.  The second is the line number of that line, or -1\n    if this is the first non-blank line.\n  \"\"\"\n\n  prevlinenum = linenum - 1\n  while prevlinenum >= 0:\n    prevline = clean_lines.elided[prevlinenum]\n    if not IsBlankLine(prevline):     # if not a blank line...\n      return (prevline, prevlinenum)\n    prevlinenum -= 1\n  return ('', -1)\n\n\ndef CheckBraces(filename, clean_lines, linenum, error):\n  \"\"\"Looks for misplaced braces (e.g. at the end of line).\n\n  Args:\n    filename: The name of the current file.\n    clean_lines: A CleansedLines instance containing the file.\n    linenum: The number of the line to check.\n    error: The function to call with any errors found.\n  \"\"\"\n\n  line = clean_lines.elided[linenum]        # get rid of comments and strings\n\n  if Match(r'\\s*{\\s*$', line):\n    # We allow an open brace to start a line in the case where someone is using\n    # braces in a block to explicitly create a new scope, which is commonly used\n    # to control the lifetime of stack-allocated variables.  Braces are also\n    # used for brace initializers inside function calls.  We don't detect this\n    # perfectly: we just don't complain if the last non-whitespace character on\n    # the previous non-blank line is ',', ';', ':', '(', '{', or '}', or if the\n    # previous line starts a preprocessor block. 
We also allow a brace on the\n    # following line if it is part of an array initialization and would not fit\n    # within the 80 character limit of the preceding line.\n    prevline = GetPreviousNonBlankLine(clean_lines, linenum)[0]\n    if (not Search(r'[,;:}{(]\\s*$', prevline) and\n        not Match(r'\\s*#', prevline) and\n        not (GetLineWidth(prevline) > _line_length - 2 and '[]' in prevline)):\n      error(filename, linenum, 'whitespace/braces', 4,\n            '{ should almost always be at the end of the previous line')\n\n  # An else clause should be on the same line as the preceding closing brace.\n  if Match(r'\\s*else\\b\\s*(?:if\\b|\\{|$)', line):\n    prevline = GetPreviousNonBlankLine(clean_lines, linenum)[0]\n    if Match(r'\\s*}\\s*$', prevline):\n      error(filename, linenum, 'whitespace/newline', 4,\n            'An else should appear on the same line as the preceding }')\n\n  # If braces come on one side of an else, they should be on both.\n  # However, we have to worry about \"else if\" that spans multiple lines!\n  if Search(r'else if\\s*\\(', line):       # could be multi-line if\n    brace_on_left = bool(Search(r'}\\s*else if\\s*\\(', line))\n    # find the ( after the if\n    pos = line.find('else if')\n    pos = line.find('(', pos)\n    if pos > 0:\n      (endline, _, endpos) = CloseExpression(clean_lines, linenum, pos)\n      brace_on_right = endline[endpos:].find('{') != -1\n      if brace_on_left != brace_on_right:    # must be brace after if\n        error(filename, linenum, 'readability/braces', 5,\n              'If an else has a brace on one side, it should have it on both')\n  elif Search(r'}\\s*else[^{]*$', line) or Match(r'[^}]*else\\s*{', line):\n    error(filename, linenum, 'readability/braces', 5,\n          'If an else has a brace on one side, it should have it on both')\n\n  # Likewise, an else should never have the else clause on the same line\n  if Search(r'\\belse [^\\s{]', line) and not Search(r'\\belse if\\b', 
line):\n    error(filename, linenum, 'whitespace/newline', 4,\n          'Else clause should never be on same line as else (use 2 lines)')\n\n  # In the same way, a do/while should never be on one line\n  if Match(r'\\s*do [^\\s{]', line):\n    error(filename, linenum, 'whitespace/newline', 4,\n          'do/while clauses should not be on a single line')\n\n  # Check single-line if/else bodies. The style guide says 'curly braces are not\n  # required for single-line statements'. We additionally allow multi-line,\n  # single statements, but we reject anything with more than one semicolon in\n  # it. This means that the first semicolon after the if should be at the end of\n  # its line, and the line after that should have an indent level equal to or\n  # lower than the if. We also check for ambiguous if/else nesting without\n  # braces.\n  if_else_match = Search(r'\\b(if\\s*\\(|else\\b)', line)\n  if if_else_match and not Match(r'\\s*#', line):\n    if_indent = GetIndentLevel(line)\n    endline, endlinenum, endpos = line, linenum, if_else_match.end()\n    if_match = Search(r'\\bif\\s*\\(', line)\n    if if_match:\n      # This could be a multiline if condition, so find the end first.\n      pos = if_match.end() - 1\n      (endline, endlinenum, endpos) = CloseExpression(clean_lines, linenum, pos)\n    # Check for an opening brace, either directly after the if or on the next\n    # line. 
If found, this isn't a single-statement conditional.\n    if (not Match(r'\\s*{', endline[endpos:])\n        and not (Match(r'\\s*$', endline[endpos:])\n                 and endlinenum < (len(clean_lines.elided) - 1)\n                 and Match(r'\\s*{', clean_lines.elided[endlinenum + 1]))):\n      while (endlinenum < len(clean_lines.elided)\n             and ';' not in clean_lines.elided[endlinenum][endpos:]):\n        endlinenum += 1\n        endpos = 0\n      if endlinenum < len(clean_lines.elided):\n        endline = clean_lines.elided[endlinenum]\n        # We allow a mix of whitespace and closing braces (e.g. for one-liner\n        # methods) and a single \\ after the semicolon (for macros)\n        endpos = endline.find(';')\n        if not Match(r';[\\s}]*(\\\\?)$', endline[endpos:]):\n          # Semicolon isn't the last character, there's something trailing.\n          # Output a warning if the semicolon is not contained inside\n          # a lambda expression.\n          if not Match(r'^[^{};]*\\[[^\\[\\]]*\\][^{}]*\\{[^{}]*\\}\\s*\\)*[;,]\\s*$',\n                       endline):\n            error(filename, linenum, 'readability/braces', 4,\n                  'If/else bodies with multiple statements require braces')\n        elif endlinenum < len(clean_lines.elided) - 1:\n          # Make sure the next line is dedented\n          next_line = clean_lines.elided[endlinenum + 1]\n          next_indent = GetIndentLevel(next_line)\n          # With ambiguous nested if statements, this will error out on the\n          # if that *doesn't* match the else, regardless of whether it's the\n          # inner one or outer one.\n          if (if_match and Match(r'\\s*else\\b', next_line)\n              and next_indent != if_indent):\n            error(filename, linenum, 'readability/braces', 4,\n                  'Else clause should be indented at the same level as if. 
'\n                  'Ambiguous nested if/else chains require braces.')\n          elif next_indent > if_indent:\n            error(filename, linenum, 'readability/braces', 4,\n                  'If/else bodies with multiple statements require braces')\n\n\ndef CheckTrailingSemicolon(filename, clean_lines, linenum, error):\n  \"\"\"Looks for redundant trailing semicolon.\n\n  Args:\n    filename: The name of the current file.\n    clean_lines: A CleansedLines instance containing the file.\n    linenum: The number of the line to check.\n    error: The function to call with any errors found.\n  \"\"\"\n\n  line = clean_lines.elided[linenum]\n\n  # Block bodies should not be followed by a semicolon.  Due to C++11\n  # brace initialization, there are more places where semicolons are\n  # required than not, so we use an allowed list approach to check these\n  # rather than an exclusion list.  These are the places where \"};\" should\n  # be replaced by just \"}\":\n  # 1. Some flavor of block following closing parenthesis:\n  #    for (;;) {};\n  #    while (...) {};\n  #    switch (...) {};\n  #    Function(...) {};\n  #    if (...) {};\n  #    if (...) else if (...) {};\n  #\n  # 2. else block:\n  #    if (...) else {};\n  #\n  # 3. const member function:\n  #    Function(...) const {};\n  #\n  # 4. Block following some statement:\n  #    x = 42;\n  #    {};\n  #\n  # 5. Block at the beginning of a function:\n  #    Function(...) {\n  #      {};\n  #    }\n  #\n  #    Note that naively checking for the preceding \"{\" will also match\n  #    braces inside multi-dimensional arrays, but this is fine since\n  #    that expression will not contain semicolons.\n  #\n  # 6. Block following another block:\n  #    while (true) {}\n  #    {};\n  #\n  # 7. End of namespaces:\n  #    namespace {};\n  #\n  #    These semicolons seems far more common than other kinds of\n  #    redundant semicolons, possibly due to people converting classes\n  #    to namespaces.  
For now we do not warn for this case.\n  #\n  # Try matching case 1 first.\n  match = Match(r'^(.*\\)\\s*)\\{', line)\n  if match:\n    # Matched closing parenthesis (case 1).  Check the token before the\n    # matching opening parenthesis, and don't warn if it looks like a\n    # macro.  This avoids these false positives:\n    #  - macro that defines a base class\n    #  - multi-line macro that defines a base class\n    #  - macro that defines the whole class-head\n    #\n    # But we still issue warnings for macros that we know are safe to\n    # warn, specifically:\n    #  - TEST, TEST_F, TEST_P, MATCHER, MATCHER_P\n    #  - TYPED_TEST\n    #  - INTERFACE_DEF\n    #  - EXCLUSIVE_LOCKS_REQUIRED, SHARED_LOCKS_REQUIRED, LOCKS_EXCLUDED:\n    #\n    # We implement a list of allowed safe macros instead of a list of\n    # unsafe macros, even though the latter appears less frequently in\n    # google code and would have been easier to implement.  This is because\n    # the downside for getting the allowed list wrong means some extra\n    # semicolons, while the downside for getting the exclusion list wrong\n    # would result in compile errors.\n    #\n    # In addition to macros, we also don't want to warn on\n    #  - Compound literals\n    #  - Lambdas\n    #  - alignas specifier with anonymous structs\n    #  - decltype\n    closing_brace_pos = match.group(1).rfind(')')\n    opening_parenthesis = ReverseCloseExpression(\n        clean_lines, linenum, closing_brace_pos)\n    if opening_parenthesis[2] > -1:\n      line_prefix = opening_parenthesis[0][0:opening_parenthesis[2]]\n      macro = Search(r'\\b([A-Z_][A-Z0-9_]*)\\s*$', line_prefix)\n      func = Match(r'^(.*\\])\\s*$', line_prefix)\n      if ((macro and\n           macro.group(1) not in (\n               'TEST', 'TEST_F', 'MATCHER', 'MATCHER_P', 'TYPED_TEST',\n               'EXCLUSIVE_LOCKS_REQUIRED', 'SHARED_LOCKS_REQUIRED',\n               'LOCKS_EXCLUDED', 'INTERFACE_DEF')) or\n          (func and not 
Search(r'\\boperator\\s*\\[\\s*\\]', func.group(1))) or\n          Search(r'\\b(?:struct|union)\\s+alignas\\s*$', line_prefix) or\n          Search(r'\\bdecltype$', line_prefix) or\n          Search(r'\\s+=\\s*$', line_prefix)):\n        match = None\n    if (match and\n        opening_parenthesis[1] > 1 and\n        Search(r'\\]\\s*$', clean_lines.elided[opening_parenthesis[1] - 1])):\n      # Multi-line lambda-expression\n      match = None\n\n  else:\n    # Try matching cases 2-3.\n    match = Match(r'^(.*(?:else|\\)\\s*const)\\s*)\\{', line)\n    if not match:\n      # Try matching cases 4-6.  These are always matched on separate lines.\n      #\n      # Note that we can't simply concatenate the previous line to the\n      # current line and do a single match, otherwise we may output\n      # duplicate warnings for the blank line case:\n      #   if (cond) {\n      #     // blank line\n      #   }\n      prevline = GetPreviousNonBlankLine(clean_lines, linenum)[0]\n      if prevline and Search(r'[;{}]\\s*$', prevline):\n        match = Match(r'^(\\s*)\\{', line)\n\n  # Check matching closing brace\n  if match:\n    (endline, endlinenum, endpos) = CloseExpression(\n        clean_lines, linenum, len(match.group(1)))\n    if endpos > -1 and Match(r'^\\s*;', endline[endpos:]):\n      # Current {} pair is eligible for semicolon check, and we have found\n      # the redundant semicolon, output warning here.\n      #\n      # Note: because we are scanning forward for opening braces, and\n      # outputting warnings for the matching closing brace, if there are\n      # nested blocks with trailing semicolons, we will get the error\n      # messages in reversed order.\n\n      # We need to check the line forward for NOLINT\n      raw_lines = clean_lines.raw_lines\n      ParseNolintSuppressions(filename, raw_lines[endlinenum-1], endlinenum-1,\n                              error)\n      ParseNolintSuppressions(filename, raw_lines[endlinenum], endlinenum,\n                  
            error)\n\n      error(filename, endlinenum, 'readability/braces', 4,\n            \"You don't need a ; after a }\")\n\n\ndef CheckEmptyBlockBody(filename, clean_lines, linenum, error):\n  \"\"\"Look for empty loop/conditional body with only a single semicolon.\n\n  Args:\n    filename: The name of the current file.\n    clean_lines: A CleansedLines instance containing the file.\n    linenum: The number of the line to check.\n    error: The function to call with any errors found.\n  \"\"\"\n\n  # Search for loop keywords at the beginning of the line.  Because only\n  # whitespaces are allowed before the keywords, this will also ignore most\n  # do-while-loops, since those lines should start with closing brace.\n  #\n  # We also check \"if\" blocks here, since an empty conditional block\n  # is likely an error.\n  line = clean_lines.elided[linenum]\n  matched = Match(r'\\s*(for|while|if)\\s*\\(', line)\n  if matched:\n    # Find the end of the conditional expression.\n    (end_line, end_linenum, end_pos) = CloseExpression(\n        clean_lines, linenum, line.find('('))\n\n    # Output warning if what follows the condition expression is a semicolon.\n    # No warning for all other cases, including whitespace or newline, since we\n    # have a separate check for semicolons preceded by whitespace.\n    if end_pos >= 0 and Match(r';', end_line[end_pos:]):\n      if matched.group(1) == 'if':\n        error(filename, end_linenum, 'whitespace/empty_conditional_body', 5,\n              'Empty conditional bodies should use {}')\n      else:\n        error(filename, end_linenum, 'whitespace/empty_loop_body', 5,\n              'Empty loop bodies should use {} or continue')\n\n    # Check for if statements that have completely empty bodies (no comments)\n    # and no else clauses.\n    if end_pos >= 0 and matched.group(1) == 'if':\n      # Find the position of the opening { for the if statement.\n      # Return without logging an error if it has no brackets.\n      
opening_linenum = end_linenum\n      opening_line_fragment = end_line[end_pos:]\n      # Loop until EOF or find anything that's not whitespace or opening {.\n      while not Search(r'^\\s*\\{', opening_line_fragment):\n        if Search(r'^(?!\\s*$)', opening_line_fragment):\n          # Conditional has no brackets.\n          return\n        opening_linenum += 1\n        if opening_linenum == len(clean_lines.elided):\n          # Couldn't find conditional's opening { or any code before EOF.\n          return\n        opening_line_fragment = clean_lines.elided[opening_linenum]\n      # Set opening_line (opening_line_fragment may not be entire opening line).\n      opening_line = clean_lines.elided[opening_linenum]\n\n      # Find the position of the closing }.\n      opening_pos = opening_line_fragment.find('{')\n      if opening_linenum == end_linenum:\n        # We need to make opening_pos relative to the start of the entire line.\n        opening_pos += end_pos\n      (closing_line, closing_linenum, closing_pos) = CloseExpression(\n          clean_lines, opening_linenum, opening_pos)\n      if closing_pos < 0:\n        return\n\n      # Now construct the body of the conditional. This consists of the portion\n      # of the opening line after the {, all lines until the closing line,\n      # and the portion of the closing line before the }.\n      if (clean_lines.raw_lines[opening_linenum] !=\n          CleanseComments(clean_lines.raw_lines[opening_linenum])):\n        # Opening line ends with a comment, so conditional isn't empty.\n        return\n      if closing_linenum > opening_linenum:\n        # Opening line after the {. Ignore comments here since we checked above.\n        body = list(opening_line[opening_pos+1:])\n        # All lines until closing line, excluding closing line, with comments.\n        body.extend(clean_lines.raw_lines[opening_linenum+1:closing_linenum])\n        # Closing line before the }. 
Won't (and can't) have comments.\n        body.append(clean_lines.elided[closing_linenum][:closing_pos-1])\n        body = '\\n'.join(body)\n      else:\n        # If statement has brackets and fits on a single line.\n        body = opening_line[opening_pos+1:closing_pos-1]\n\n      # Check if the body is empty\n      if not _EMPTY_CONDITIONAL_BODY_PATTERN.search(body):\n        return\n      # The body is empty. Now make sure there's not an else clause.\n      current_linenum = closing_linenum\n      current_line_fragment = closing_line[closing_pos:]\n      # Loop until EOF or find anything that's not whitespace or else clause.\n      while Search(r'^\\s*$|^(?=\\s*else)', current_line_fragment):\n        if Search(r'^(?=\\s*else)', current_line_fragment):\n          # Found an else clause, so don't log an error.\n          return\n        current_linenum += 1\n        if current_linenum == len(clean_lines.elided):\n          break\n        current_line_fragment = clean_lines.elided[current_linenum]\n\n      # The body is empty and there's no else clause until EOF or other code.\n      error(filename, end_linenum, 'whitespace/empty_if_body', 4,\n            ('If statement had no body and no else clause'))\n\n\ndef FindCheckMacro(line):\n  \"\"\"Find a replaceable CHECK-like macro.\n\n  Args:\n    line: line to search on.\n  Returns:\n    (macro name, start position), or (None, -1) if no replaceable\n    macro is found.\n  \"\"\"\n  for macro in _CHECK_MACROS:\n    i = line.find(macro)\n    if i >= 0:\n      # Find opening parenthesis.  
Do a regular expression match here\n      # to make sure that we are matching the expected CHECK macro, as\n      # opposed to some other macro that happens to contain the CHECK\n      # substring.\n      matched = Match(r'^(.*\\b' + macro + r'\\s*)\\(', line)\n      if not matched:\n        continue\n      return (macro, len(matched.group(1)))\n  return (None, -1)\n\n\ndef CheckCheck(filename, clean_lines, linenum, error):\n  \"\"\"Checks the use of CHECK and EXPECT macros.\n\n  Args:\n    filename: The name of the current file.\n    clean_lines: A CleansedLines instance containing the file.\n    linenum: The number of the line to check.\n    error: The function to call with any errors found.\n  \"\"\"\n\n  # Decide the set of replacement macros that should be suggested\n  lines = clean_lines.elided\n  (check_macro, start_pos) = FindCheckMacro(lines[linenum])\n  if not check_macro:\n    return\n\n  # Find end of the boolean expression by matching parentheses\n  (last_line, end_line, end_pos) = CloseExpression(\n      clean_lines, linenum, start_pos)\n  if end_pos < 0:\n    return\n\n  # If the check macro is followed by something other than a\n  # semicolon, assume users will log their own custom error messages\n  # and don't suggest any replacements.\n  if not Match(r'\\s*;', last_line[end_pos:]):\n    return\n\n  if linenum == end_line:\n    expression = lines[linenum][start_pos + 1:end_pos - 1]\n  else:\n    expression = lines[linenum][start_pos + 1:]\n    for i in xrange(linenum + 1, end_line):\n      expression += lines[i]\n    expression += last_line[0:end_pos - 1]\n\n  # Parse expression so that we can take parentheses into account.\n  # This avoids false positives for inputs like \"CHECK((a < 4) == b)\",\n  # which is not replaceable by CHECK_LE.\n  lhs = ''\n  rhs = ''\n  operator = None\n  while expression:\n    matched = Match(r'^\\s*(<<|<<=|>>|>>=|->\\*|->|&&|\\|\\||'\n                    r'==|!=|>=|>|<=|<|\\()(.*)$', expression)\n    if matched:\n     
 token = matched.group(1)\n      if token == '(':\n        # Parenthesized operand\n        expression = matched.group(2)\n        (end, _) = FindEndOfExpressionInLine(expression, 0, ['('])\n        if end < 0:\n          return  # Unmatched parenthesis\n        lhs += '(' + expression[0:end]\n        expression = expression[end:]\n      elif token in ('&&', '||'):\n        # Logical and/or operators.  This means the expression\n        # contains more than one term, for example:\n        #   CHECK(42 < a && a < b);\n        #\n        # These are not replaceable with CHECK_LE, so bail out early.\n        return\n      elif token in ('<<', '<<=', '>>', '>>=', '->*', '->'):\n        # Non-relational operator\n        lhs += token\n        expression = matched.group(2)\n      else:\n        # Relational operator\n        operator = token\n        rhs = matched.group(2)\n        break\n    else:\n      # Unparenthesized operand.  Instead of appending to lhs one character\n      # at a time, we do another regular expression match to consume several\n      # characters at once if possible.  Trivial benchmark shows that this\n      # is more efficient when the operands are longer than a single\n      # character, which is generally the case.\n      matched = Match(r'^([^-=!<>()&|]+)(.*)$', expression)\n      if not matched:\n        matched = Match(r'^(\\s*\\S)(.*)$', expression)\n        if not matched:\n          break\n      lhs += matched.group(1)\n      expression = matched.group(2)\n\n  # Only apply checks if we got all parts of the boolean expression\n  if not (lhs and operator and rhs):\n    return\n\n  # Check that rhs do not contain logical operators.  We already know\n  # that lhs is fine since the loop above parses out && and ||.\n  if rhs.find('&&') > -1 or rhs.find('||') > -1:\n    return\n\n  # At least one of the operands must be a constant literal.  
This is\n  # to avoid suggesting replacements for unprintable things like\n  # CHECK(variable != iterator)\n  #\n  # The following pattern matches decimal, hex integers, strings, and\n  # characters (in that order).\n  lhs = lhs.strip()\n  rhs = rhs.strip()\n  match_constant = r'^([-+]?(\\d+|0[xX][0-9a-fA-F]+)[lLuU]{0,3}|\".*\"|\\'.*\\')$'\n  if Match(match_constant, lhs) or Match(match_constant, rhs):\n    # Note: since we know both lhs and rhs, we can provide a more\n    # descriptive error message like:\n    #   Consider using CHECK_EQ(x, 42) instead of CHECK(x == 42)\n    # Instead of:\n    #   Consider using CHECK_EQ instead of CHECK(a == b)\n    #\n    # We are still keeping the less descriptive message because if lhs\n    # or rhs gets long, the error message might become unreadable.\n    error(filename, linenum, 'readability/check', 2,\n          'Consider using %s instead of %s(a %s b)' % (\n              _CHECK_REPLACEMENT[check_macro][operator],\n              check_macro, operator))\n\n\ndef CheckAltTokens(filename, clean_lines, linenum, error):\n  \"\"\"Check alternative keywords being used in boolean expressions.\n\n  Args:\n    filename: The name of the current file.\n    clean_lines: A CleansedLines instance containing the file.\n    linenum: The number of the line to check.\n    error: The function to call with any errors found.\n  \"\"\"\n  line = clean_lines.elided[linenum]\n\n  # Avoid preprocessor lines\n  if Match(r'^\\s*#', line):\n    return\n\n  # Last ditch effort to avoid multi-line comments.  This will not help\n  # if the comment started before the current line or ended after the\n  # current line, but it catches most of the false positives.  
At least,\n  # it provides a way to workaround this warning for people who use\n  # multi-line comments in preprocessor macros.\n  #\n  # TODO(unknown): remove this once cpplint has better support for\n  # multi-line comments.\n  if line.find('/*') >= 0 or line.find('*/') >= 0:\n    return\n\n  for match in _ALT_TOKEN_REPLACEMENT_PATTERN.finditer(line):\n    error(filename, linenum, 'readability/alt_tokens', 2,\n          'Use operator %s instead of %s' % (\n              _ALT_TOKEN_REPLACEMENT[match.group(1)], match.group(1)))\n\n\ndef GetLineWidth(line):\n  \"\"\"Determines the width of the line in column positions.\n\n  Args:\n    line: A string, which may be a Unicode string.\n\n  Returns:\n    The width of the line in column positions, accounting for Unicode\n    combining characters and wide characters.\n  \"\"\"\n  if isinstance(line, unicode):\n    width = 0\n    for uc in unicodedata.normalize('NFC', line):\n      if unicodedata.east_asian_width(uc) in ('W', 'F'):\n        width += 2\n      elif not unicodedata.combining(uc):\n        # Issue 337\n        # https://mail.python.org/pipermail/python-list/2012-August/628809.html\n        if (sys.version_info.major, sys.version_info.minor) <= (3, 2):\n          # https://github.com/python/cpython/blob/2.7/Include/unicodeobject.h#L81\n          is_wide_build = sysconfig.get_config_var(\"Py_UNICODE_SIZE\") >= 4\n          # https://github.com/python/cpython/blob/2.7/Objects/unicodeobject.c#L564\n          is_low_surrogate = 0xDC00 <= ord(uc) <= 0xDFFF\n          if not is_wide_build and is_low_surrogate:\n            width -= 1\n          \n        width += 1\n    return width\n  else:\n    return len(line)\n\n\ndef CheckStyle(filename, clean_lines, linenum, file_extension, nesting_state,\n               error):\n  \"\"\"Checks rules from the 'C++ style rules' section of cppguide.html.\n\n  Most of these rules are hard to test (naming, comment style), but we\n  do what we can.  
In particular we check for 2-space indents, line lengths,\n  tab usage, spaces inside code, etc.\n\n  Args:\n    filename: The name of the current file.\n    clean_lines: A CleansedLines instance containing the file.\n    linenum: The number of the line to check.\n    file_extension: The extension (without the dot) of the filename.\n    nesting_state: A NestingState instance which maintains information about\n                   the current stack of nested blocks being parsed.\n    error: The function to call with any errors found.\n  \"\"\"\n\n  # Don't use \"elided\" lines here, otherwise we can't check commented lines.\n  # Don't want to use \"raw\" either, because we don't want to check inside C++11\n  # raw strings,\n  raw_lines = clean_lines.lines_without_raw_strings\n  line = raw_lines[linenum]\n  prev = raw_lines[linenum - 1] if linenum > 0 else ''\n\n  if line.find('\\t') != -1:\n    error(filename, linenum, 'whitespace/tab', 1,\n          'Tab found; better to use spaces')\n\n  # One or three blank spaces at the beginning of the line is weird; it's\n  # hard to reconcile that with 2-space indents.\n  # NOTE: here are the conditions rob pike used for his tests.  
Mine aren't\n  # as sophisticated, but it may be worth becoming so:  RLENGTH==initial_spaces\n  # if(RLENGTH > 20) complain = 0;\n  # if(match($0, \" +(error|private|public|protected):\")) complain = 0;\n  # if(match(prev, \"&& *$\")) complain = 0;\n  # if(match(prev, \"\\\\|\\\\| *$\")) complain = 0;\n  # if(match(prev, \"[\\\",=><] *$\")) complain = 0;\n  # if(match($0, \" <<\")) complain = 0;\n  # if(match(prev, \" +for \\\\(\")) complain = 0;\n  # if(prevodd && match(prevprev, \" +for \\\\(\")) complain = 0;\n  scope_or_label_pattern = r'\\s*\\w+\\s*:\\s*\\\\?$'\n  classinfo = nesting_state.InnermostClass()\n  initial_spaces = 0\n  cleansed_line = clean_lines.elided[linenum]\n  while initial_spaces < len(line) and line[initial_spaces] == ' ':\n    initial_spaces += 1\n  # There are certain situations we allow one space, notably for\n  # section labels, and also lines containing multi-line raw strings.\n  # We also don't check for lines that look like continuation lines\n  # (of lines ending in double quotes, commas, equals, or angle brackets)\n  # because the rules for how to indent those are non-trivial.\n  if (not Search(r'[\",=><] *$', prev) and\n      (initial_spaces == 1 or initial_spaces == 3) and\n      not Match(scope_or_label_pattern, cleansed_line) and\n      not (clean_lines.raw_lines[linenum] != line and\n           Match(r'^\\s*\"\"', line))):\n    error(filename, linenum, 'whitespace/indent', 3,\n          'Weird number of spaces at line-start.  '\n          'Are you using a 2-space indent?')\n\n  if line and line[-1].isspace():\n    error(filename, linenum, 'whitespace/end_of_line', 4,\n          'Line ends in whitespace.  
Consider deleting these extra spaces.')\n\n  # Check if the line is a header guard.\n  is_header_guard = False\n  if IsHeaderExtension(file_extension):\n    cppvar = GetHeaderGuardCPPVariable(filename)\n    if (line.startswith('#ifndef %s' % cppvar) or\n        line.startswith('#define %s' % cppvar) or\n        line.startswith('#endif  // %s' % cppvar)):\n      is_header_guard = True\n  # #include lines and header guards can be long, since there's no clean way to\n  # split them.\n  #\n  # URLs can be long too.  It's possible to split these, but it makes them\n  # harder to cut&paste.\n  #\n  # The \"$Id:...$\" comment may also get very long without it being the\n  # developers fault.\n  if (not line.startswith('#include') and not is_header_guard and\n      not Match(r'^\\s*//.*http(s?)://\\S*$', line) and\n      not Match(r'^\\s*//\\s*[^\\s]*$', line) and\n      not Match(r'^// \\$Id:.*#[0-9]+ \\$$', line)):\n    line_width = GetLineWidth(line)\n    if line_width > _line_length:\n      error(filename, linenum, 'whitespace/line_length', 2,\n            'Lines should be <= %i characters long' % _line_length)\n\n  if (cleansed_line.count(';') > 1 and\n      # for loops are allowed two ;'s (and may run over two lines).\n      cleansed_line.find('for') == -1 and\n      (GetPreviousNonBlankLine(clean_lines, linenum)[0].find('for') == -1 or\n       GetPreviousNonBlankLine(clean_lines, linenum)[0].find(';') != -1) and\n      # It's ok to have many commands in a switch case that fits in 1 line\n      not ((cleansed_line.find('case ') != -1 or\n            cleansed_line.find('default:') != -1) and\n           cleansed_line.find('break;') != -1)):\n    error(filename, linenum, 'whitespace/newline', 0,\n          'More than one command on the same line')\n\n  # Some more style checks\n  CheckBraces(filename, clean_lines, linenum, error)\n  CheckTrailingSemicolon(filename, clean_lines, linenum, error)\n  CheckEmptyBlockBody(filename, clean_lines, linenum, error)\n  
CheckSpacing(filename, clean_lines, linenum, nesting_state, error)\n  CheckOperatorSpacing(filename, clean_lines, linenum, error)\n  CheckParenthesisSpacing(filename, clean_lines, linenum, error)\n  CheckCommaSpacing(filename, clean_lines, linenum, error)\n  CheckBracesSpacing(filename, clean_lines, linenum, nesting_state, error)\n  CheckSpacingForFunctionCall(filename, clean_lines, linenum, error)\n  CheckCheck(filename, clean_lines, linenum, error)\n  CheckAltTokens(filename, clean_lines, linenum, error)\n  classinfo = nesting_state.InnermostClass()\n  if classinfo:\n    CheckSectionSpacing(filename, clean_lines, classinfo, linenum, error)\n\n\n_RE_PATTERN_INCLUDE = re.compile(r'^\\s*#\\s*include\\s*([<\"])([^>\"]*)[>\"].*$')\n# Matches the first component of a filename delimited by -s and _s. That is:\n#  _RE_FIRST_COMPONENT.match('foo').group(0) == 'foo'\n#  _RE_FIRST_COMPONENT.match('foo.cc').group(0) == 'foo'\n#  _RE_FIRST_COMPONENT.match('foo-bar_baz.cc').group(0) == 'foo'\n#  _RE_FIRST_COMPONENT.match('foo_bar-baz.cc').group(0) == 'foo'\n_RE_FIRST_COMPONENT = re.compile(r'^[^-_.]+')\n\n\ndef _DropCommonSuffixes(filename):\n  \"\"\"Drops common suffixes like _test.cc or -inl.h from filename.\n\n  For example:\n    >>> _DropCommonSuffixes('foo/foo-inl.h')\n    'foo/foo'\n    >>> _DropCommonSuffixes('foo/bar/foo.cc')\n    'foo/bar/foo'\n    >>> _DropCommonSuffixes('foo/foo_internal.h')\n    'foo/foo'\n    >>> _DropCommonSuffixes('foo/foo_unusualinternal.h')\n    'foo/foo_unusualinternal'\n\n  Args:\n    filename: The input filename.\n\n  Returns:\n    The filename with the common suffix removed.\n  \"\"\"\n  for suffix in ('test.cc', 'regtest.cc', 'unittest.cc',\n                 'inl.h', 'impl.h', 'internal.h'):\n    if (filename.endswith(suffix) and len(filename) > len(suffix) and\n        filename[-len(suffix) - 1] in ('-', '_')):\n      return filename[:-len(suffix) - 1]\n  return os.path.splitext(filename)[0]\n\n\ndef _ClassifyInclude(fileinfo, include, 
def _ClassifyInclude(fileinfo, include, is_system):
  """Figures out what kind of header 'include' is.

  Args:
    fileinfo: The current file cpplint is running over. A FileInfo instance.
    include: The path to a #included file.
    is_system: True if the #include used <> rather than "".

  Returns:
    One of the _XXX_HEADER constants.

  For example:
    >>> _ClassifyInclude(FileInfo('foo/foo.cc'), 'stdio.h', True)
    _C_SYS_HEADER
    >>> _ClassifyInclude(FileInfo('foo/foo.cc'), 'string', True)
    _CPP_SYS_HEADER
    >>> _ClassifyInclude(FileInfo('foo/foo.cc'), 'foo/foo.h', False)
    _LIKELY_MY_HEADER
    >>> _ClassifyInclude(FileInfo('foo/foo_unknown_extension.cc'),
    ...                  'bar/foo_other_ext.h', False)
    _POSSIBLE_MY_HEADER
    >>> _ClassifyInclude(FileInfo('foo/foo.cc'), 'foo/bar.h', False)
    _OTHER_HEADER
  """
  # System headers are classified purely by whether they appear in the
  # table of known C++ standard headers.
  if is_system:
    return _CPP_SYS_HEADER if include in _CPP_HEADERS else _C_SYS_HEADER

  # If the include shares the target's basename once common suffixes are
  # dropped, and lives in the same directory (or the sibling '../public'
  # directory), it's likely the target file's own header.
  target_dir, target_base = os.path.split(
      _DropCommonSuffixes(fileinfo.RepositoryName()))
  include_dir, include_base = os.path.split(_DropCommonSuffixes(include))
  public_dir = os.path.normpath(target_dir + '/../public')
  if target_base == include_base and include_dir in (target_dir, public_dir):
    return _LIKELY_MY_HEADER

  # Sharing only the first basename component (delimited by '-'/'_') is a
  # weaker hint: the target may be implementing the include, so it is
  # allowed to come first but never required to.
  target_first = _RE_FIRST_COMPONENT.match(target_base)
  include_first = _RE_FIRST_COMPONENT.match(include_base)
  if (target_first and include_first and
      target_first.group(0) == include_first.group(0)):
    return _POSSIBLE_MY_HEADER

  return _OTHER_HEADER
def CheckIncludeLine(filename, clean_lines, linenum, include_state, error):
  """Check rules that are applicable to #include lines.

  Strings on #include lines are NOT removed from elided line, to make
  certain tasks easier. However, to prevent false positives, checks
  applicable to #include lines in CheckLanguage must be put here.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    include_state: An _IncludeState instance in which the headers are inserted.
    error: The function to call with any errors found.
  """
  fileinfo = FileInfo(filename)
  line = clean_lines.lines[linenum]

  # A quoted include with no directory component ("bar.h") should name the
  # directory too, unless it matches the known third-party exceptions
  # (e.g. Lua follows google naming but not the include convention).
  bare_header = Match(r'#include\s*"([^/]+\.h)"', line)
  if bare_header and not _THIRD_PARTY_HEADERS_PATTERN.match(
      bare_header.group(1)):
    error(filename, linenum, 'build/include', 4,
          'Include the directory when naming .h files')

  include_match = _RE_PATTERN_INCLUDE.search(line)
  if not include_match:
    return

  include = include_match.group(2)
  is_system = include_match.group(1) == '<'

  # Including the same file twice is almost always a mistake.
  duplicate_line = include_state.FindHeader(include)
  if duplicate_line >= 0:
    error(filename, linenum, 'build/include', 4,
          '"%s" already included at %s:%s' %
          (include, filename, duplicate_line))
    return

  # Including a .cc from another package is never OK.
  if (include.endswith('.cc') and
      os.path.dirname(fileinfo.RepositoryName()) != os.path.dirname(include)):
    error(filename, linenum, 'build/include', 4,
          'Do not include .cc files from other packages')
    return

  if _THIRD_PARTY_HEADERS_PATTERN.match(include):
    return

  include_state.include_list[-1].append((include, linenum))

  # We want to ensure that headers appear in the right order:
  # 1) for foo.cc, foo.h  (preferred location)
  # 2) c system files
  # 3) cpp system files
  # 4) for foo.cc, foo.h  (deprecated location)
  # 5) other google headers
  #
  # The include_state object keeps track of the highest type seen, and
  # complains if we see a lower type after that.
  order_error = include_state.CheckNextIncludeOrder(
      _ClassifyInclude(fileinfo, include, is_system))
  if order_error:
    error(filename, linenum, 'build/include_order', 4,
          '%s. Should be: %s.h, c system, c++ system, other.' %
          (order_error, fileinfo.BaseName()))

  canonical_include = include_state.CanonicalizeAlphabeticalOrder(include)
  if not include_state.IsInAlphabeticalOrder(
      clean_lines, linenum, canonical_include):
    error(filename, linenum, 'build/include_alpha', 4,
          'Include "%s" not in alphabetical order' % include)
  include_state.SetLastHeader(canonical_include)
The include_state object keeps\n      # track of the highest type seen, and complains if we see a\n      # lower type after that.\n      error_message = include_state.CheckNextIncludeOrder(\n          _ClassifyInclude(fileinfo, include, is_system))\n      if error_message:\n        error(filename, linenum, 'build/include_order', 4,\n              '%s. Should be: %s.h, c system, c++ system, other.' %\n              (error_message, fileinfo.BaseName()))\n      canonical_include = include_state.CanonicalizeAlphabeticalOrder(include)\n      if not include_state.IsInAlphabeticalOrder(\n          clean_lines, linenum, canonical_include):\n        error(filename, linenum, 'build/include_alpha', 4,\n              'Include \"%s\" not in alphabetical order' % include)\n      include_state.SetLastHeader(canonical_include)\n\n\n\ndef _GetTextInside(text, start_pattern):\n  r\"\"\"Retrieves all the text between matching open and close parentheses.\n\n  Given a string of lines and a regular expression string, retrieve all the text\n  following the expression and between opening punctuation symbols like\n  (, [, or {, and the matching close-punctuation symbol. This properly nested\n  occurrences of the punctuations, so for the text like\n    printf(a(), b(c()));\n  a call to _GetTextInside(text, r'printf\\(') will return 'a(), b(c())'.\n  start_pattern must match string having an open punctuation symbol at the end.\n\n  Args:\n    text: The lines to extract text. 
Its comments and strings must be elided.\n           It can be single line and can span multiple lines.\n    start_pattern: The regexp string indicating where to start extracting\n                   the text.\n  Returns:\n    The extracted text.\n    None if either the opening string or ending punctuation could not be found.\n  \"\"\"\n  # TODO(unknown): Audit cpplint.py to see what places could be profitably\n  # rewritten to use _GetTextInside (and use inferior regexp matching today).\n\n  # Give opening punctuations to get the matching close-punctuations.\n  matching_punctuation = {'(': ')', '{': '}', '[': ']'}\n  closing_punctuation = set(matching_punctuation.itervalues())\n\n  # Find the position to start extracting text.\n  match = re.search(start_pattern, text, re.M)\n  if not match:  # start_pattern not found in text.\n    return None\n  start_position = match.end(0)\n\n  assert start_position > 0, (\n      'start_pattern must ends with an opening punctuation.')\n  assert text[start_position - 1] in matching_punctuation, (\n      'start_pattern must ends with an opening punctuation.')\n  # Stack of closing punctuations we expect to have in text after position.\n  punctuation_stack = [matching_punctuation[text[start_position - 1]]]\n  position = start_position\n  while punctuation_stack and position < len(text):\n    if text[position] == punctuation_stack[-1]:\n      punctuation_stack.pop()\n    elif text[position] in closing_punctuation:\n      # A closing punctuation without matching opening punctuations.\n      return None\n    elif text[position] in matching_punctuation:\n      punctuation_stack.append(matching_punctuation[text[position]])\n    position += 1\n  if punctuation_stack:\n    # Opening punctuations left without matching close-punctuations.\n    return None\n  # punctuations match.\n  return text[start_position:position - 1]\n\n\n# Patterns for matching call-by-reference parameters.\n#\n# Supports nested templates up to 2 levels deep using 
def CheckLanguage(filename, clean_lines, linenum, file_extension,
                  include_state, nesting_state, error):
  """Checks rules from the 'C++ language rules' section of cppguide.html.

  Some of these rules are hard to test (function overloading, using
  uint32 inappropriately), but we do the best we can.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    file_extension: The extension (without the dot) of the filename.
    include_state: An _IncludeState instance in which the headers are inserted.
    nesting_state: A NestingState instance which maintains information about
                   the current stack of nested blocks being parsed.
    error: The function to call with any errors found.
  """
  # If the line is empty or consists of entirely a comment, no need to
  # check it.
  line = clean_lines.elided[linenum]
  if not line:
    return

  match = _RE_PATTERN_INCLUDE.search(line)
  if match:
    CheckIncludeLine(filename, clean_lines, linenum, include_state, error)
    return

  # Reset include state across preprocessor directives.  This is meant
  # to silence warnings for conditional includes.
  match = Match(r'^\s*#\s*(if|ifdef|ifndef|elif|else|endif)\b', line)
  if match:
    include_state.ResetSection(match.group(1))

  # Perform other checks now that we are sure that this is not an include line
  CheckCasts(filename, clean_lines, linenum, error)
  CheckGlobalStatic(filename, clean_lines, linenum, error)
  CheckPrintf(filename, clean_lines, linenum, error)

  if IsHeaderExtension(file_extension):
    # TODO(unknown): check that 1-arg constructors are explicit.
    #                How to tell it's a constructor?
    #                (handled in CheckForNonStandardConstructs for now)
    # TODO(unknown): check that classes declare or disable copy/assign
    #                (level 1 error)
    pass

  # Check if people are using the verboten C basic types.  The only exception
  # we regularly allow is "unsigned short port" for port.
  if Search(r'\bshort port\b', line):
    if not Search(r'\bunsigned short port\b', line):
      error(filename, linenum, 'runtime/int', 4,
            'Use "unsigned short" for ports, not "short"')
  else:
    match = Search(r'\b(short|long(?! +double)|long long)\b', line)
    if match:
      error(filename, linenum, 'runtime/int', 4,
            'Use int16/int64/etc, rather than the C type %s' % match.group(1))

  # Check if some verboten operator overloading is going on
  # TODO(unknown): catch out-of-line unary operator&:
  #   class X {};
  #   int operator&(const X& x) { return 42; }  // unary operator&
  # The trick is it's hard to tell apart from binary operator&:
  #   class Y { int operator&(const Y& x) { return 23; } }; // binary operator&
  if Search(r'\boperator\s*&\s*\(\s*\)', line):
    error(filename, linenum, 'runtime/operator', 4,
          'Unary operator& is dangerous.  Do not use it.')

  # Check for suspicious usage of "if" like
  # } if (a == b) {
  if Search(r'\}\s*if\s*\(', line):
    error(filename, linenum, 'readability/braces', 4,
          'Did you mean "else if"? If not, start a new line for "if".')

  # Check for potential format string bugs like printf(foo).
  # We constrain the pattern not to pick things like DocidForPrintf(foo).
  # Not perfect but it can catch printf(foo.c_str()) and printf(foo->c_str())
  # TODO(unknown): Catch the following case. Need to change the calling
  # convention of the whole function to process multiple line to handle it.
  #   printf(
  #       boy_this_is_a_really_long_variable_that_cannot_fit_on_the_prev_line);
  printf_args = _GetTextInside(line, r'(?i)\b(string)?printf\s*\(')
  if printf_args:
    match = Match(r'([\w.\->()]+)$', printf_args)
    if match and match.group(1) != '__VA_ARGS__':
      function_name = re.search(r'\b((?:string)?printf)\s*\(',
                                line, re.I).group(1)
      error(filename, linenum, 'runtime/printf', 4,
            'Potential format string bug. Do %s("%%s", %s) instead.'
            % (function_name, match.group(1)))

  # Check for potential memset bugs like memset(buf, sizeof(buf), 0).
  # The last argument is OK only when it is '', a (possibly negative) decimal
  # number, or a hex number.  Group the alternation so that '^' and '$'
  # anchor the whole pattern (the old form bound them to the first and last
  # alternatives only), and allow more than one hex digit after '0x'.
  match = Search(r'memset\s*\(([^,]*),\s*([^,]*),\s*0\s*\)', line)
  if match and not Match(r"^(''|-?[0-9]+|0x[0-9A-Fa-f]+)$", match.group(2)):
    error(filename, linenum, 'runtime/memset', 4,
          'Did you mean "memset(%s, 0, %s)"?'
          % (match.group(1), match.group(2)))

  if Search(r'\busing namespace\b', line):
    error(filename, linenum, 'build/namespaces', 5,
          'Do not use namespace using-directives.  '
          'Use using-declarations instead.')

  # Detect variable-length arrays.
  match = Match(r'\s*(.+::)?(\w+) [a-z]\w*\[(.+)];', line)
  if (match and match.group(2) != 'return' and match.group(2) != 'delete' and
      match.group(3).find(']') == -1):
    # Split the size using space and arithmetic operators as delimiters.
    # If any of the resulting tokens are not compile time constants then
    # report the error.
    # NOTE: the previous pattern ended in '>>]' -- the ']' sat outside any
    # character class and was therefore a literal, so a plain '>>' never
    # acted as a delimiter.
    tokens = re.split(r'\s|\+|\-|\*|\/|<<|>>', match.group(3))
    is_const = True
    skip_next = False
    for tok in tokens:
      if skip_next:
        skip_next = False
        continue

      if Search(r'sizeof\(.+\)', tok): continue
      if Search(r'arraysize\(\w+\)', tok): continue

      tok = tok.lstrip('(')
      tok = tok.rstrip(')')
      if not tok: continue
      if Match(r'\d+', tok): continue
      if Match(r'0[xX][0-9a-fA-F]+', tok): continue
      if Match(r'k[A-Z0-9]\w*', tok): continue
      if Match(r'(.+::)?k[A-Z0-9]\w*', tok): continue
      if Match(r'(.+::)?[A-Z][A-Z0-9_]*', tok): continue
      # A catch all for tricky sizeof cases, including 'sizeof expression',
      # 'sizeof(*type)', 'sizeof(const type)', 'sizeof(struct StructName)'
      # requires skipping the next token because we split on ' ' and '*'.
      if tok.startswith('sizeof'):
        skip_next = True
        continue
      is_const = False
      break
    if not is_const:
      error(filename, linenum, 'runtime/arrays', 1,
            'Do not use variable-length arrays.  Use an appropriately named '
            "('k' followed by CamelCase) compile-time constant for the size.")

  # Check for use of unnamed namespaces in header files.  Registration
  # macros are typically OK, so we allow use of "namespace {" on lines
  # that end with backslashes.
  if (IsHeaderExtension(file_extension)
      and Search(r'\bnamespace\s*{', line)
      and line[-1] != '\\'):
    error(filename, linenum, 'build/namespaces', 4,
          'Do not use unnamed namespaces in header files.  See '
          'https://google-styleguide.googlecode.com/svn/trunk/cppguide.xml#Namespaces'
          ' for more information.')
def CheckGlobalStatic(filename, clean_lines, linenum, error):
  """Check for unsafe global or static objects.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    error: The function to call with any errors found.
  """
  line = clean_lines.elided[linenum]

  # Fold in the following line so multiline declarations are still matched.
  if linenum + 1 < clean_lines.NumLines() and not Search(r'[;({]', line):
    line += clean_lines.elided[linenum + 1].strip()

  # Global/static STL strings are dangerous: C++ does not guarantee that
  # globals with constructors are initialized before the first access, and
  # globals can be destroyed while some threads are still running.
  # TODO(unknown): Generalize this to also find static unique_ptr instances.
  # TODO(unknown): File bugs for clang-tidy to find these.
  decl = Match(
      r'((?:|static +)(?:|const +))(?::*std::)?string( +const)? +'
      r'([a-zA-Z0-9_:]+)\b(.*)',
      line)

  if decl:
    # Remove false positives:
    # - String pointers (as opposed to values):
    #     string *pointer / const string *pointer /
    #     string const *pointer / string *const pointer
    # - Functions and template specializations:
    #     string Function<Type>(... / string Class<Type>::Method(...
    # - Operators, matched separately because operator names cross
    #   non-word boundaries: string Class::operator*()
    is_pointer = Search(
        r'\bstring\b(\s+const)?\s*[\*\&]\s*(const\s+)?\w', line)
    is_operator = Search(r'\boperator\W', line)
    is_function = Match(
        r'\s*(<.*>)?(::[a-zA-Z0-9_]+)*\s*\(([^"]|$)', decl.group(4))
    if not is_pointer and not is_operator and not is_function:
      if Search(r'\bconst\b', line):
        error(filename, linenum, 'runtime/string', 4,
              'For a static/global string constant, use a C style string '
              'instead: "%schar%s %s[]".' %
              (decl.group(1), decl.group(2) or '', decl.group(3)))
      else:
        error(filename, linenum, 'runtime/string', 4,
              'Static/global string variables are not permitted.')

  # Catch members initialized with themselves, e.g. foo_(foo_).
  if (Search(r'\b([A-Za-z0-9_]*_)\(\1\)', line) or
      Search(r'\b([A-Za-z0-9_]*_)\(CHECK_NOTNULL\(\1\)\)', line)):
    error(filename, linenum, 'runtime/init', 4,
          'You seem to be initializing a member variable with itself.')
def CheckPrintf(filename, clean_lines, linenum, error):
  """Check for printf related issues.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    error: The function to call with any errors found.
  """
  line = clean_lines.elided[linenum]

  # snprintf's size argument should normally be sizeof(buffer) rather than a
  # bare literal.  A literal 0 is fine: that idiom computes the needed size.
  size_arg = Search(r'snprintf\s*\(([^,]*),\s*([0-9]*)\s*,', line)
  if size_arg and size_arg.group(2) != '0':
    error(filename, linenum, 'runtime/printf', 3,
          'If you can, use sizeof(%s) instead of %s as the 2nd arg '
          'to snprintf.' % (size_arg.group(1), size_arg.group(2)))

  # sprintf has no bounds checking at all.
  if Search(r'\bsprintf\s*\(', line):
    error(filename, linenum, 'runtime/printf', 5,
          'Never use sprintf. Use snprintf instead.')

  # Neither do strcpy/strcat.
  unsafe_copy = Search(r'\b(strcpy|strcat)\s*\(', line)
  if unsafe_copy:
    error(filename, linenum, 'runtime/printf', 4,
          'Almost always, snprintf is better than %s' % unsafe_copy.group(1))
def IsDerivedFunction(clean_lines, linenum):
  """Check if current line contains an inherited function.

  Args:
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
  Returns:
    True if current line contains a function with "override"
    virt-specifier.
  """
  # Scan back at most 10 lines for the start of the current function.
  for start in xrange(linenum, max(-1, linenum - 10), -1):
    signature = Match(r'^([^()]*\w+)\(', clean_lines.elided[start])
    if not signature:
      continue
    # Look for "override" after the matching closing parenthesis.
    line, _, closing_paren = CloseExpression(
        clean_lines, start, len(signature.group(1)))
    return (closing_paren >= 0 and
            Search(r'\boverride\b', line[closing_paren:]))
  return False


def IsOutOfLineMethodDefinition(clean_lines, linenum):
  """Check if current line contains an out-of-line method definition.

  Args:
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
  Returns:
    True if current line contains an out-of-line method definition.
  """
  # Scan back at most 10 lines for the start of the current function; a
  # qualified name (Class::Method) marks an out-of-line definition.
  for start in xrange(linenum, max(-1, linenum - 10), -1):
    if Match(r'^([^()]*\w+)\(', clean_lines.elided[start]):
      return Match(r'^[^()]*\w+::\w+\(', clean_lines.elided[start]) is not None
  return False
def IsInitializerList(clean_lines, linenum):
  """Check if current line is inside constructor initializer list.

  Args:
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
  Returns:
    True if current line appears to be inside constructor initializer
    list, False otherwise.
  """
  for lookback in xrange(linenum, 1, -1):
    candidate = clean_lines.elided[lookback]
    if lookback == linenum:
      # On the starting line, drop a trailing '{' that opens the body.
      body_open = Match(r'^(.*)\{\s*$', candidate)
      if body_open:
        candidate = body_open.group(1)

    if Search(r'\s:\s*\w+[({]', candidate):
      # A lone colon tends to start a constructor initializer list.  A
      # ternary operator could also match, but those likewise show up in
      # initializer lists rather than parameter lists.
      return True
    if Search(r'\}\s*,\s*$', candidate):
      # Closing brace followed by a comma: probably the end of a
      # brace-initialized member inside an initializer list.
      return True
    if Search(r'[{};]\s*$', candidate):
      # A brace or semicolon before we ever saw the starting colon: we
      # are past the end of the previous function or at the start of the
      # enclosing class/namespace, so not in an initializer list.
      return False

  # Reached the beginning of the file without seeing the starting colon.
  return False
def CheckForNonConstReference(filename, clean_lines, linenum,
                              nesting_state, error):
  """Check for non-const references.

  Separate from CheckLanguage since it scans backwards from current
  line, instead of scanning forward.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    nesting_state: A NestingState instance which maintains information about
                   the current stack of nested blocks being parsed.
    error: The function to call with any errors found.
  """
  # Do nothing if there is no '&' on current line.
  line = clean_lines.elided[linenum]
  if '&' not in line:
    return

  # If a function is inherited, current function doesn't have much of
  # a choice, so any non-const references should not be blamed on
  # derived function.
  if IsDerivedFunction(clean_lines, linenum):
    return

  # Don't warn on out-of-line method definitions, as we would warn on the
  # in-line declaration, if it isn't marked with 'override'.
  if IsOutOfLineMethodDefinition(clean_lines, linenum):
    return

  # Long type names may be broken across multiple lines, usually in one
  # of these forms:
  #   LongType
  #       ::LongTypeContinued &identifier
  #   LongType::
  #       LongTypeContinued &identifier
  #   LongType<
  #       ...>::LongTypeContinued &identifier
  #
  # If we detected a type split across two lines, join the previous
  # line to current line so that we can match const references
  # accordingly.
  #
  # Note that this only scans back one line, since scanning back
  # arbitrary number of lines would be expensive.  If you have a type
  # that spans more than 2 lines, please use a typedef.
  if linenum > 1:
    previous = None
    if Match(r'\s*::(?:[\w<>]|::)+\s*&\s*\S', line):
      # previous_line\n + ::current_line
      previous = Search(r'\b((?:const\s*)?(?:[\w<>]|::)+[\w<>])\s*$',
                        clean_lines.elided[linenum - 1])
    elif Match(r'\s*[a-zA-Z_]([\w<>]|::)+\s*&\s*\S', line):
      # previous_line::\n + current_line
      previous = Search(r'\b((?:const\s*)?(?:[\w<>]|::)+::)\s*$',
                        clean_lines.elided[linenum - 1])
    if previous:
      line = previous.group(1) + line.lstrip()
    else:
      # Check for templated parameter that is split across multiple lines
      endpos = line.rfind('>')
      if endpos > -1:
        (_, startline, startpos) = ReverseCloseExpression(
            clean_lines, linenum, endpos)
        if startpos > -1 and startline < linenum:
          # Found the matching < on an earlier line, collect all
          # pieces up to current line.
          line = ''
          for i in xrange(startline, linenum + 1):
            line += clean_lines.elided[i].strip()

  # Check for non-const references in function parameters.  A single '&' may
  # be found in the following places:
  #   inside expression: binary & for bitwise AND
  #   inside expression: unary & for taking the address of something
  #   inside declarators: reference parameter
  # We will exclude the first two cases by checking that we are not inside a
  # function body, including one that was just introduced by a trailing '{'.
  # TODO(unknown): Doesn't account for 'catch(Exception& e)' [rare].
  if (nesting_state.previous_stack_top and
      not (isinstance(nesting_state.previous_stack_top, _ClassInfo) or
           isinstance(nesting_state.previous_stack_top, _NamespaceInfo))):
    # Not at toplevel, not within a class, and not within a namespace
    return

  # Avoid initializer lists.  We only need to scan back from the
  # current line for something that starts with ':'.
  #
  # We don't need to check the current line, since the '&' would
  # appear inside the second set of parentheses on the current line as
  # opposed to the first set.
  if linenum > 0:
    # NOTE(review): scans at most 9 previous lines; lines must keep ending
    # in ')' or ',' for the scan to continue.
    for i in xrange(linenum - 1, max(0, linenum - 10), -1):
      previous_line = clean_lines.elided[i]
      if not Search(r'[),]\s*$', previous_line):
        break
      if Match(r'^\s*:\s+\S', previous_line):
        return

  # Avoid preprocessors
  if Search(r'\\\s*$', line):
    return

  # Avoid constructor initializer lists
  if IsInitializerList(clean_lines, linenum):
    return

  # We allow non-const references in a few standard places, like functions
  # called "swap()" or iostream operators like "<<" or ">>".  Do not check
  # those function parameters.
  #
  # We also accept & in static_assert, which looks like a function but
  # it's actually a declaration expression.
  allowed_functions = (r'(?:[sS]wap(?:<\w:+>)?|'
                           r'operator\s*[<>][<>]|'
                           r'static_assert|COMPILE_ASSERT'
                           r')\s*\(')
  if Search(allowed_functions, line):
    return
  elif not Search(r'\S+\([^)]*$', line):
    # Don't see an allowed function entry on this line.  Actually we
    # didn't see any function name on this line, so this is likely a
    # multi-line parameter list.  Try a bit harder to catch this case.
    for i in xrange(2):
      if (linenum > i and
          Search(allowed_functions, clean_lines.elided[linenum - i - 1])):
        return

  decls = ReplaceAll(r'{[^}]*}', ' ', line)  # exclude function body
  for parameter in re.findall(_RE_PATTERN_REF_PARAM, decls):
    if (not Match(_RE_PATTERN_CONST_REF_PARAM, parameter) and
        not Match(_RE_PATTERN_REF_STREAM_PARAM, parameter)):
      error(filename, linenum, 'runtime/references', 2,
            'Is this a non-const reference? '
            'If so, make const or use a pointer: ' +
            ReplaceAll(' *<', '<', parameter))
This is a fast way to\n    # silence the common case where the function type is the first\n    # template argument.  False negative with less-than comparison is\n    # avoided because those operators are usually followed by a space.\n    #\n    #   function<double(double)>   // bracket + no space = false positive\n    #   value < double(42)         // bracket + space = true positive\n    matched_new_or_template = match.group(1)\n\n    # Avoid arrays by looking for brackets that come after the closing\n    # parenthesis.\n    if Match(r'\\([^()]+\\)\\s*\\[', match.group(3)):\n      return\n\n    # Other things to ignore:\n    # - Function pointers\n    # - Casts to pointer types\n    # - Placement new\n    # - Alias declarations\n    matched_funcptr = match.group(3)\n    if (matched_new_or_template is None and\n        not (matched_funcptr and\n             (Match(r'\\((?:[^() ]+::\\s*\\*\\s*)?[^() ]+\\)\\s*\\(',\n                    matched_funcptr) or\n              matched_funcptr.startswith('(*)'))) and\n        not Match(r'\\s*using\\s+\\S+\\s*=\\s*' + matched_type, line) and\n        not Search(r'new\\(\\S+\\)\\s*' + matched_type, line)):\n      error(filename, linenum, 'readability/casting', 4,\n            'Using deprecated casting style.  '\n            'Use static_cast<%s>(...) instead' %\n            matched_type)\n\n  if not expecting_function:\n    CheckCStyleCast(filename, clean_lines, linenum, 'static_cast',\n                    r'\\((int|float|double|bool|char|u?int(16|32|64))\\)', error)\n\n  # This doesn't catch all cases. 
Consider (const char * const)\"hello\".\n  #\n  # (char *) \"foo\" should always be a const_cast (reinterpret_cast won't\n  # compile).\n  if CheckCStyleCast(filename, clean_lines, linenum, 'const_cast',\n                     r'\\((char\\s?\\*+\\s?)\\)\\s*\"', error):\n    pass\n  else:\n    # Check pointer casts for other than string constants\n    CheckCStyleCast(filename, clean_lines, linenum, 'reinterpret_cast',\n                    r'\\((\\w+\\s?\\*+\\s?)\\)', error)\n\n  # In addition, we look for people taking the address of a cast.  This\n  # is dangerous -- casts can assign to temporaries, so the pointer doesn't\n  # point where you think.\n  #\n  # Some non-identifier character is required before the '&' for the\n  # expression to be recognized as a cast.  These are casts:\n  #   expression = &static_cast<int*>(temporary());\n  #   function(&(int*)(temporary()));\n  #\n  # This is not a cast:\n  #   reference_type&(int* function_param);\n  match = Search(\n      r'(?:[^\\w]&\\(([^)*][^)]*)\\)[\\w(])|'\n      r'(?:[^\\w]&(static|dynamic|down|reinterpret)_cast\\b)', line)\n  if match:\n    # Try a better error message when the & is bound to something\n    # dereferenced by the casted pointer, as opposed to the casted\n    # pointer itself.\n    parenthesis_error = False\n    match = Match(r'^(.*&(?:static|dynamic|down|reinterpret)_cast\\b)<', line)\n    if match:\n      _, y1, x1 = CloseExpression(clean_lines, linenum, len(match.group(1)))\n      if x1 >= 0 and clean_lines.elided[y1][x1] == '(':\n        _, y2, x2 = CloseExpression(clean_lines, y1, x1)\n        if x2 >= 0:\n          extended_line = clean_lines.elided[y2][x2:]\n          if y2 < clean_lines.NumLines() - 1:\n            extended_line += clean_lines.elided[y2 + 1]\n          if Match(r'\\s*(?:->|\\[)', extended_line):\n            parenthesis_error = True\n\n    if parenthesis_error:\n      error(filename, linenum, 'readability/casting', 4,\n            ('Are you taking an address of 
something dereferenced '\n             'from a cast?  Wrapping the dereferenced expression in '\n             'parentheses will make the binding more obvious'))\n    else:\n      error(filename, linenum, 'runtime/casting', 4,\n            ('Are you taking an address of a cast?  '\n             'This is dangerous: could be a temp var.  '\n             'Take the address before doing the cast, rather than after'))\n\n\ndef CheckCStyleCast(filename, clean_lines, linenum, cast_type, pattern, error):\n  \"\"\"Checks for a C-style cast by looking for the pattern.\n\n  Args:\n    filename: The name of the current file.\n    clean_lines: A CleansedLines instance containing the file.\n    linenum: The number of the line to check.\n    cast_type: The string for the C++ cast to recommend.  This is either\n      reinterpret_cast, static_cast, or const_cast, depending.\n    pattern: The regular expression used to find C-style casts.\n    error: The function to call with any errors found.\n\n  Returns:\n    True if an error was emitted.\n    False otherwise.\n  \"\"\"\n  line = clean_lines.elided[linenum]\n  match = Search(pattern, line)\n  if not match:\n    return False\n\n  # Exclude lines with keywords that tend to look like casts\n  context = line[0:match.start(1) - 1]\n  if Match(r'.*\\b(?:sizeof|alignof|alignas|[_A-Z][_A-Z0-9]*)\\s*$', context):\n    return False\n\n  # Try expanding current context to see if we one level of\n  # parentheses inside a macro.\n  if linenum > 0:\n    for i in xrange(linenum - 1, max(0, linenum - 5), -1):\n      context = clean_lines.elided[i] + context\n  if Match(r'.*\\b[_A-Z][_A-Z0-9]*\\s*\\((?:\\([^()]*\\)|[^()])*$', context):\n    return False\n\n  # operator++(int) and operator--(int)\n  if context.endswith(' operator++') or context.endswith(' operator--'):\n    return False\n\n  # A single unnamed argument for a function tends to look like old style cast.\n  # If we see those, don't issue warnings for deprecated casts.\n  remainder = 
line[match.end(0):]\n  if Match(r'^\\s*(?:;|const\\b|throw\\b|final\\b|override\\b|[=>{),]|->)',\n           remainder):\n    return False\n\n  # At this point, all that should be left is actual casts.\n  error(filename, linenum, 'readability/casting', 4,\n        'Using C-style cast.  Use %s<%s>(...) instead' %\n        (cast_type, match.group(1)))\n\n  return True\n\n\ndef ExpectingFunctionArgs(clean_lines, linenum):\n  \"\"\"Checks whether where function type arguments are expected.\n\n  Args:\n    clean_lines: A CleansedLines instance containing the file.\n    linenum: The number of the line to check.\n\n  Returns:\n    True if the line at 'linenum' is inside something that expects arguments\n    of function types.\n  \"\"\"\n  line = clean_lines.elided[linenum]\n  return (Match(r'^\\s*MOCK_(CONST_)?METHOD\\d+(_T)?\\(', line) or\n          (linenum >= 2 and\n           (Match(r'^\\s*MOCK_(?:CONST_)?METHOD\\d+(?:_T)?\\((?:\\S+,)?\\s*$',\n                  clean_lines.elided[linenum - 1]) or\n            Match(r'^\\s*MOCK_(?:CONST_)?METHOD\\d+(?:_T)?\\(\\s*$',\n                  clean_lines.elided[linenum - 2]) or\n            Search(r'\\bstd::m?function\\s*\\<\\s*$',\n                   clean_lines.elided[linenum - 1]))))\n\n\n_HEADERS_CONTAINING_TEMPLATES = (\n    ('<deque>', ('deque',)),\n    ('<functional>', ('unary_function', 'binary_function',\n                      'plus', 'minus', 'multiplies', 'divides', 'modulus',\n                      'negate',\n                      'equal_to', 'not_equal_to', 'greater', 'less',\n                      'greater_equal', 'less_equal',\n                      'logical_and', 'logical_or', 'logical_not',\n                      'unary_negate', 'not1', 'binary_negate', 'not2',\n                      'bind1st', 'bind2nd',\n                      'pointer_to_unary_function',\n                      'pointer_to_binary_function',\n                      'ptr_fun',\n                      'mem_fun_t', 'mem_fun', 'mem_fun1_t', 
'mem_fun1_ref_t',\n                      'mem_fun_ref_t',\n                      'const_mem_fun_t', 'const_mem_fun1_t',\n                      'const_mem_fun_ref_t', 'const_mem_fun1_ref_t',\n                      'mem_fun_ref',\n                     )),\n    ('<limits>', ('numeric_limits',)),\n    ('<list>', ('list',)),\n    ('<map>', ('map', 'multimap',)),\n    ('<memory>', ('allocator', 'make_shared', 'make_unique', 'shared_ptr',\n                  'unique_ptr', 'weak_ptr')),\n    ('<queue>', ('queue', 'priority_queue',)),\n    ('<set>', ('set', 'multiset',)),\n    ('<stack>', ('stack',)),\n    ('<string>', ('char_traits', 'basic_string',)),\n    ('<tuple>', ('tuple',)),\n    ('<unordered_map>', ('unordered_map', 'unordered_multimap')),\n    ('<unordered_set>', ('unordered_set', 'unordered_multiset')),\n    ('<utility>', ('pair',)),\n    ('<vector>', ('vector',)),\n\n    # gcc extensions.\n    # Note: std::hash is their hash, ::hash is our hash\n    ('<hash_map>', ('hash_map', 'hash_multimap',)),\n    ('<hash_set>', ('hash_set', 'hash_multiset',)),\n    ('<slist>', ('slist',)),\n    )\n\n_HEADERS_MAYBE_TEMPLATES = (\n    ('<algorithm>', ('copy', 'max', 'min', 'min_element', 'sort',\n                     'transform',\n                    )),\n    ('<utility>', ('forward', 'make_pair', 'move', 'swap')),\n    )\n\n_RE_PATTERN_STRING = re.compile(r'\\bstring\\b')\n\n_re_pattern_headers_maybe_templates = []\nfor _header, _templates in _HEADERS_MAYBE_TEMPLATES:\n  for _template in _templates:\n    # Match max<type>(..., ...), max(..., ...), but not foo->max, foo.max or\n    # type::max().\n    _re_pattern_headers_maybe_templates.append(\n        (re.compile(r'[^>.]\\b' + _template + r'(<.*?>)?\\([^\\)]'),\n            _template,\n            _header))\n\n# Other scripts may reach in and modify this pattern.\n_re_pattern_templates = []\nfor _header, _templates in _HEADERS_CONTAINING_TEMPLATES:\n  for _template in _templates:\n    _re_pattern_templates.append(\n        
(re.compile(r'(\\<|\\b)' + _template + r'\\s*\\<'),\n         _template + '<>',\n         _header))\n\n\ndef FilesBelongToSameModule(filename_cc, filename_h):\n  \"\"\"Check if these two filenames belong to the same module.\n\n  The concept of a 'module' here is a as follows:\n  foo.h, foo-inl.h, foo.cc, foo_test.cc and foo_unittest.cc belong to the\n  same 'module' if they are in the same directory.\n  some/path/public/xyzzy and some/path/internal/xyzzy are also considered\n  to belong to the same module here.\n\n  If the filename_cc contains a longer path than the filename_h, for example,\n  '/absolute/path/to/base/sysinfo.cc', and this file would include\n  'base/sysinfo.h', this function also produces the prefix needed to open the\n  header. This is used by the caller of this function to more robustly open the\n  header file. We don't have access to the real include paths in this context,\n  so we need this guesswork here.\n\n  Known bugs: tools/base/bar.cc and base/bar.h belong to the same module\n  according to this implementation. Because of this, this function gives\n  some false positives. 
This should be sufficiently rare in practice.\n\n  Args:\n    filename_cc: is the path for the .cc file\n    filename_h: is the path for the header path\n\n  Returns:\n    Tuple with a bool and a string:\n    bool: True if filename_cc and filename_h belong to the same module.\n    string: the additional prefix needed to open the header file.\n  \"\"\"\n\n  fileinfo = FileInfo(filename_cc)\n  if not fileinfo.IsSource():\n    return (False, '')\n  filename_cc = filename_cc[:-len(fileinfo.Extension())]\n  matched_test_suffix = Search(_TEST_FILE_SUFFIX, fileinfo.BaseName())\n  if matched_test_suffix:\n    filename_cc = filename_cc[:-len(matched_test_suffix.group(1))]\n  filename_cc = filename_cc.replace('/public/', '/')\n  filename_cc = filename_cc.replace('/internal/', '/')\n\n  if not filename_h.endswith('.h'):\n    return (False, '')\n  filename_h = filename_h[:-len('.h')]\n  if filename_h.endswith('-inl'):\n    filename_h = filename_h[:-len('-inl')]\n  filename_h = filename_h.replace('/public/', '/')\n  filename_h = filename_h.replace('/internal/', '/')\n\n  files_belong_to_same_module = filename_cc.endswith(filename_h)\n  common_path = ''\n  if files_belong_to_same_module:\n    common_path = filename_cc[:-len(filename_h)]\n  return files_belong_to_same_module, common_path\n\n\ndef UpdateIncludeState(filename, include_dict, io=codecs):\n  \"\"\"Fill up the include_dict with new includes found from the file.\n\n  Args:\n    filename: the name of the header to read.\n    include_dict: a dictionary in which the headers are inserted.\n    io: The io factory to use to read the file. Provided for testability.\n\n  Returns:\n    True if a header was successfully added. 
False otherwise.\n  \"\"\"\n  headerfile = None\n  try:\n    headerfile = io.open(filename, 'r', 'utf8', 'replace')\n  except IOError:\n    return False\n  linenum = 0\n  for line in headerfile:\n    linenum += 1\n    clean_line = CleanseComments(line)\n    match = _RE_PATTERN_INCLUDE.search(clean_line)\n    if match:\n      include = match.group(2)\n      include_dict.setdefault(include, linenum)\n  return True\n\n\ndef CheckForIncludeWhatYouUse(filename, clean_lines, include_state, error,\n                              io=codecs):\n  \"\"\"Reports for missing stl includes.\n\n  This function will output warnings to make sure you are including the headers\n  necessary for the stl containers and functions that you use. We only give one\n  reason to include a header. For example, if you use both equal_to<> and\n  less<> in a .h file, only one (the latter in the file) of these will be\n  reported as a reason to include the <functional>.\n\n  Args:\n    filename: The name of the current file.\n    clean_lines: A CleansedLines instance containing the file.\n    include_state: An _IncludeState instance.\n    error: The function to call with any errors found.\n    io: The IO factory to use to read the header file. 
Provided for unittest\n        injection.\n  \"\"\"\n  required = {}  # A map of header name to linenumber and the template entity.\n                 # Example of required: { '<functional>': (1219, 'less<>') }\n\n  for linenum in xrange(clean_lines.NumLines()):\n    line = clean_lines.elided[linenum]\n    if not line or line[0] == '#':\n      continue\n\n    # String is special -- it is a non-templatized type in STL.\n    matched = _RE_PATTERN_STRING.search(line)\n    if matched:\n      # Don't warn about strings in non-STL namespaces:\n      # (We check only the first match per line; good enough.)\n      prefix = line[:matched.start()]\n      if prefix.endswith('std::') or not prefix.endswith('::'):\n        required['<string>'] = (linenum, 'string')\n\n    for pattern, template, header in _re_pattern_headers_maybe_templates:\n      if pattern.search(line):\n        required[header] = (linenum, template)\n\n    # The following function is just a speed up, no semantics are changed.\n    if not '<' in line:  # Reduces the cpu time usage by skipping lines.\n      continue\n\n    for pattern, template, header in _re_pattern_templates:\n      matched = pattern.search(line)\n      if matched:\n        # Don't warn about IWYU in non-STL namespaces:\n        # (We check only the first match per line; good enough.)\n        prefix = line[:matched.start()]\n        if prefix.endswith('std::') or not prefix.endswith('::'):\n          required[header] = (linenum, template)\n\n  # The policy is that if you #include something in foo.h you don't need to\n  # include it again in foo.cc. 
Here, we will look at possible includes.\n  # Let's flatten the include_state include_list and copy it into a dictionary.\n  include_dict = dict([item for sublist in include_state.include_list\n                       for item in sublist])\n\n  # Did we find the header for this file (if any) and successfully load it?\n  header_found = False\n\n  # Use the absolute path so that matching works properly.\n  abs_filename = FileInfo(filename).FullName()\n\n  # For Emacs's flymake.\n  # If cpplint is invoked from Emacs's flymake, a temporary file is generated\n  # by flymake and that file name might end with '_flymake.cc'. In that case,\n  # restore original file name here so that the corresponding header file can be\n  # found.\n  # e.g. If the file name is 'foo_flymake.cc', we should search for 'foo.h'\n  # instead of 'foo_flymake.h'\n  abs_filename = re.sub(r'_flymake\\.cc$', '.cc', abs_filename)\n\n  # include_dict is modified during iteration, so we iterate over a copy of\n  # the keys.\n  header_keys = include_dict.keys()\n  for header in header_keys:\n    (same_module, common_path) = FilesBelongToSameModule(abs_filename, header)\n    fullpath = common_path + header\n    if same_module and UpdateIncludeState(fullpath, include_dict, io):\n      header_found = True\n\n  # If we can't find the header file for a .cc, assume it's because we don't\n  # know where to look. 
In that case we'll give up as we're not sure they\n  # didn't include it in the .h file.\n  # TODO(unknown): Do a better job of finding .h files so we are confident that\n  # not having the .h file means there isn't one.\n  if filename.endswith('.cc') and not header_found:\n    return\n\n  # All the lines have been processed, report the errors found.\n  for required_header_unstripped in required:\n    template = required[required_header_unstripped][1]\n    if required_header_unstripped.strip('<>\"') not in include_dict:\n      error(filename, required[required_header_unstripped][0],\n            'build/include_what_you_use', 4,\n            'Add #include ' + required_header_unstripped + ' for ' + template)\n\n\n_RE_PATTERN_EXPLICIT_MAKEPAIR = re.compile(r'\\bmake_pair\\s*<')\n\n\ndef CheckMakePairUsesDeduction(filename, clean_lines, linenum, error):\n  \"\"\"Check that make_pair's template arguments are deduced.\n\n  G++ 4.6 in C++11 mode fails badly if make_pair's template arguments are\n  specified explicitly, and such use isn't intended in any case.\n\n  Args:\n    filename: The name of the current file.\n    clean_lines: A CleansedLines instance containing the file.\n    linenum: The number of the line to check.\n    error: The function to call with any errors found.\n  \"\"\"\n  line = clean_lines.elided[linenum]\n  match = _RE_PATTERN_EXPLICIT_MAKEPAIR.search(line)\n  if match:\n    error(filename, linenum, 'build/explicit_make_pair',\n          4,  # 4 = high confidence\n          'For C++11-compatibility, omit template arguments from make_pair'\n          ' OR use pair directly OR if appropriate, construct a pair directly')\n\n\ndef CheckRedundantVirtual(filename, clean_lines, linenum, error):\n  \"\"\"Check if line contains a redundant \"virtual\" function-specifier.\n\n  Args:\n    filename: The name of the current file.\n    clean_lines: A CleansedLines instance containing the file.\n    linenum: The number of the line to check.\n    error: The function 
to call with any errors found.\n  \"\"\"\n  # Look for \"virtual\" on current line.\n  line = clean_lines.elided[linenum]\n  virtual = Match(r'^(.*)(\\bvirtual\\b)(.*)$', line)\n  if not virtual: return\n\n  # Ignore \"virtual\" keywords that are near access-specifiers.  These\n  # are only used in class base-specifier and do not apply to member\n  # functions.\n  if (Search(r'\\b(public|protected|private)\\s+$', virtual.group(1)) or\n      Match(r'^\\s+(public|protected|private)\\b', virtual.group(3))):\n    return\n\n  # Ignore the \"virtual\" keyword from virtual base classes.  Usually\n  # there is a column on the same line in these cases (virtual base\n  # classes are rare in google3 because multiple inheritance is rare).\n  if Match(r'^.*[^:]:[^:].*$', line): return\n\n  # Look for the next opening parenthesis.  This is the start of the\n  # parameter list (possibly on the next line shortly after virtual).\n  # TODO(unknown): doesn't work if there are virtual functions with\n  # decltype() or other things that use parentheses, but csearch suggests\n  # that this is rare.\n  end_col = -1\n  end_line = -1\n  start_col = len(virtual.group(2))\n  for start_line in xrange(linenum, min(linenum + 3, clean_lines.NumLines())):\n    line = clean_lines.elided[start_line][start_col:]\n    parameter_list = Match(r'^([^(]*)\\(', line)\n    if parameter_list:\n      # Match parentheses to find the end of the parameter list\n      (_, end_line, end_col) = CloseExpression(\n          clean_lines, start_line, start_col + len(parameter_list.group(1)))\n      break\n    start_col = 0\n\n  if end_col < 0:\n    return  # Couldn't find end of parameter list, give up\n\n  # Look for \"override\" or \"final\" after the parameter list\n  # (possibly on the next few lines).\n  for i in xrange(end_line, min(end_line + 3, clean_lines.NumLines())):\n    line = clean_lines.elided[i][end_col:]\n    match = Search(r'\\b(override|final)\\b', line)\n    if match:\n      error(filename, 
linenum, 'readability/inheritance', 4,\n            ('\"virtual\" is redundant since function is '\n             'already declared as \"%s\"' % match.group(1)))\n\n    # Set end_col to check whole lines after we are done with the\n    # first line.\n    end_col = 0\n    if Search(r'[^\\w]\\s*$', line):\n      break\n\n\ndef CheckRedundantOverrideOrFinal(filename, clean_lines, linenum, error):\n  \"\"\"Check if line contains a redundant \"override\" or \"final\" virt-specifier.\n\n  Args:\n    filename: The name of the current file.\n    clean_lines: A CleansedLines instance containing the file.\n    linenum: The number of the line to check.\n    error: The function to call with any errors found.\n  \"\"\"\n  # Look for closing parenthesis nearby.  We need one to confirm where\n  # the declarator ends and where the virt-specifier starts to avoid\n  # false positives.\n  line = clean_lines.elided[linenum]\n  declarator_end = line.rfind(')')\n  if declarator_end >= 0:\n    fragment = line[declarator_end:]\n  else:\n    if linenum > 1 and clean_lines.elided[linenum - 1].rfind(')') >= 0:\n      fragment = line\n    else:\n      return\n\n  # Check that at most one of \"override\" or \"final\" is present, not both\n  if Search(r'\\boverride\\b', fragment) and Search(r'\\bfinal\\b', fragment):\n    error(filename, linenum, 'readability/inheritance', 4,\n          ('\"override\" is redundant since function is '\n           'already declared as \"final\"'))\n\n\n\n\n# Returns true if we are at a new block, and it is directly\n# inside of a namespace.\ndef IsBlockInNameSpace(nesting_state, is_forward_declaration):\n  \"\"\"Checks that the new block is directly in a namespace.\n\n  Args:\n    nesting_state: The _NestingState object that contains info about our state.\n    is_forward_declaration: If the class is a forward declared class.\n  Returns:\n    Whether or not the new block is directly in a namespace.\n  \"\"\"\n  if is_forward_declaration:\n    if 
len(nesting_state.stack) >= 1 and (\n        isinstance(nesting_state.stack[-1], _NamespaceInfo)):\n      return True\n    else:\n      return False\n\n  return (len(nesting_state.stack) > 1 and\n          nesting_state.stack[-1].check_namespace_indentation and\n          isinstance(nesting_state.stack[-2], _NamespaceInfo))\n\n\ndef ShouldCheckNamespaceIndentation(nesting_state, is_namespace_indent_item,\n                                    raw_lines_no_comments, linenum):\n  \"\"\"This method determines if we should apply our namespace indentation check.\n\n  Args:\n    nesting_state: The current nesting state.\n    is_namespace_indent_item: If we just put a new class on the stack, True.\n      If the top of the stack is not a class, or we did not recently\n      add the class, False.\n    raw_lines_no_comments: The lines without the comments.\n    linenum: The current line number we are processing.\n\n  Returns:\n    True if we should apply our namespace indentation check. Currently, it\n    only works for classes and namespaces inside of a namespace.\n  \"\"\"\n\n  is_forward_declaration = IsForwardClassDeclaration(raw_lines_no_comments,\n                                                     linenum)\n\n  if not (is_namespace_indent_item or is_forward_declaration):\n    return False\n\n  # If we are in a macro, we do not want to check the namespace indentation.\n  if IsMacroDefinition(raw_lines_no_comments, linenum):\n    return False\n\n  return IsBlockInNameSpace(nesting_state, is_forward_declaration)\n\n\n# Call this method if the line is directly inside of a namespace.\n# If the line above is blank (excluding comments) or the start of\n# an inner namespace, it cannot be indented.\ndef CheckItemIndentationInNamespace(filename, raw_lines_no_comments, linenum,\n                                    error):\n  line = raw_lines_no_comments[linenum]\n  if Match(r'^\\s+', line):\n    error(filename, linenum, 'runtime/indentation_namespace', 4,\n          'Do not 
indent within a namespace')\n\n\ndef ProcessLine(filename, file_extension, clean_lines, line,\n                include_state, function_state, nesting_state, error,\n                extra_check_functions=[]):\n  \"\"\"Processes a single line in the file.\n\n  Args:\n    filename: Filename of the file that is being processed.\n    file_extension: The extension (dot not included) of the file.\n    clean_lines: An array of strings, each representing a line of the file,\n                 with comments stripped.\n    line: Number of line being processed.\n    include_state: An _IncludeState instance in which the headers are inserted.\n    function_state: A _FunctionState instance which counts function lines, etc.\n    nesting_state: A NestingState instance which maintains information about\n                   the current stack of nested blocks being parsed.\n    error: A callable to which errors are reported, which takes 4 arguments:\n           filename, line number, error level, and message\n    extra_check_functions: An array of additional check functions that will be\n                           run on each source line. 
Each function takes 4\n                           arguments: filename, clean_lines, line, error\n  \"\"\"\n  raw_lines = clean_lines.raw_lines\n  ParseNolintSuppressions(filename, raw_lines[line], line, error)\n  nesting_state.Update(filename, clean_lines, line, error)\n  CheckForNamespaceIndentation(filename, nesting_state, clean_lines, line,\n                               error)\n  if nesting_state.InAsmBlock(): return\n  CheckForFunctionLengths(filename, clean_lines, line, function_state, error)\n  CheckForMultilineCommentsAndStrings(filename, clean_lines, line, error)\n  CheckStyle(filename, clean_lines, line, file_extension, nesting_state, error)\n  CheckLanguage(filename, clean_lines, line, file_extension, include_state,\n                nesting_state, error)\n  CheckForNonConstReference(filename, clean_lines, line, nesting_state, error)\n  CheckForNonStandardConstructs(filename, clean_lines, line,\n                                nesting_state, error)\n  CheckVlogArguments(filename, clean_lines, line, error)\n  CheckPosixThreading(filename, clean_lines, line, error)\n  CheckInvalidIncrement(filename, clean_lines, line, error)\n  CheckMakePairUsesDeduction(filename, clean_lines, line, error)\n  CheckRedundantVirtual(filename, clean_lines, line, error)\n  CheckRedundantOverrideOrFinal(filename, clean_lines, line, error)\n  for check_fn in extra_check_functions:\n    check_fn(filename, clean_lines, line, error)\n\ndef FlagCxx11Features(filename, clean_lines, linenum, error):\n  \"\"\"Flag those c++11 features that we only allow in certain places.\n\n  Args:\n    filename: The name of the current file.\n    clean_lines: A CleansedLines instance containing the file.\n    linenum: The number of the line to check.\n    error: The function to call with any errors found.\n  \"\"\"\n  line = clean_lines.elided[linenum]\n\n  include = Match(r'\\s*#\\s*include\\s+[<\"]([^<\"]+)[\">]', line)\n\n  # Flag unapproved C++ TR1 headers.\n  if include and 
include.group(1).startswith('tr1/'):\n    error(filename, linenum, 'build/c++tr1', 5,\n          ('C++ TR1 headers such as <%s> are unapproved.') % include.group(1))\n\n  # Flag unapproved C++11 headers.\n  if include and include.group(1) in ('cfenv',\n                                      'condition_variable',\n                                      'fenv.h',\n                                      'future',\n                                      'mutex',\n                                      'thread',\n                                      'chrono',\n                                      'ratio',\n                                      'regex',\n                                      'system_error',\n                                     ):\n    error(filename, linenum, 'build/c++11', 5,\n          ('<%s> is an unapproved C++11 header.') % include.group(1))\n\n  # The only place where we need to worry about C++11 keywords and library\n  # features in preprocessor directives is in macro definitions.\n  if Match(r'\\s*#', line) and not Match(r'\\s*#\\s*define\\b', line): return\n\n  # These are classes and free functions.  The classes are always\n  # mentioned as std::*, but we only catch the free functions if\n  # they're not found by ADL.  They're alphabetical by header.\n  for top_name in (\n      # type_traits\n      'alignment_of',\n      'aligned_union',\n      ):\n    if Search(r'\\bstd::%s\\b' % top_name, line):\n      error(filename, linenum, 'build/c++11', 5,\n            ('std::%s is an unapproved C++11 class or function.  
Send c-style '\n             'an example of where it would make your code more readable, and '\n             'they may let you use it.') % top_name)\n\n\ndef FlagCxx14Features(filename, clean_lines, linenum, error):\n  \"\"\"Flag those C++14 features that we restrict.\n\n  Args:\n    filename: The name of the current file.\n    clean_lines: A CleansedLines instance containing the file.\n    linenum: The number of the line to check.\n    error: The function to call with any errors found.\n  \"\"\"\n  line = clean_lines.elided[linenum]\n\n  include = Match(r'\\s*#\\s*include\\s+[<\"]([^<\"]+)[\">]', line)\n\n  # Flag unapproved C++14 headers.\n  if include and include.group(1) in ('scoped_allocator', 'shared_mutex'):\n    error(filename, linenum, 'build/c++14', 5,\n          ('<%s> is an unapproved C++14 header.') % include.group(1))\n\n\ndef ProcessFileData(filename, file_extension, lines, error,\n                    extra_check_functions=[]):\n  \"\"\"Performs lint checks and reports any errors to the given error function.\n\n  Args:\n    filename: Filename of the file that is being processed.\n    file_extension: The extension (dot not included) of the file.\n    lines: An array of strings, each representing a line of the file, with the\n           last element being empty if the file is terminated with a newline.\n    error: A callable to which errors are reported, which takes 4 arguments:\n           filename, line number, error level, and message\n    extra_check_functions: An array of additional check functions that will be\n                           run on each source line. 
Each function takes 4\n                           arguments: filename, clean_lines, line, error\n  \"\"\"\n  lines = (['// marker so line numbers and indices both start at 1'] + lines +\n           ['// marker so line numbers end in a known way'])\n\n  include_state = _IncludeState()\n  function_state = _FunctionState()\n  nesting_state = NestingState()\n\n  ResetNolintSuppressions()\n\n  CheckForCopyright(filename, lines, error)\n  ProcessGlobalSuppresions(lines)\n  RemoveMultiLineComments(filename, lines, error)\n  clean_lines = CleansedLines(lines)\n\n  if IsHeaderExtension(file_extension):\n    CheckForHeaderGuard(filename, clean_lines, error)\n\n  for line in xrange(clean_lines.NumLines()):\n    ProcessLine(filename, file_extension, clean_lines, line,\n                include_state, function_state, nesting_state, error,\n                extra_check_functions)\n    FlagCxx11Features(filename, clean_lines, line, error)\n  nesting_state.CheckCompletedBlocks(filename, error)\n\n  CheckForIncludeWhatYouUse(filename, clean_lines, include_state, error)\n\n  # Check that the .cc file has included its header if it exists.\n  if _IsSourceExtension(file_extension):\n    CheckHeaderFileIncluded(filename, include_state, error)\n\n  # We check here rather than inside ProcessLine so that we see raw\n  # lines rather than \"cleaned\" lines.\n  CheckForBadCharacters(filename, lines, error)\n\n  CheckForNewlineAtEOF(filename, lines, error)\n\ndef ProcessConfigOverrides(filename):\n  \"\"\" Loads the configuration files and processes the config overrides.\n\n  Args:\n    filename: The name of the file being processed by the linter.\n\n  Returns:\n    False if the current |filename| should not be processed further.\n  \"\"\"\n\n  abs_filename = os.path.abspath(filename)\n  cfg_filters = []\n  keep_looking = True\n  while keep_looking:\n    abs_path, base_name = os.path.split(abs_filename)\n    if not base_name:\n      break  # Reached the root directory.\n\n    cfg_file = 
os.path.join(abs_path, \"CPPLINT.cfg\")\n    abs_filename = abs_path\n    if not os.path.isfile(cfg_file):\n      continue\n\n    try:\n      with open(cfg_file) as file_handle:\n        for line in file_handle:\n          line, _, _ = line.partition('#')  # Remove comments.\n          if not line.strip():\n            continue\n\n          name, _, val = line.partition('=')\n          name = name.strip()\n          val = val.strip()\n          if name == 'set noparent':\n            keep_looking = False\n          elif name == 'filter':\n            cfg_filters.append(val)\n          elif name == 'exclude_files':\n            # When matching exclude_files pattern, use the base_name of\n            # the current file name or the directory name we are processing.\n            # For example, if we are checking for lint errors in /foo/bar/baz.cc\n            # and we found the .cfg file at /foo/CPPLINT.cfg, then the config\n            # file's \"exclude_files\" filter is meant to be checked against \"bar\"\n            # and not \"baz\" nor \"bar/baz.cc\".\n            if base_name:\n              pattern = re.compile(val)\n              if pattern.match(base_name):\n                if _cpplint_state.quiet:\n                  # Suppress \"Ignoring file\" warning when using --quiet.\n                  return False\n                sys.stderr.write('Ignoring \"%s\": file excluded by \"%s\". 
'\n                                 'File path component \"%s\" matches '\n                                 'pattern \"%s\"\\n' %\n                                 (filename, cfg_file, base_name, val))\n                return False\n          elif name == 'linelength':\n            global _line_length\n            try:\n                _line_length = int(val)\n            except ValueError:\n                sys.stderr.write('Line length must be numeric.')\n          elif name == 'root':\n            global _root\n            # root directories are specified relative to CPPLINT.cfg dir.\n            _root = os.path.join(os.path.dirname(cfg_file), val)\n          elif name == 'headers':\n            ProcessHppHeadersOption(val)\n          else:\n            sys.stderr.write(\n                'Invalid configuration option (%s) in file %s\\n' %\n                (name, cfg_file))\n\n    except IOError:\n      sys.stderr.write(\n          \"Skipping config file '%s': Can't open for reading\\n\" % cfg_file)\n      keep_looking = False\n\n  # Apply all the accumulated filters in reverse order (top-level directory\n  # config options having the least priority).\n  for filter in reversed(cfg_filters):\n     _AddFilters(filter)\n\n  return True\n\n\ndef ProcessFile(filename, vlevel, extra_check_functions=[]):\n  \"\"\"Does google-lint on a single file.\n\n  Args:\n    filename: The name of the file to parse.\n\n    vlevel: The level of errors to report.  Every error of confidence\n    >= verbose_level will be reported.  0 is a good default.\n\n    extra_check_functions: An array of additional check functions that will be\n                           run on each source line. 
Each function takes 4\n                           arguments: filename, clean_lines, line, error\n  \"\"\"\n\n  _SetVerboseLevel(vlevel)\n  _BackupFilters()\n  old_errors = _cpplint_state.error_count\n\n  if not ProcessConfigOverrides(filename):\n    _RestoreFilters()\n    return\n\n  lf_lines = []\n  crlf_lines = []\n  try:\n    # Support the UNIX convention of using \"-\" for stdin.  Note that\n    # we are not opening the file with universal newline support\n    # (which codecs doesn't support anyway), so the resulting lines do\n    # contain trailing '\\r' characters if we are reading a file that\n    # has CRLF endings.\n    # If after the split a trailing '\\r' is present, it is removed\n    # below.\n    if filename == '-':\n      lines = codecs.StreamReaderWriter(sys.stdin,\n                                        codecs.getreader('utf8'),\n                                        codecs.getwriter('utf8'),\n                                        'replace').read().split('\\n')\n    else:\n      lines = codecs.open(filename, 'r', 'utf8', 'replace').read().split('\\n')\n\n    # Remove trailing '\\r'.\n    # The -1 accounts for the extra trailing blank line we get from split()\n    for linenum in range(len(lines) - 1):\n      if lines[linenum].endswith('\\r'):\n        lines[linenum] = lines[linenum].rstrip('\\r')\n        crlf_lines.append(linenum + 1)\n      else:\n        lf_lines.append(linenum + 1)\n\n  except IOError:\n    sys.stderr.write(\n        \"Skipping input '%s': Can't open for reading\\n\" % filename)\n    _RestoreFilters()\n    return\n\n  # Note, if no dot is found, this will give the entire filename as the ext.\n  file_extension = filename[filename.rfind('.') + 1:]\n\n  # When reading from stdin, the extension is unknown, so no cpplint tests\n  # should rely on the extension.\n  if filename != '-' and file_extension not in _valid_extensions:\n    sys.stderr.write('Ignoring %s; not a valid file name '\n                     '(%s)\\n' % 
(filename, ', '.join(_valid_extensions)))\n  else:\n    ProcessFileData(filename, file_extension, lines, Error,\n                    extra_check_functions)\n\n    # If end-of-line sequences are a mix of LF and CR-LF, issue\n    # warnings on the lines with CR.\n    #\n    # Don't issue any warnings if all lines are uniformly LF or CR-LF,\n    # since critique can handle these just fine, and the style guide\n    # doesn't dictate a particular end of line sequence.\n    #\n    # We can't depend on os.linesep to determine what the desired\n    # end-of-line sequence should be, since that will return the\n    # server-side end-of-line sequence.\n    if lf_lines and crlf_lines:\n      # Warn on every line with CR.  An alternative approach might be to\n      # check whether the file is mostly CRLF or just LF, and warn on the\n      # minority, we bias toward LF here since most tools prefer LF.\n      for linenum in crlf_lines:\n        Error(filename, linenum, 'whitespace/newline', 1,\n              'Unexpected \\\\r (^M) found; better to use only \\\\n')\n\n  # Suppress printing anything if --quiet was passed unless the error\n  # count has increased after processing this file.\n  if not _cpplint_state.quiet or old_errors != _cpplint_state.error_count:\n    sys.stdout.write('Done processing %s\\n' % filename)\n  _RestoreFilters()\n\n\ndef PrintUsage(message):\n  \"\"\"Prints a brief usage string and exits, optionally with an error message.\n\n  Args:\n    message: The optional error message.\n  \"\"\"\n  sys.stderr.write(_USAGE)\n  if message:\n    sys.exit('\\nFATAL ERROR: ' + message)\n  else:\n    sys.exit(1)\n\n\ndef PrintCategories():\n  \"\"\"Prints a list of all the error-categories used by error messages.\n\n  These are the categories used to filter messages via --filter.\n  \"\"\"\n  sys.stderr.write(''.join('  %s\\n' % cat for cat in _ERROR_CATEGORIES))\n  sys.exit(0)\n\n\ndef ParseArguments(args):\n  \"\"\"Parses the command line arguments.\n\n  This may set 
the output format and verbosity level as side-effects.\n\n  Args:\n    args: The command line arguments:\n\n  Returns:\n    The list of filenames to lint.\n  \"\"\"\n  try:\n    (opts, filenames) = getopt.getopt(args, '', ['help', 'output=', 'verbose=',\n                                                 'counting=',\n                                                 'filter=',\n                                                 'root=',\n                                                 'linelength=',\n                                                 'extensions=',\n                                                 'headers=',\n                                                 'quiet'])\n  except getopt.GetoptError:\n    PrintUsage('Invalid arguments.')\n\n  verbosity = _VerboseLevel()\n  output_format = _OutputFormat()\n  filters = ''\n  quiet = _Quiet()\n  counting_style = ''\n\n  for (opt, val) in opts:\n    if opt == '--help':\n      PrintUsage(None)\n    elif opt == '--output':\n      if val not in ('emacs', 'vs7', 'eclipse'):\n        PrintUsage('The only allowed output formats are emacs, vs7 and eclipse.')\n      output_format = val\n    elif opt == '--quiet':\n      quiet = True\n    elif opt == '--verbose':\n      verbosity = int(val)\n    elif opt == '--filter':\n      filters = val\n      if not filters:\n        PrintCategories()\n    elif opt == '--counting':\n      if val not in ('total', 'toplevel', 'detailed'):\n        PrintUsage('Valid counting options are total, toplevel, and detailed')\n      counting_style = val\n    elif opt == '--root':\n      global _root\n      _root = val\n    elif opt == '--linelength':\n      global _line_length\n      try:\n          _line_length = int(val)\n      except ValueError:\n          PrintUsage('Line length must be digits.')\n    elif opt == '--extensions':\n      global _valid_extensions\n      try:\n          _valid_extensions = set(val.split(','))\n      except ValueError:\n          PrintUsage('Extensions must be 
comma separated list.')\n    elif opt == '--headers':\n      ProcessHppHeadersOption(val)\n\n  if not filenames:\n    PrintUsage('No files were specified.')\n\n  _SetOutputFormat(output_format)\n  _SetQuiet(quiet)\n  _SetVerboseLevel(verbosity)\n  _SetFilters(filters)\n  _SetCountingStyle(counting_style)\n\n  return filenames\n\n\ndef main():\n  filenames = ParseArguments(sys.argv[1:])\n\n  # Change stderr to write with replacement characters so we don't die\n  # if we try to print something containing non-ASCII characters.\n  sys.stderr = codecs.StreamReaderWriter(sys.stderr,\n                                         codecs.getreader('utf8'),\n                                         codecs.getwriter('utf8'),\n                                         'replace')\n\n  _cpplint_state.ResetErrorCounts()\n  for filename in filenames:\n    ProcessFile(filename, _cpplint_state.verbose_level)\n  # If --quiet is passed, suppress printing error count unless there are errors.\n  if not _cpplint_state.quiet or _cpplint_state.error_count > 0:\n    _cpplint_state.PrintErrorCounts()\n\n  sys.exit(_cpplint_state.error_count > 0)\n\n\nif __name__ == '__main__':\n  main()\n"
  },
  {
    "path": "runtime/sanitizer_common/scripts/gen_dynamic_list.py",
    "content": "#!/usr/bin/env python\n#===- lib/sanitizer_common/scripts/gen_dynamic_list.py ---------------------===#\n#\n# Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n# See https://llvm.org/LICENSE.txt for license information.\n# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n#\n#===------------------------------------------------------------------------===#\n#\n# Generates the list of functions that should be exported from sanitizer\n# runtimes. The output format is recognized by --dynamic-list linker option.\n# Usage:\n#   gen_dynamic_list.py libclang_rt.*san*.a [ files ... ]\n#\n#===------------------------------------------------------------------------===#\nfrom __future__ import print_function\nimport argparse\nimport os\nimport re\nimport subprocess\nimport sys\nimport platform\n\nnew_delete = set([\n                  '_Znam', '_ZnamRKSt9nothrow_t',    # operator new[](unsigned long)\n                  '_Znwm', '_ZnwmRKSt9nothrow_t',    # operator new(unsigned long)\n                  '_Znaj', '_ZnajRKSt9nothrow_t',    # operator new[](unsigned int)\n                  '_Znwj', '_ZnwjRKSt9nothrow_t',    # operator new(unsigned int)\n                  # operator new(unsigned long, std::align_val_t)\n                  '_ZnwmSt11align_val_t', '_ZnwmSt11align_val_tRKSt9nothrow_t',\n                  # operator new(unsigned int, std::align_val_t)\n                  '_ZnwjSt11align_val_t', '_ZnwjSt11align_val_tRKSt9nothrow_t',\n                  # operator new[](unsigned long, std::align_val_t)\n                  '_ZnamSt11align_val_t', '_ZnamSt11align_val_tRKSt9nothrow_t',\n                  # operator new[](unsigned int, std::align_val_t)\n                  '_ZnajSt11align_val_t', '_ZnajSt11align_val_tRKSt9nothrow_t',\n                  '_ZdaPv', '_ZdaPvRKSt9nothrow_t',  # operator delete[](void *)\n                  '_ZdlPv', '_ZdlPvRKSt9nothrow_t',  # operator delete(void *)\n                  '_ZdaPvm',           
              # operator delete[](void*, unsigned long)\n                  '_ZdlPvm',                         # operator delete(void*, unsigned long)\n                  '_ZdaPvj',                         # operator delete[](void*, unsigned int)\n                  '_ZdlPvj',                         # operator delete(void*, unsigned int)\n                  # operator delete(void*, std::align_val_t)\n                  '_ZdlPvSt11align_val_t', '_ZdlPvSt11align_val_tRKSt9nothrow_t',\n                  # operator delete[](void*, std::align_val_t)\n                  '_ZdaPvSt11align_val_t', '_ZdaPvSt11align_val_tRKSt9nothrow_t',\n                  # operator delete(void*, unsigned long,  std::align_val_t)\n                  '_ZdlPvmSt11align_val_t',\n                  # operator delete[](void*, unsigned long, std::align_val_t)\n                  '_ZdaPvmSt11align_val_t',\n                  # operator delete(void*, unsigned int,  std::align_val_t)\n                  '_ZdlPvjSt11align_val_t',\n                  # operator delete[](void*, unsigned int, std::align_val_t)\n                  '_ZdaPvjSt11align_val_t',\n                  ])\n\nversioned_functions = set(['memcpy', 'pthread_attr_getaffinity_np',\n                           'pthread_cond_broadcast',\n                           'pthread_cond_destroy', 'pthread_cond_init',\n                           'pthread_cond_signal', 'pthread_cond_timedwait',\n                           'pthread_cond_wait', 'realpath',\n                           'sched_getaffinity'])\n\ndef get_global_functions(nm_executable, library):\n  functions = []\n  nm = os.environ.get('NM', nm_executable)\n  nm_proc = subprocess.Popen([nm, library], stdout=subprocess.PIPE,\n                             stderr=subprocess.PIPE)\n  nm_out = nm_proc.communicate()[0].decode().split('\\n')\n  if nm_proc.returncode != 0:\n    raise subprocess.CalledProcessError(nm_proc.returncode, nm)\n  func_symbols = ['T', 'W']\n  # On PowerPC, nm prints function descriptors 
from .data section.\n  if platform.uname()[4] in [\"powerpc\", \"ppc64\"]:\n    func_symbols += ['D']\n  for line in nm_out:\n    cols = line.split(' ')\n    if len(cols) == 3 and cols[1] in func_symbols :\n      functions.append(cols[2])\n  return functions\n\ndef main(argv):\n  parser = argparse.ArgumentParser()\n  parser.add_argument('--version-list', action='store_true')\n  parser.add_argument('--extra', default=[], action='append')\n  parser.add_argument('libraries', default=[], nargs='+')\n  parser.add_argument('--nm-executable', required=True)\n  parser.add_argument('-o', '--output', required=True)\n  args = parser.parse_args()\n\n  result = []\n\n  all_functions = []\n  for library in args.libraries:\n    all_functions.extend(get_global_functions(args.nm_executable, library))\n  function_set = set(all_functions)\n  for func in all_functions:\n    # Export new/delete operators.\n    if func in new_delete:\n      result.append(func)\n      continue\n    # Export interceptors.\n    match = re.match('__interceptor_(.*)', func)\n    if match:\n      result.append(func)\n      # We have to avoid exporting the interceptors for versioned library\n      # functions due to gold internal error.\n      orig_name = match.group(1)\n      if orig_name in function_set and (args.version_list or orig_name not in versioned_functions):\n        result.append(orig_name)\n      continue\n    # Export sanitizer interface functions.\n    if re.match('__sanitizer_(.*)', func):\n      result.append(func)\n\n  # Additional exported functions from files.\n  for fname in args.extra:\n    f = open(fname, 'r')\n    for line in f:\n      result.append(line.rstrip())\n  # Print the resulting list in the format recognized by ld.\n  with open(args.output, 'w') as f:\n    print('{', file=f)\n    if args.version_list:\n      print('global:', file=f)\n    result.sort()\n    for sym in result:\n      print(u'  %s;' % sym, file=f)\n    if args.version_list:\n      print('local:', file=f)\n      
print('  *;', file=f)\n    print('};', file=f)\n\nif __name__ == '__main__':\n  main(sys.argv)\n"
  },
  {
    "path": "runtime/sanitizer_common/scripts/litlint.py",
    "content": "#!/usr/bin/env python\n#\n# litlint\n#\n# Ensure RUN commands in lit tests are free of common errors.\n#\n# If any errors are detected, litlint returns a nonzero exit code.\n#\n\nimport optparse\nimport re\nimport sys\nfrom io import open\n\n# Compile regex once for all files\nrunRegex = re.compile(r'(?<!-o)(?<!%run) %t\\s')\n\ndef LintLine(s):\n  \"\"\" Validate a line\n\n  Args:\n    s: str, the line to validate\n\n  Returns:\n    Returns an error message and a 1-based column number if an error was\n    detected, otherwise (None, None).\n  \"\"\"\n\n  # Check that RUN command can be executed with an emulator\n  m = runRegex.search(s)\n  if m:\n    start, end = m.span()\n    return ('missing %run before %t', start + 2)\n\n  # No errors\n  return (None, None)\n\n\ndef LintFile(p):\n  \"\"\" Check that each RUN command can be executed with an emulator\n\n  Args:\n    p: str, valid path to a file\n\n  Returns:\n    The number of errors detected.\n  \"\"\"\n  errs = 0\n  with open(p, 'r', encoding='utf-8') as f:\n    for i, s in enumerate(f.readlines(), start=1):\n      msg, col = LintLine(s)\n      if msg != None:\n        errs += 1\n        errorMsg = 'litlint: {}:{}:{}: error: {}.\\n{}{}\\n'\n        arrow = (col-1) * ' ' + '^'\n        sys.stderr.write(errorMsg.format(p, i, col, msg, s, arrow))\n  return errs\n\n\nif __name__ == \"__main__\":\n  # Parse args\n  parser = optparse.OptionParser()\n  parser.add_option('--filter')  # ignored\n  (options, filenames) = parser.parse_args()\n\n  # Lint each file\n  errs = 0\n  for p in filenames:\n    errs += LintFile(p)\n\n  # If errors, return nonzero\n  if errs > 0:\n    sys.exit(1)\n"
  },
  {
    "path": "runtime/sanitizer_common/scripts/litlint_test.py",
    "content": "#!/usr/bin/env python\n\n# Tests for litlint.py\n#\n# Usage: python litlint_test.py\n#\n# Returns nonzero if any test fails\n\nimport litlint\nimport unittest\n\nclass TestLintLine(unittest.TestCase):\n  def test_missing_run(self):\n    f = litlint.LintLine\n    self.assertEqual(f(' %t '),     ('missing %run before %t', 2))\n    self.assertEqual(f(' %t\\n'),    ('missing %run before %t', 2))\n    self.assertEqual(f(' %t.so '),  (None, None))\n    self.assertEqual(f(' %t.o '),   (None, None))\n    self.assertEqual(f('%run %t '), (None, None))\n    self.assertEqual(f('-o %t '),   (None, None))\n\nif __name__ == '__main__':\n  unittest.main()\n"
  },
  {
    "path": "runtime/sanitizer_common/scripts/sancov.py",
    "content": "#!/usr/bin/env python\n# Merge or print the coverage data collected by asan's coverage.\n# Input files are sequences of 4-byte integers.\n# We need to merge these integers into a set and then\n# either print them (as hex) or dump them into another file.\nimport array\nimport bisect\nimport glob\nimport os.path\nimport struct\nimport subprocess\nimport sys\n\nprog_name = \"\"\n\ndef Usage():\n  sys.stderr.write(\n    \"Usage: \\n\" + \\\n    \" \" + prog_name + \" merge FILE [FILE...] > OUTPUT\\n\" \\\n    \" \" + prog_name + \" print FILE [FILE...]\\n\" \\\n    \" \" + prog_name + \" unpack FILE [FILE...]\\n\" \\\n    \" \" + prog_name + \" rawunpack FILE [FILE ...]\\n\" \\\n    \" \" + prog_name + \" missing BINARY < LIST_OF_PCS\\n\" \\\n    \"\\n\")\n  exit(1)\n\ndef CheckBits(bits):\n  if bits != 32 and bits != 64:\n    raise Exception(\"Wrong bitness: %d\" % bits)\n\ndef TypeCodeForBits(bits):\n  CheckBits(bits)\n  return 'L' if bits == 64 else 'I'\n\ndef TypeCodeForStruct(bits):\n  CheckBits(bits)\n  return 'Q' if bits == 64 else 'I'\n\nkMagic32SecondHalf = 0xFFFFFF32;\nkMagic64SecondHalf = 0xFFFFFF64;\nkMagicFirstHalf    = 0xC0BFFFFF;\n\ndef MagicForBits(bits):\n  CheckBits(bits)\n  if sys.byteorder == 'little':\n    return [kMagic64SecondHalf if bits == 64 else kMagic32SecondHalf, kMagicFirstHalf]\n  else:\n    return [kMagicFirstHalf, kMagic64SecondHalf if bits == 64 else kMagic32SecondHalf]\n\ndef ReadMagicAndReturnBitness(f, path):\n  magic_bytes = f.read(8)\n  magic_words = struct.unpack('II', magic_bytes);\n  bits = 0\n  idx = 1 if sys.byteorder == 'little' else 0\n  if magic_words[idx] == kMagicFirstHalf:\n    if magic_words[1-idx] == kMagic64SecondHalf:\n      bits = 64\n    elif magic_words[1-idx] == kMagic32SecondHalf:\n      bits = 32\n  if bits == 0:\n    raise Exception('Bad magic word in %s' % path)\n  return bits\n\ndef ReadOneFile(path):\n  with open(path, mode=\"rb\") as f:\n    f.seek(0, 2)\n    size = f.tell()\n    f.seek(0, 
0)\n    if size < 8:\n      raise Exception('File %s is short (< 8 bytes)' % path)\n    bits = ReadMagicAndReturnBitness(f, path)\n    size -= 8\n    w = size * 8 // bits\n    s = struct.unpack_from(TypeCodeForStruct(bits) * (w), f.read(size))\n  sys.stderr.write(\n    \"%s: read %d %d-bit PCs from %s\\n\" % (prog_name, w, bits, path))\n  return s\n\ndef Merge(files):\n  s = set()\n  for f in files:\n    s = s.union(set(ReadOneFile(f)))\n  sys.stderr.write(\n    \"%s: %d files merged; %d PCs total\\n\" % (prog_name, len(files), len(s))\n  )\n  return sorted(s)\n\ndef PrintFiles(files):\n  if len(files) > 1:\n    s = Merge(files)\n  else:  # If there is just on file, print the PCs in order.\n    s = ReadOneFile(files[0])\n    sys.stderr.write(\"%s: 1 file merged; %d PCs total\\n\" % (prog_name, len(s)))\n  for i in s:\n    print(\"0x%x\" % i)\n\ndef MergeAndPrint(files):\n  if sys.stdout.isatty():\n    Usage()\n  s = Merge(files)\n  bits = 32\n  if max(s) > 0xFFFFFFFF:\n    bits = 64\n  stdout_buf = getattr(sys.stdout, 'buffer', sys.stdout)\n  array.array('I', MagicForBits(bits)).tofile(stdout_buf)\n  a = struct.pack(TypeCodeForStruct(bits) * len(s), *s)\n  stdout_buf.write(a)\n\n\ndef UnpackOneFile(path):\n  with open(path, mode=\"rb\") as f:\n    sys.stderr.write(\"%s: unpacking %s\\n\" % (prog_name, path))\n    while True:\n      header = f.read(12)\n      if not header: return\n      if len(header) < 12:\n        break\n      pid, module_length, blob_size = struct.unpack('iII', header)\n      module = f.read(module_length).decode('utf-8')\n      blob = f.read(blob_size)\n      assert(len(module) == module_length)\n      assert(len(blob) == blob_size)\n      extracted_file = \"%s.%d.sancov\" % (module, pid)\n      sys.stderr.write(\"%s: extracting %s\\n\" % (prog_name, extracted_file))\n      # The packed file may contain multiple blobs for the same pid/module\n      # pair. 
Append to the end of the file instead of overwriting.\n      with open(extracted_file, 'ab') as f2:\n        f2.write(blob)\n    # fail\n    raise Exception('Error reading file %s' % path)\n\n\ndef Unpack(files):\n  for f in files:\n    UnpackOneFile(f)\n\ndef UnpackOneRawFile(path, map_path):\n  mem_map = []\n  with open(map_path, mode=\"rt\") as f_map:\n    sys.stderr.write(\"%s: reading map %s\\n\" % (prog_name, map_path))\n    bits = int(f_map.readline())\n    if bits != 32 and bits != 64:\n      raise Exception('Wrong bits size in the map')\n    for line in f_map:\n      parts = line.rstrip().split()\n      mem_map.append((int(parts[0], 16),\n                  int(parts[1], 16),\n                  int(parts[2], 16),\n                  ' '.join(parts[3:])))\n  mem_map.sort(key=lambda m : m[0])\n  mem_map_keys = [m[0] for m in mem_map]\n\n  with open(path, mode=\"rb\") as f:\n    sys.stderr.write(\"%s: unpacking %s\\n\" % (prog_name, path))\n\n    f.seek(0, 2)\n    size = f.tell()\n    f.seek(0, 0)\n    pcs = struct.unpack_from(TypeCodeForStruct(bits) * (size * 8 // bits), f.read(size))\n    mem_map_pcs = [[] for i in range(0, len(mem_map))]\n\n    for pc in pcs:\n      if pc == 0: continue\n      map_idx = bisect.bisect(mem_map_keys, pc) - 1\n      (start, end, base, module_path) = mem_map[map_idx]\n      assert pc >= start\n      if pc >= end:\n        sys.stderr.write(\"warning: %s: pc %x outside of any known mapping\\n\" % (prog_name, pc))\n        continue\n      mem_map_pcs[map_idx].append(pc - base)\n\n    for ((start, end, base, module_path), pc_list) in zip(mem_map, mem_map_pcs):\n      if len(pc_list) == 0: continue\n      assert path.endswith('.sancov.raw')\n      dst_path = module_path + '.' 
+ os.path.basename(path)[:-4]\n      sys.stderr.write(\"%s: writing %d PCs to %s\\n\" % (prog_name, len(pc_list), dst_path))\n      sorted_pc_list = sorted(pc_list)\n      pc_buffer = struct.pack(TypeCodeForStruct(bits) * len(pc_list), *sorted_pc_list)\n      with open(dst_path, 'ab+') as f2:\n        array.array('I', MagicForBits(bits)).tofile(f2)\n        f2.seek(0, 2)\n        f2.write(pc_buffer)\n\ndef RawUnpack(files):\n  for f in files:\n    if not f.endswith('.sancov.raw'):\n      raise Exception('Unexpected raw file name %s' % f)\n    f_map = f[:-3] + 'map'\n    UnpackOneRawFile(f, f_map)\n\ndef GetInstrumentedPCs(binary):\n  # This looks scary, but all it does is extract all offsets where we call:\n  # - __sanitizer_cov() or __sanitizer_cov_with_check(),\n  # - with call or callq,\n  # - directly or via PLT.\n  cmd = r\"objdump --no-show-raw-insn -d %s | \" \\\n        r\"grep '^\\s\\+[0-9a-f]\\+:\\s\\+call\\(q\\|\\)\\s\\+\\(0x\\|\\)[0-9a-f]\\+ <__sanitizer_cov\\(_with_check\\|\\|_trace_pc_guard\\)\\(@plt\\|\\)>' | \" \\\n        r\"grep -o '^\\s\\+[0-9a-f]\\+'\" % binary\n  lines = subprocess.check_output(cmd, stdin=subprocess.PIPE, shell=True).splitlines()\n  # The PCs we get from objdump are off by 4 bytes, as they point to the\n  # beginning of the callq instruction. 
Empirically this is true on x86 and\n  # x86_64.\n  return set(int(line.strip(), 16) + 4 for line in lines)\n\ndef PrintMissing(binary):\n  if not os.path.isfile(binary):\n    raise Exception('File not found: %s' % binary)\n  instrumented = GetInstrumentedPCs(binary)\n  sys.stderr.write(\"%s: found %d instrumented PCs in %s\\n\" % (prog_name,\n                                                              len(instrumented),\n                                                              binary))\n  covered = set(int(line, 16) for line in sys.stdin)\n  sys.stderr.write(\"%s: read %d PCs from stdin\\n\" % (prog_name, len(covered)))\n  missing = instrumented - covered\n  sys.stderr.write(\"%s: %d PCs missing from coverage\\n\" % (prog_name, len(missing)))\n  if (len(missing) > len(instrumented) - len(covered)):\n    sys.stderr.write(\n      \"%s: WARNING: stdin contains PCs not found in binary\\n\" % prog_name\n    )\n  for pc in sorted(missing):\n    print(\"0x%x\" % pc)\n\nif __name__ == '__main__':\n  prog_name = sys.argv[0]\n  if len(sys.argv) <= 2:\n    Usage();\n\n  if sys.argv[1] == \"missing\":\n    if len(sys.argv) != 3:\n      Usage()\n    PrintMissing(sys.argv[2])\n    exit(0)\n\n  file_list = []\n  for f in sys.argv[2:]:\n    file_list += glob.glob(f)\n  if not file_list:\n    Usage()\n\n  if sys.argv[1] == \"print\":\n    PrintFiles(file_list)\n  elif sys.argv[1] == \"merge\":\n    MergeAndPrint(file_list)\n  elif sys.argv[1] == \"unpack\":\n    Unpack(file_list)\n  elif sys.argv[1] == \"rawunpack\":\n    RawUnpack(file_list)\n  else:\n    Usage()\n"
  },
  {
    "path": "runtime/sanitizer_common/symbolizer/sanitizer_symbolize.cpp",
    "content": "//===-- sanitizer_symbolize.cpp ---------------------------------*- C++ -*-===//\n//\n// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n// See https://llvm.org/LICENSE.txt for license information.\n// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n//\n//===----------------------------------------------------------------------===//\n//\n// Implementation of weak hooks from sanitizer_symbolizer_posix_libcdep.cpp.\n//\n//===----------------------------------------------------------------------===//\n\n#include <stdio.h>\n\n#include <string>\n\n#include \"llvm/DebugInfo/Symbolize/DIPrinter.h\"\n#include \"llvm/DebugInfo/Symbolize/Symbolize.h\"\n\nstatic llvm::symbolize::LLVMSymbolizer *Symbolizer = nullptr;\nstatic bool Demangle = true;\nstatic bool InlineFrames = true;\n\nstatic llvm::symbolize::LLVMSymbolizer *getDefaultSymbolizer() {\n  if (Symbolizer)\n    return Symbolizer;\n  llvm::symbolize::LLVMSymbolizer::Options Opts;\n  Opts.Demangle = Demangle;\n  Symbolizer = new llvm::symbolize::LLVMSymbolizer(Opts);\n  return Symbolizer;\n}\n\nstatic llvm::symbolize::PrinterConfig getDefaultPrinterConfig() {\n  llvm::symbolize::PrinterConfig Config;\n  Config.Pretty = false;\n  Config.Verbose = false;\n  Config.PrintFunctions = true;\n  Config.PrintAddress = false;\n  Config.SourceContextLines = 0;\n  return Config;\n}\n\nnamespace __sanitizer {\nint internal_snprintf(char *buffer, unsigned long length, const char *format,\n                      ...);\n}  // namespace __sanitizer\n\nextern \"C\" {\n\ntypedef uint64_t u64;\n\nbool __sanitizer_symbolize_code(const char *ModuleName, uint64_t ModuleOffset,\n                                char *Buffer, int MaxLength) {\n  std::string Result;\n  {\n    llvm::raw_string_ostream OS(Result);\n    llvm::symbolize::PrinterConfig Config = getDefaultPrinterConfig();\n    llvm::symbolize::Request Request{ModuleName, ModuleOffset};\n    auto Printer =\n        
std::make_unique<llvm::symbolize::LLVMPrinter>(OS, OS, Config);\n\n    // TODO: it is neccessary to set proper SectionIndex here.\n    // object::SectionedAddress::UndefSection works for only absolute addresses.\n    if (InlineFrames) {\n      auto ResOrErr = getDefaultSymbolizer()->symbolizeInlinedCode(\n          ModuleName,\n          {ModuleOffset, llvm::object::SectionedAddress::UndefSection});\n      Printer->print(Request,\n                     ResOrErr ? ResOrErr.get() : llvm::DIInliningInfo());\n    } else {\n      auto ResOrErr = getDefaultSymbolizer()->symbolizeCode(\n          ModuleName,\n          {ModuleOffset, llvm::object::SectionedAddress::UndefSection});\n      Printer->print(Request, ResOrErr ? ResOrErr.get() : llvm::DILineInfo());\n    }\n  }\n  return __sanitizer::internal_snprintf(Buffer, MaxLength, \"%s\",\n                                        Result.c_str()) < MaxLength;\n}\n\nbool __sanitizer_symbolize_data(const char *ModuleName, uint64_t ModuleOffset,\n                                char *Buffer, int MaxLength) {\n  std::string Result;\n  {\n    llvm::symbolize::PrinterConfig Config = getDefaultPrinterConfig();\n    llvm::raw_string_ostream OS(Result);\n    llvm::symbolize::Request Request{ModuleName, ModuleOffset};\n    auto Printer =\n        std::make_unique<llvm::symbolize::LLVMPrinter>(OS, OS, Config);\n\n    // TODO: it is neccessary to set proper SectionIndex here.\n    // object::SectionedAddress::UndefSection works for only absolute addresses.\n    auto ResOrErr = getDefaultSymbolizer()->symbolizeData(\n        ModuleName,\n        {ModuleOffset, llvm::object::SectionedAddress::UndefSection});\n    Printer->print(Request, ResOrErr ? 
ResOrErr.get() : llvm::DIGlobal());\n  }\n  return __sanitizer::internal_snprintf(Buffer, MaxLength, \"%s\",\n                                        Result.c_str()) < MaxLength;\n}\n\nvoid __sanitizer_symbolize_flush() {\n  if (Symbolizer)\n    Symbolizer->flush();\n}\n\nint __sanitizer_symbolize_demangle(const char *Name, char *Buffer,\n                                   int MaxLength) {\n  std::string Result =\n      llvm::symbolize::LLVMSymbolizer::DemangleName(Name, nullptr);\n  return __sanitizer::internal_snprintf(Buffer, MaxLength, \"%s\",\n                                        Result.c_str()) < MaxLength\n             ? static_cast<int>(Result.size() + 1)\n             : 0;\n}\n\nbool __sanitizer_symbolize_set_demangle(bool Value) {\n  // Must be called before LLVMSymbolizer created.\n  if (Symbolizer)\n    return false;\n  Demangle = Value;\n  return true;\n}\n\nbool __sanitizer_symbolize_set_inline_frames(bool Value) {\n  InlineFrames = Value;\n  return true;\n}\n\n// Override __cxa_atexit and ignore callbacks.\n// This prevents crashes in a configuration when the symbolizer\n// is built into sanitizer runtime and consequently into the test process.\n// LLVM libraries have some global objects destroyed during exit,\n// so if the test process triggers any bugs after that, the symbolizer crashes.\n// An example stack trace of such crash:\n//\n// #1  __cxa_throw\n// #2  std::__u::__throw_system_error\n// #3  std::__u::recursive_mutex::lock\n// #4  __sanitizer_llvm::ManagedStaticBase::RegisterManagedStatic\n// #5  __sanitizer_llvm::errorToErrorCode\n// #6  __sanitizer_llvm::getFileAux\n// #7  __sanitizer_llvm::MemoryBuffer::getFileOrSTDIN\n// #10 __sanitizer_llvm::symbolize::LLVMSymbolizer::getOrCreateModuleInfo\n// #13 __sanitizer::Symbolizer::SymbolizeData\n// #14 __tsan::SymbolizeData\n// #16 __tsan::ReportRace\n// #18 __tsan_write4\n// #19 race() () at test/tsan/atexit4.cpp\n// #20 cxa_at_exit_wrapper\n// #21 __cxa_finalize\n// #22 __do_fini\n//\n// 
For the standalone llvm-symbolizer this does not hurt,\n// we just don't destroy few global objects on exit.\nint __cxa_atexit(void (*f)(void *a), void *arg, void *dso) { return 0; }\n\n}  // extern \"C\"\n"
  },
  {
    "path": "runtime/sanitizer_common/symbolizer/sanitizer_wrappers.cpp",
    "content": "//===-- sanitizer_wrappers.cpp ----------------------------------*- C++ -*-===//\n//\n// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n// See https://llvm.org/LICENSE.txt for license information.\n// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n//\n//===----------------------------------------------------------------------===//\n//\n// Redirect some functions to sanitizer interceptors.\n//\n//===----------------------------------------------------------------------===//\n\n#include <dlfcn.h>\n#include <errno.h>\n#include <fcntl.h>\n#include <stdarg.h>\n#include <stdio.h>\n#include <unistd.h>\n\n#include <tuple>\n\nnamespace __sanitizer {\nunsigned long internal_open(const char *filename, int flags);\nunsigned long internal_open(const char *filename, int flags, unsigned mode);\nunsigned long internal_close(int fd);\nunsigned long internal_stat(const char *path, void *buf);\nunsigned long internal_lstat(const char *path, void *buf);\nunsigned long internal_fstat(int fd, void *buf);\nsize_t internal_strlen(const char *s);\nunsigned long internal_mmap(void *addr, unsigned long length, int prot,\n                            int flags, int fd, unsigned long long offset);\nvoid *internal_memcpy(void *dest, const void *src, unsigned long n);\n// Used to propagate errno.\nbool internal_iserror(unsigned long retval, int *rverrno = 0);\n}  // namespace __sanitizer\n\nnamespace {\n\ntemplate <typename T>\nstruct GetTypes;\n\ntemplate <typename R, typename... Args>\nstruct GetTypes<R(Args...)> {\n  using Result = R;\n  template <size_t i>\n  struct Arg {\n    using Type = typename std::tuple_element<i, std::tuple<Args...>>::type;\n  };\n};\n\n#define LLVM_SYMBOLIZER_GET_FUNC(Function) \\\n  ((__interceptor_##Function)              \\\n       ? (__interceptor_##Function)        \\\n       : reinterpret_cast<decltype(&Function)>(dlsym(RTLD_NEXT, #Function)))\n\n#define LLVM_SYMBOLIZER_INTERCEPTOR1(Function, ...)         
      \\\n  GetTypes<__VA_ARGS__>::Result __interceptor_##Function(         \\\n      GetTypes<__VA_ARGS__>::Arg<0>::Type) __attribute__((weak)); \\\n  GetTypes<__VA_ARGS__>::Result Function(                         \\\n      GetTypes<__VA_ARGS__>::Arg<0>::Type arg0) {                 \\\n    return LLVM_SYMBOLIZER_GET_FUNC(Function)(arg0);              \\\n  }\n\n#define LLVM_SYMBOLIZER_INTERCEPTOR2(Function, ...)               \\\n  GetTypes<__VA_ARGS__>::Result __interceptor_##Function(         \\\n      GetTypes<__VA_ARGS__>::Arg<0>::Type,                        \\\n      GetTypes<__VA_ARGS__>::Arg<1>::Type) __attribute__((weak)); \\\n  GetTypes<__VA_ARGS__>::Result Function(                         \\\n      GetTypes<__VA_ARGS__>::Arg<0>::Type arg0,                   \\\n      GetTypes<__VA_ARGS__>::Arg<1>::Type arg1) {                 \\\n    return LLVM_SYMBOLIZER_GET_FUNC(Function)(arg0, arg1);        \\\n  }\n\n#define LLVM_SYMBOLIZER_INTERCEPTOR3(Function, ...)               \\\n  GetTypes<__VA_ARGS__>::Result __interceptor_##Function(         \\\n      GetTypes<__VA_ARGS__>::Arg<0>::Type,                        \\\n      GetTypes<__VA_ARGS__>::Arg<1>::Type,                        \\\n      GetTypes<__VA_ARGS__>::Arg<2>::Type) __attribute__((weak)); \\\n  GetTypes<__VA_ARGS__>::Result Function(                         \\\n      GetTypes<__VA_ARGS__>::Arg<0>::Type arg0,                   \\\n      GetTypes<__VA_ARGS__>::Arg<1>::Type arg1,                   \\\n      GetTypes<__VA_ARGS__>::Arg<2>::Type arg2) {                 \\\n    return LLVM_SYMBOLIZER_GET_FUNC(Function)(arg0, arg1, arg2);  \\\n  }\n\n#define LLVM_SYMBOLIZER_INTERCEPTOR4(Function, ...)                    
\\\n  GetTypes<__VA_ARGS__>::Result __interceptor_##Function(              \\\n      GetTypes<__VA_ARGS__>::Arg<0>::Type,                             \\\n      GetTypes<__VA_ARGS__>::Arg<1>::Type,                             \\\n      GetTypes<__VA_ARGS__>::Arg<2>::Type,                             \\\n      GetTypes<__VA_ARGS__>::Arg<3>::Type) __attribute__((weak));      \\\n  GetTypes<__VA_ARGS__>::Result Function(                              \\\n      GetTypes<__VA_ARGS__>::Arg<0>::Type arg0,                        \\\n      GetTypes<__VA_ARGS__>::Arg<1>::Type arg1,                        \\\n      GetTypes<__VA_ARGS__>::Arg<2>::Type arg2,                        \\\n      GetTypes<__VA_ARGS__>::Arg<3>::Type arg3) {                      \\\n    return LLVM_SYMBOLIZER_GET_FUNC(Function)(arg0, arg1, arg2, arg3); \\\n  }\n\n}  // namespace\n\n// C-style interface around internal sanitizer libc functions.\nextern \"C\" {\n\n#define RETURN_OR_SET_ERRNO(T, res)                   \\\n  int rverrno;                                        \\\n  if (__sanitizer::internal_iserror(res, &rverrno)) { \\\n    errno = rverrno;                                  \\\n    return (T)-1;                                     \\\n  }                                                   \\\n  return (T)res;\n\nint open(const char *filename, int flags, ...) 
{\n  unsigned long res;\n  if (flags | O_CREAT) {\n    va_list va;\n    va_start(va, flags);\n    unsigned mode = va_arg(va, unsigned);\n    va_end(va);\n    res = __sanitizer::internal_open(filename, flags, mode);\n  } else {\n    res = __sanitizer::internal_open(filename, flags);\n  }\n  RETURN_OR_SET_ERRNO(int, res);\n}\n\nint close(int fd) {\n  unsigned long res = __sanitizer::internal_close(fd);\n  RETURN_OR_SET_ERRNO(int, res);\n}\n\n#define STAT(func, arg, buf)                                  \\\n  unsigned long res = __sanitizer::internal_##func(arg, buf); \\\n  RETURN_OR_SET_ERRNO(int, res);\n\nint stat(const char *path, struct stat *buf) { STAT(stat, path, buf); }\n\nint lstat(const char *path, struct stat *buf) { STAT(lstat, path, buf); }\n\nint fstat(int fd, struct stat *buf) { STAT(fstat, fd, buf); }\n\n// Redirect versioned stat functions to the __sanitizer::internal() as well.\nint __xstat(int version, const char *path, struct stat *buf) {\n  STAT(stat, path, buf);\n}\n\nint __lxstat(int version, const char *path, struct stat *buf) {\n  STAT(lstat, path, buf);\n}\n\nint __fxstat(int version, int fd, struct stat *buf) { STAT(fstat, fd, buf); }\n\nsize_t strlen(const char *s) { return __sanitizer::internal_strlen(s); }\n\nvoid *mmap(void *addr, size_t length, int prot, int flags, int fd,\n           off_t offset) {\n  unsigned long res = __sanitizer::internal_mmap(\n      addr, (unsigned long)length, prot, flags, fd, (unsigned long long)offset);\n  RETURN_OR_SET_ERRNO(void *, res);\n}\n\nLLVM_SYMBOLIZER_INTERCEPTOR3(read, ssize_t(int, void *, size_t))\nLLVM_SYMBOLIZER_INTERCEPTOR4(pread, ssize_t(int, void *, size_t, off_t))\nLLVM_SYMBOLIZER_INTERCEPTOR4(pread64, ssize_t(int, void *, size_t, off64_t))\nLLVM_SYMBOLIZER_INTERCEPTOR2(realpath, char *(const char *, char *))\n\nLLVM_SYMBOLIZER_INTERCEPTOR1(pthread_cond_broadcast, int(pthread_cond_t *))\nLLVM_SYMBOLIZER_INTERCEPTOR2(pthread_cond_wait,\n                             int(pthread_cond_t *, 
pthread_mutex_t *))\nLLVM_SYMBOLIZER_INTERCEPTOR1(pthread_mutex_lock, int(pthread_mutex_t *))\nLLVM_SYMBOLIZER_INTERCEPTOR1(pthread_mutex_unlock, int(pthread_mutex_t *))\nLLVM_SYMBOLIZER_INTERCEPTOR1(pthread_mutex_destroy, int(pthread_mutex_t *))\nLLVM_SYMBOLIZER_INTERCEPTOR2(pthread_mutex_init,\n                             int(pthread_mutex_t *,\n                                 const pthread_mutexattr_t *))\nLLVM_SYMBOLIZER_INTERCEPTOR1(pthread_mutexattr_destroy,\n                             int(pthread_mutexattr_t *))\nLLVM_SYMBOLIZER_INTERCEPTOR1(pthread_mutexattr_init, int(pthread_mutexattr_t *))\nLLVM_SYMBOLIZER_INTERCEPTOR2(pthread_mutexattr_settype,\n                             int(pthread_mutexattr_t *, int))\nLLVM_SYMBOLIZER_INTERCEPTOR1(pthread_getspecific, void *(pthread_key_t))\nLLVM_SYMBOLIZER_INTERCEPTOR2(pthread_key_create,\n                             int(pthread_key_t *, void (*)(void *)))\nLLVM_SYMBOLIZER_INTERCEPTOR2(pthread_once,\n                             int(pthread_once_t *, void (*)(void)))\nLLVM_SYMBOLIZER_INTERCEPTOR2(pthread_setspecific,\n                             int(pthread_key_t, const void *))\nLLVM_SYMBOLIZER_INTERCEPTOR3(pthread_sigmask,\n                             int(int, const sigset_t *, sigset_t *))\n\n}  // extern \"C\"\n"
  },
  {
    "path": "runtime/sanitizer_common/symbolizer/scripts/ar_to_bc.sh",
    "content": "#!/usr/bin/env bash\n\nfunction usage() {\n  echo \"Usage: $0 INPUT... OUTPUT\"\n  exit 1\n}\n\nif [ \"$#\" -le 1 ]; then\n  usage\nfi\n\n[[ $AR == /* ]] || AR=$PWD/$AR\n[[ $LINK == /* ]] || LINK=$PWD/$LINK\n\nINPUTS=\nOUTPUT=\nfor ARG in $@; do\n  INPUTS=\"$INPUTS $OUTPUT\"\n  OUTPUT=$(readlink -f $ARG)\ndone\n\necho Inputs: $INPUTS\necho Output: $OUTPUT\n\nSCRATCH_DIR=$(mktemp -d)\nln -s $INPUTS $SCRATCH_DIR/\n\npushd $SCRATCH_DIR\n\nfor INPUT in *; do\n  for OBJ in $($AR t $INPUT); do\n    $AR x $INPUT $OBJ\n    mv -f $OBJ $(basename $INPUT).$OBJ\n  done\ndone\n\n$LINK *.o -o $OUTPUT\n\nrm -rf $SCRATCH_DIR\n"
  },
  {
    "path": "runtime/sanitizer_common/symbolizer/scripts/build_symbolizer.sh",
    "content": "#!/usr/bin/env bash\n#\n# Run as: CLANG=bin/clang ZLIB_SRC=src/zlib \\\n#             build_symbolizer.sh runtime_build/lib/clang/4.0.0/lib/linux/\n# zlib can be downloaded from http://www.zlib.net.\n#\n# Script compiles self-contained object file with symbolization code and injects\n# it into the given set of runtime libraries. Script updates only libraries\n# which has unresolved __sanitizer_symbolize_* symbols and matches architecture.\n# Object file is be compiled from LLVM sources with dependencies like libc++ and\n# zlib. Then it internalizes symbols in the file, so that it can be linked\n# into arbitrary programs, avoiding conflicts with the program own symbols and\n# avoiding dependencies on any program symbols. The only acceptable dependencies\n# are libc and __sanitizer::internal_* from sanitizer runtime.\n#\n# Symbols exported by the object file will be used by Sanitizer runtime\n# libraries to symbolize code/data in-process.\n#\n# The script will modify the output directory which is given as the first\n# argument to the script.\n#\n# FIXME: We should really be using a simpler approach to building this object\n# file, and it should be available as a regular cmake rule. Conceptually, we\n# want to be doing \"ld -r\" followed by \"objcopy -G\" to create a relocatable\n# object file with only our entry points exposed. However, this does not work at\n# present, see PR30750.\n\nset -x\nset -e\nset -u\n\nSCRIPT_DIR=$(cd \"$(dirname \"$0\")\" && pwd)\nSRC_DIR=$(readlink -f $SCRIPT_DIR/..)\nTARGE_DIR=$(readlink -f $1)\nCOMPILER_RT_SRC=$(readlink -f ${SCRIPT_DIR}/../../../..)\nLLVM_SRC=${LLVM_SRC:-${COMPILER_RT_SRC}/../llvm}\nLLVM_SRC=$(readlink -f $LLVM_SRC)\nif [[ ! -d \"${LLVM_SRC}/../llvm\" ]] ; then\n  LLVM_SRC=$(readlink -f ${COMPILER_RT_SRC}/../../../llvm)\nfi\nLIBCXX_SRC=$(readlink -f ${COMPILER_RT_SRC}/../libcxx)\nLIBCXXABI_SRC=$(readlink -f ${COMPILER_RT_SRC}/../libcxxabi)\n\nif [[ ! -d \"${LLVM_SRC}/../llvm\" ||\n      ! 
-d \"${LIBCXX_SRC}\" ||\n      ! -d \"${LIBCXXABI_SRC}\" ]]; then\n  echo \"Missing or incomplete LLVM_SRC\"\n  exit 1\nfi\n\nif [[ \"$ZLIB_SRC\" == \"\"  ||\n      ! -x \"${ZLIB_SRC}/configure\" ||\n      ! -f \"${ZLIB_SRC}/zlib.h\" ]]; then\n  echo \"Missing or incomplete ZLIB_SRC\"\n  exit 1\nfi\nZLIB_SRC=$(readlink -f $ZLIB_SRC)\n\nJ=\"${J:-50}\"\n\nCLANG=\"${CLANG:-`which clang`}\"\nCLANG_DIR=$(readlink -f $(dirname \"$CLANG\"))\n\nBUILD_DIR=$(readlink -f ./symbolizer)\nmkdir -p $BUILD_DIR\ncd $BUILD_DIR\n\nCC=$CLANG_DIR/clang\nCXX=$CLANG_DIR/clang++\nTBLGEN=$CLANG_DIR/llvm-tblgen\nOPT=$CLANG_DIR/opt\nexport AR=$CLANG_DIR/llvm-ar\nexport LINK=$CLANG_DIR/llvm-link\nTARGET_TRIPLE=$($CC -print-target-triple)\n\nfor F in $CC $CXX $TBLGEN $LINK $OPT $AR; do\n  if [[ ! -x \"$F\" ]]; then\n    echo \"Missing $F\"\n     exit 1\n  fi\ndone\n\nZLIB_BUILD=${BUILD_DIR}/zlib\nLIBCXX_BUILD=${BUILD_DIR}/libcxx\nLLVM_BUILD=${BUILD_DIR}/llvm\nSYMBOLIZER_BUILD=${BUILD_DIR}/symbolizer\n\nFLAGS=${FLAGS:-}\nFLAGS=\"$FLAGS -fPIC -flto -Os -g0 -DNDEBUG\"\n\n# Build zlib.\nmkdir -p ${ZLIB_BUILD}\ncd ${ZLIB_BUILD}\ncp -r ${ZLIB_SRC}/* .\nCC=$CC CFLAGS=\"$FLAGS\" RANLIB=/bin/true ./configure --static\nmake -j${J} libz.a\n\n# Build and install libcxxabi and libcxx.\nif [[ ! -d ${LIBCXX_BUILD} ]]; then\n  mkdir -p ${LIBCXX_BUILD}\n  cd ${LIBCXX_BUILD}\n  LIBCXX_FLAGS=\"${FLAGS} -Wno-macro-redefined\"\n  PROJECTS=\n  if [[ ! 
-d $LLVM_SRC/projects/libcxxabi ]] ; then\n    PROJECTS=\"-DLLVM_ENABLE_PROJECTS='libcxx;libcxxabi'\"\n  fi\n  cmake -GNinja \\\n    ${PROJECTS} \\\n    -DCMAKE_BUILD_TYPE=Release \\\n    -DCMAKE_C_COMPILER=$CC \\\n    -DCMAKE_CXX_COMPILER=$CXX \\\n    -DCMAKE_C_FLAGS_RELEASE=\"${LIBCXX_FLAGS}\" \\\n    -DCMAKE_CXX_FLAGS_RELEASE=\"${LIBCXX_FLAGS}\" \\\n    -DLIBCXXABI_ENABLE_ASSERTIONS=OFF \\\n    -DLIBCXXABI_ENABLE_EXCEPTIONS=OFF \\\n    -DLIBCXXABI_ENABLE_SHARED=OFF \\\n    -DLIBCXX_ENABLE_ASSERTIONS=OFF \\\n    -DLIBCXX_ENABLE_EXCEPTIONS=OFF \\\n    -DLIBCXX_ENABLE_RTTI=OFF \\\n    -DLIBCXX_ENABLE_SHARED=OFF \\\n  $LLVM_SRC\nfi\ncd ${LIBCXX_BUILD}\nninja cxx cxxabi\n\nFLAGS=\"${FLAGS} -fno-rtti -fno-exceptions\"\nLLVM_FLAGS=\"${FLAGS} -nostdinc++ -I${ZLIB_BUILD} -isystem ${LIBCXX_BUILD}/include/${TARGET_TRIPLE}/c++/v1 -isystem ${LIBCXX_BUILD}/include/c++/v1 -Wno-error=global-constructors\"\n\n# Build LLVM.\nif [[ ! -d ${LLVM_BUILD} ]]; then\n  mkdir -p ${LLVM_BUILD}\n  cd ${LLVM_BUILD}\n  cmake -GNinja \\\n    -DCMAKE_BUILD_TYPE=Release \\\n    -DCMAKE_C_COMPILER=$CC \\\n    -DCMAKE_CXX_COMPILER=$CXX \\\n    -DCMAKE_C_FLAGS_RELEASE=\"${LLVM_FLAGS}\" \\\n    -DCMAKE_CXX_FLAGS_RELEASE=\"${LLVM_FLAGS}\" \\\n    -DLLVM_TABLEGEN=$TBLGEN \\\n    -DLLVM_ENABLE_ZLIB=ON \\\n    -DLLVM_ENABLE_TERMINFO=OFF \\\n    -DLLVM_ENABLE_THREADS=OFF \\\n  $LLVM_SRC\nfi\ncd ${LLVM_BUILD}\nninja LLVMSymbolize LLVMObject LLVMBinaryFormat LLVMDebugInfoDWARF LLVMSupport LLVMDebugInfoPDB LLVMDebuginfod LLVMMC LLVMDemangle LLVMTextAPI\n\ncd ${BUILD_DIR}\nrm -rf ${SYMBOLIZER_BUILD}\nmkdir ${SYMBOLIZER_BUILD}\ncd ${SYMBOLIZER_BUILD}\n\necho \"Compiling...\"\nSYMBOLIZER_FLAGS=\"$LLVM_FLAGS -I${LLVM_SRC}/include -I${LLVM_BUILD}/include -std=c++14\"\n$CXX $SYMBOLIZER_FLAGS ${SRC_DIR}/sanitizer_symbolize.cpp ${SRC_DIR}/sanitizer_wrappers.cpp -c\n$AR rc symbolizer.a sanitizer_symbolize.o 
sanitizer_wrappers.o\n\nSYMBOLIZER_API_LIST=__sanitizer_symbolize_code\nSYMBOLIZER_API_LIST+=,__sanitizer_symbolize_data\nSYMBOLIZER_API_LIST+=,__sanitizer_symbolize_flush\nSYMBOLIZER_API_LIST+=,__sanitizer_symbolize_demangle\nSYMBOLIZER_API_LIST+=,__sanitizer_symbolize_set_demangle\nSYMBOLIZER_API_LIST+=,__sanitizer_symbolize_set_inline_frames\n\nLIBCXX_ARCHIVE_DIR=$(dirname $(find $LIBCXX_BUILD -name libc++.a | head -n1))\n\n# Merge all the object files together and copy the resulting library back.\n$SCRIPT_DIR/ar_to_bc.sh $LIBCXX_ARCHIVE_DIR/libc++.a \\\n                        $LIBCXX_ARCHIVE_DIR/libc++abi.a \\\n                        $LLVM_BUILD/lib/libLLVMSymbolize.a \\\n                        $LLVM_BUILD/lib/libLLVMObject.a \\\n                        $LLVM_BUILD/lib/libLLVMBinaryFormat.a \\\n                        $LLVM_BUILD/lib/libLLVMDebugInfoDWARF.a \\\n                        $LLVM_BUILD/lib/libLLVMSupport.a \\\n                        $LLVM_BUILD/lib/libLLVMDebugInfoPDB.a \\\n                        $LLVM_BUILD/lib/libLLVMDebugInfoMSF.a \\\n                        $LLVM_BUILD/lib/libLLVMDebugInfoCodeView.a \\\n                        $LLVM_BUILD/lib/libLLVMDebuginfod.a \\\n                        $LLVM_BUILD/lib/libLLVMDemangle.a \\\n                        $LLVM_BUILD/lib/libLLVMMC.a \\\n                        $LLVM_BUILD/lib/libLLVMTextAPI.a \\\n                        $ZLIB_BUILD/libz.a \\\n                        symbolizer.a \\\n                        all.bc\n\necho \"Optimizing...\"\n$OPT -internalize -internalize-public-api-list=${SYMBOLIZER_API_LIST} all.bc -o opt.bc\n$CC $FLAGS -fno-lto -c opt.bc -o symbolizer.o\n\necho \"Checking undefined symbols...\"\nnm -f posix -g symbolizer.o | cut -f 1,2 -d \\  | LC_COLLATE=C sort -u > undefined.new\n(diff -u $SCRIPT_DIR/global_symbols.txt undefined.new | grep -E \"^\\+[^+]\") && \\\n  (echo \"Failed: unexpected symbols\"; exit 1)\n\narch() {\n  objdump -f $1 | grep -m1 -Po \"(?<=file format 
).*$\"\n}\n\nSYMBOLIZER_FORMAT=$(arch symbolizer.o)\necho \"Injecting $SYMBOLIZER_FORMAT symbolizer...\"\nfor A in $TARGE_DIR/libclang_rt.*san*.a; do\n  A_FORMAT=$(arch $A)\n  if [[ \"$A_FORMAT\" != \"$SYMBOLIZER_FORMAT\" ]] ; then\n    continue\n  fi\n  (nm -u $A 2>/dev/null | grep -E \"__sanitizer_symbolize_code\" >/dev/null) || continue\n  echo \"$A\"\n  $AR rcs $A symbolizer.o\ndone\n\necho \"Success!\"\n"
  },
  {
    "path": "runtime/sanitizer_common/symbolizer/scripts/global_symbols.txt",
    "content": "_GLOBAL_OFFSET_TABLE_ U\n_ZN11__sanitizer13internal_mmapEPvmiiiy U\n_ZN11__sanitizer13internal_openEPKcij U\n_ZN11__sanitizer13internal_statEPKcPv U\n_ZN11__sanitizer14internal_closeEi U\n_ZN11__sanitizer14internal_fstatEiPv U\n_ZN11__sanitizer14internal_lstatEPKcPv U\n_ZN11__sanitizer15internal_strlenEPKc U\n_ZN11__sanitizer16internal_iserrorEmPi U\n_ZN11__sanitizer17internal_snprintfEPcmPKcz U\n__ctype_b_loc U\n__ctype_get_mb_cur_max U\n__cxa_atexit U\n__divdi3 U\n__dso_handle U\n__errno_location U\n__interceptor_pread w\n__interceptor_pthread_cond_broadcast w\n__interceptor_pthread_cond_wait w\n__interceptor_pthread_getspecific w\n__interceptor_pthread_key_create w\n__interceptor_pthread_mutex_destroy w\n__interceptor_pthread_mutex_init w\n__interceptor_pthread_mutex_lock w\n__interceptor_pthread_mutex_unlock w\n__interceptor_pthread_mutexattr_destroy w\n__interceptor_pthread_mutexattr_init w\n__interceptor_pthread_mutexattr_settype w\n__interceptor_pthread_once w\n__interceptor_pthread_setspecific w\n__interceptor_read w\n__interceptor_realpath w\n__isinf U\n__isoc99_sscanf U\n__isoc99_vsscanf U\n__moddi3 U\n__sanitizer_symbolize_code T\n__sanitizer_symbolize_data T\n__sanitizer_symbolize_demangle T\n__sanitizer_symbolize_flush T\n__sanitizer_symbolize_set_demangle T\n__sanitizer_symbolize_set_inline_frames T\n__strdup U\n__udivdi3 U\n__umoddi3 U\n_exit U\nabort U\naccess U\nbcmp U\ncalloc U\ncatclose U\ncatgets U\ncatopen U\nceil U\nceilf U\ncfgetospeed U\nclock_gettime U\ndl_iterate_phdr U\ndlsym U\ndup U\ndup2 U\nenviron U\nexecv U\nexecve U\nexit U\nfclose U\nfflush U\nfileno U\nfopen U\nfork U\nfprintf U\nfputc U\nfree U\nfreelocale U\nfwrite U\ngetc U\ngetcwd U\ngetenv U\ngetpagesize U\ngetpid U\ngetpwuid U\ngetrlimit U\ngettimeofday U\ngetuid U\nioctl U\nisalnum U\nisalpha U\nisatty U\nislower U\nisprint U\nisspace U\nisupper U\nisxdigit U\nlog10 U\nlseek U\nlseek64 U\nmadvise U\nmalloc U\nmbrlen U\nmbrtowc U\nmbsnrtowcs U\nmbsrtowcs 
U\nmbtowc U\nmemchr U\nmemcmp U\nmemcpy U\nmemmove U\nmemset U\nmkdir U\nmunmap U\nnewlocale U\nperror U\nposix_spawn U\nposix_spawn_file_actions_adddup2 U\nposix_spawn_file_actions_addopen U\nposix_spawn_file_actions_destroy U\nposix_spawn_file_actions_init U\nqsort U\nraise U\nrand U\nreadlink U\nrealloc U\nremove U\nrename U\nsetrlimit U\nsetvbuf U\nsigaction U\nsigaltstack U\nsigemptyset U\nsigfillset U\nsigprocmask U\nsnprintf U\nsprintf U\nsrand U\nsscanf U\nstderr U\nstdin U\nstdout U\nstrcat U\nstrchr U\nstrcmp U\nstrcpy U\nstrdup U\nstrerror U\nstrerror_r U\nstrftime_l U\nstrncmp U\nstrncpy U\nstrrchr U\nstrsep U\nstrtod_l U\nstrtof_l U\nstrtok_r U\nstrtol U\nstrtold_l U\nstrtoll_l U\nstrtoull_l U\nsyscall U\nsysconf U\ntcgetattr U\nuname U\nungetc U\nunlink U\nuselocale U\nvasprintf U\nvfprintf U\nvsnprintf U\nvsscanf U\nwait4 U\nwaitpid U\nwcrtomb U\nwcslen U\nwcsnrtombs U\nwmemcpy U\nwmemmove U\nwmemset U\nwrite U\n"
  },
  {
    "path": "runtime/sanitizer_common/tests/CMakeLists.txt",
    "content": "include(CompilerRTCompile)\n\nclang_compiler_add_cxx_check()\n\n# FIXME: use SANITIZER_COMMON_SUPPORTED_ARCH here\nfilter_available_targets(SANITIZER_UNITTEST_SUPPORTED_ARCH x86_64 i386 mips64 mips64el riscv64 sparcv9 sparc)\nif(APPLE)\n  darwin_filter_host_archs(SANITIZER_UNITTEST_SUPPORTED_ARCH SANITIZER_UNITTEST_SUPPORTED_ARCH)\nendif()\n\nset(SANITIZER_UNITTESTS\n  sanitizer_addrhashmap_test.cpp\n  sanitizer_allocator_test.cpp\n  sanitizer_atomic_test.cpp\n  sanitizer_bitvector_test.cpp\n  sanitizer_bvgraph_test.cpp\n  sanitizer_chained_origin_depot_test.cpp\n  sanitizer_common_test.cpp\n  sanitizer_deadlock_detector_test.cpp\n  sanitizer_dense_map_test.cpp\n  sanitizer_flags_test.cpp\n  sanitizer_flat_map_test.cpp\n  sanitizer_format_interceptor_test.cpp\n  sanitizer_hash_test.cpp\n  sanitizer_ioctl_test.cpp\n  sanitizer_leb128_test.cpp\n  sanitizer_libc_test.cpp\n  sanitizer_linux_test.cpp\n  sanitizer_list_test.cpp\n  sanitizer_lzw_test.cpp\n  sanitizer_mac_test.cpp\n  sanitizer_mutex_test.cpp\n  sanitizer_nolibc_test.cpp\n  sanitizer_posix_test.cpp\n  sanitizer_printf_test.cpp\n  sanitizer_procmaps_test.cpp\n  sanitizer_ring_buffer_test.cpp\n  sanitizer_quarantine_test.cpp\n  sanitizer_stack_store_test.cpp\n  sanitizer_stackdepot_test.cpp\n  sanitizer_stacktrace_printer_test.cpp\n  sanitizer_stacktrace_test.cpp\n  sanitizer_stoptheworld_test.cpp\n  sanitizer_suppressions_test.cpp\n  sanitizer_symbolizer_test.cpp\n  sanitizer_test_main.cpp\n  sanitizer_thread_registry_test.cpp\n  sanitizer_type_traits_test.cpp\n  sanitizer_vector_test.cpp\n  )\n\nset(SANITIZER_TEST_HEADERS\n  sanitizer_pthread_wrappers.h\n  sanitizer_test_config.h\n  sanitizer_test_utils.h\n  )\nforeach(header ${SANITIZER_IMPL_HEADERS})\n  list(APPEND SANITIZER_TEST_HEADERS ${CMAKE_CURRENT_SOURCE_DIR}/../${header})\nendforeach()\n\nset(SANITIZER_TEST_CFLAGS_COMMON\n  ${COMPILER_RT_UNITTEST_CFLAGS}\n  ${COMPILER_RT_GTEST_CFLAGS}\n  ${COMPILER_RT_GMOCK_CFLAGS}\n  
-I${COMPILER_RT_SOURCE_DIR}/include\n  -I${COMPILER_RT_SOURCE_DIR}/lib\n  -I${COMPILER_RT_SOURCE_DIR}/lib/sanitizer_common\n  -fno-rtti\n  -O2\n  -Werror=sign-compare\n  -Wno-gnu-zero-variadic-macro-arguments\n  )\n\nset(SANITIZER_TEST_LINK_FLAGS_COMMON ${COMPILER_RT_UNITTEST_LINK_FLAGS})\n\n# -gline-tables-only must be enough for these tests, so use it if possible.\nif(COMPILER_RT_TEST_COMPILER_ID MATCHES \"Clang\")\n  list(APPEND SANITIZER_TEST_CFLAGS_COMMON -gline-tables-only)\nelse()\n  list(APPEND SANITIZER_TEST_CFLAGS_COMMON -g)\nendif()\nif(MSVC)\n  list(APPEND SANITIZER_TEST_CFLAGS_COMMON -gcodeview)\nendif()\nlist(APPEND SANITIZER_TEST_LINK_FLAGS_COMMON -g)\n\nif(NOT MSVC)\n  list(APPEND SANITIZER_TEST_LINK_FLAGS_COMMON --driver-mode=g++)\nendif()\n\nif(ANDROID)\n  list(APPEND SANITIZER_TEST_LINK_FLAGS_COMMON -pie)\nendif()\n\nif(APPLE)\n  list(APPEND SANITIZER_TEST_CFLAGS_COMMON ${DARWIN_osx_CFLAGS})\n  list(APPEND SANITIZER_TEST_LINK_FLAGS_COMMON ${DARWIN_osx_LINK_FLAGS})\n\n  add_weak_symbols(\"sanitizer_common\" WEAK_SYMBOL_LINK_FLAGS)\n  list(APPEND SANITIZER_TEST_LINK_FLAGS_COMMON ${WEAK_SYMBOL_LINK_FLAGS})\nendif()\n\n# MSVC linker is allocating 1M for the stack by default, which is not\n# enough for the unittests. Some unittests require more than 2M.\n# The default stack size for clang is 8M.\nif(MSVC)\n  list(APPEND SANITIZER_TEST_LINK_FLAGS_COMMON -Wl,/STACK:0xC00000)\nendif()\n\nset(SANITIZER_TEST_LINK_LIBS)\nappend_list_if(COMPILER_RT_HAS_LIBLOG log SANITIZER_TEST_LINK_LIBS)\n# NDK r10 requires -latomic almost always.\nappend_list_if(ANDROID atomic SANITIZER_TEST_LINK_LIBS)\n\nappend_list_if(COMPILER_RT_HAS_LIBDL -ldl SANITIZER_TEST_LINK_FLAGS_COMMON)\nappend_list_if(COMPILER_RT_HAS_LIBRT -lrt SANITIZER_TEST_LINK_FLAGS_COMMON)\nappend_list_if(COMPILER_RT_HAS_LIBPTHREAD -pthread SANITIZER_TEST_LINK_FLAGS_COMMON)\n# x86_64 FreeBSD 9.2 additionally requires libc++ to build the tests. 
Also,\n# 'libm' shall be specified explicitly to build i386 tests.\nif(CMAKE_SYSTEM MATCHES \"FreeBSD-9.2-RELEASE\")\n  list(APPEND SANITIZER_TEST_LINK_FLAGS_COMMON \"-lc++ -lm\")\nendif()\n\ninclude_directories(..)\ninclude_directories(../..)\n\n# Adds static library which contains sanitizer_common object file\n# (universal binary on Mac and arch-specific object files on Linux).\nmacro(add_sanitizer_common_lib library)\n  add_library(${library} STATIC ${ARGN})\n  set_target_properties(${library} PROPERTIES\n    ARCHIVE_OUTPUT_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}\n    FOLDER \"Compiler-RT Runtime tests\")\nendmacro()\n\nfunction(get_sanitizer_common_lib_for_arch arch lib)\n  if(APPLE)\n    set(tgt_name \"RTSanitizerCommon.test.osx\")\n  else()\n    set(tgt_name \"RTSanitizerCommon.test.${arch}\")\n  endif()\n  set(${lib} \"${tgt_name}\" PARENT_SCOPE)\nendfunction()\n\n# Sanitizer_common unit tests testsuite.\nadd_custom_target(SanitizerUnitTests)\nset_target_properties(SanitizerUnitTests PROPERTIES FOLDER \"Compiler-RT Tests\")\n\n# Adds sanitizer tests for architecture.\nmacro(add_sanitizer_tests_for_arch arch)\n  set(extra_flags)\n  if( CMAKE_SIZEOF_VOID_P EQUAL 4 )\n    list(APPEND extra_flags \"-D_LARGEFILE_SOURCE\")\n    list(APPEND extra_flags \"-D_FILE_OFFSET_BITS=64\")\n  endif()\n  get_sanitizer_common_lib_for_arch(${arch} SANITIZER_COMMON_LIB)\n\n  set(SANITIZER_TEST_OBJECTS)\n  generate_compiler_rt_tests(SANITIZER_TEST_OBJECTS SanitizerUnitTests\n    \"Sanitizer-${arch}-Test\" ${arch}\n    RUNTIME \"${SANITIZER_COMMON_LIB}\"\n    SOURCES ${SANITIZER_UNITTESTS} ${COMPILER_RT_GTEST_SOURCE} ${COMPILER_RT_GMOCK_SOURCE}\n    COMPILE_DEPS ${SANITIZER_TEST_HEADERS}\n    DEPS gtest\n    CFLAGS  ${SANITIZER_TEST_CFLAGS_COMMON} ${extra_flags}\n    LINK_FLAGS ${SANITIZER_TEST_LINK_FLAGS_COMMON} ${extra_flags})\n\n  if(\"${CMAKE_SYSTEM_NAME}\" STREQUAL \"Linux\" AND \"${arch}\" STREQUAL \"x86_64\")\n    # Test that the libc-independent part of sanitizer_common is 
indeed\n    # independent of libc, by linking this binary without libc (here) and\n    # executing it (unit test in sanitizer_nolibc_test.cpp).\n    get_target_flags_for_arch(${arch} TARGET_FLAGS)\n    clang_compile(sanitizer_nolibc_test_main.${arch}.o\n                  sanitizer_nolibc_test_main.cpp\n                  CFLAGS ${SANITIZER_TEST_CFLAGS_COMMON} ${TARGET_FLAGS}\n                  DEPS ${SANITIZER_TEST_COMPILE_DEPS})\n    add_compiler_rt_test(SanitizerUnitTests \"Sanitizer-${arch}-Test-Nolibc\" ${arch}\n                         OBJECTS sanitizer_nolibc_test_main.${arch}.o\n                                 -Wl,-whole-archive\n                                 libRTSanitizerCommon.test.nolibc.${arch}.a\n                                 -Wl,-no-whole-archive\n                         DEPS sanitizer_nolibc_test_main.${arch}.o\n                              RTSanitizerCommon.test.nolibc.${arch}\n                         LINK_FLAGS -static -nostdlib ${TARGET_FLAGS})\n  endif()\nendmacro()\n\nif(COMPILER_RT_CAN_EXECUTE_TESTS AND NOT ANDROID)\n  # We use just-built clang to build sanitizer_common unittests, so we must\n  # be sure that produced binaries would work.\n  if(APPLE)\n    add_sanitizer_common_lib(\"RTSanitizerCommon.test.osx\"\n                             $<TARGET_OBJECTS:RTSanitizerCommon.osx>\n                             $<TARGET_OBJECTS:RTSanitizerCommonLibc.osx>\n                             $<TARGET_OBJECTS:RTSanitizerCommonSymbolizer.osx>)\n  else()\n    if(CAN_TARGET_x86_64)\n      add_sanitizer_common_lib(\"RTSanitizerCommon.test.nolibc.x86_64\"\n                               $<TARGET_OBJECTS:RTSanitizerCommon.x86_64>\n                               $<TARGET_OBJECTS:RTSanitizerCommonNoLibc.x86_64>)\n    endif()\n    foreach(arch ${SANITIZER_UNITTEST_SUPPORTED_ARCH})\n      add_sanitizer_common_lib(\"RTSanitizerCommon.test.${arch}\"\n                               $<TARGET_OBJECTS:RTSanitizerCommon.${arch}>\n                               
$<TARGET_OBJECTS:RTSanitizerCommonLibc.${arch}>\n                               $<TARGET_OBJECTS:RTSanitizerCommonSymbolizer.${arch}>)\n    endforeach()\n  endif()\n  foreach(arch ${SANITIZER_UNITTEST_SUPPORTED_ARCH})\n    add_sanitizer_tests_for_arch(${arch})\n  endforeach()\nendif()\n\nif(ANDROID)\n  foreach(arch ${SANITIZER_COMMON_SUPPORTED_ARCH})\n    add_executable(SanitizerTest\n      ${SANITIZER_UNITTESTS}\n      ${COMPILER_RT_GTEST_SOURCE}\n      ${COMPILER_RT_GMOCK_SOURCE}\n      $<TARGET_OBJECTS:RTSanitizerCommon.${arch}>\n      $<TARGET_OBJECTS:RTSanitizerCommonLibc.${arch}>\n      $<TARGET_OBJECTS:RTSanitizerCommonSymbolizer.${arch}>)\n    set_target_compile_flags(SanitizerTest ${SANITIZER_TEST_CFLAGS_COMMON})\n    # Setup correct output directory and link flags.\n    set_target_properties(SanitizerTest PROPERTIES\n      RUNTIME_OUTPUT_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR})\n    set_target_link_flags(SanitizerTest ${SANITIZER_TEST_LINK_FLAGS_COMMON})\n    target_link_libraries(SanitizerTest ${SANITIZER_TEST_LINK_LIBS})\n    # Add unit test to test suite.\n    add_dependencies(SanitizerUnitTests SanitizerTest)\n  endforeach()\nendif()\n"
  },
  {
    "path": "runtime/sanitizer_common/tests/malloc_stress_transfer_test.cpp",
    "content": "#include <thread>\n\nconst size_t kAllocSize = 16;\nconst size_t kInitialNumAllocs = 1 << 10;\nconst size_t kPeriodicNumAllocs = 1 << 10;\nconst size_t kNumIterations = 1 << 7;\nconst size_t kNumThreads = 16;\n\nvoid Thread() {\n  char *InitialAllocations[kInitialNumAllocs];\n  char *PeriodicAllocations[kPeriodicNumAllocs];\n  for (auto &p : InitialAllocations) p = new char[kAllocSize];\n  for (size_t i = 0; i < kNumIterations; i++) {\n    for (size_t j = 0; j < kPeriodicNumAllocs; j++) {\n      for (auto &p : PeriodicAllocations) {\n        p = new char[kAllocSize];\n        *p = 0;\n      }\n      for (auto p : PeriodicAllocations) delete [] p;\n    }\n  }\n  for (auto p : InitialAllocations) delete [] p;\n}\n\nint main() {\n  std::thread *Threads[kNumThreads];\n  for (auto &T : Threads) T = new std::thread(&Thread);\n  for (auto T : Threads) {\n    T->join();\n    delete T;\n  }\n}\n"
  },
  {
    "path": "runtime/sanitizer_common/tests/sanitizer_addrhashmap_test.cpp",
    "content": "//===-- sanitizer_addrhashmap_test.cpp ------------------------------------===//\n//\n// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n// See https://llvm.org/LICENSE.txt for license information.\n// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n//\n//===----------------------------------------------------------------------===//\n#include \"sanitizer_common/sanitizer_addrhashmap.h\"\n\n#include <unordered_map>\n\n#include \"gtest/gtest.h\"\n\nnamespace __sanitizer {\n\nstruct Value {\n  int payload;\n  inline bool operator==(const Value& rhs) const {\n    return payload == rhs.payload;\n  }\n};\n\nusing MapTy = AddrHashMap<Value, 11>;\nusing HandleTy = MapTy::Handle;\nusing RefMapTy = std::unordered_map<uptr, Value>;\n\nstatic void ExistsInReferenceMap(const uptr key, const Value& val, void* arg) {\n  RefMapTy* ref = reinterpret_cast<RefMapTy*>(arg);\n  const RefMapTy::iterator iter = ref->find(key);\n  ASSERT_NE(iter, ref->end());\n  EXPECT_EQ(iter->second, val);\n  ref->erase(iter);\n}\n\nTEST(AddrHashMap, Basic) {\n  // Use a reference implementation to compare with.\n  RefMapTy reference_map{\n      {0x1000, {1}},\n      {0x2000, {2}},\n      {0x3000, {3}},\n  };\n\n  MapTy m;\n\n  for (const auto& key_val : reference_map) {\n    const uptr key = key_val.first;\n    const Value val = key_val.second;\n\n    // Insert all the elements.\n    {\n      HandleTy h(&m, key);\n      ASSERT_TRUE(h.created());\n      h->payload = val.payload;\n    }\n  }\n\n  // Now check that all the elements are present.\n  m.ForEach(ExistsInReferenceMap, &reference_map);\n  EXPECT_TRUE(reference_map.empty());\n}\n\n}  // namespace __sanitizer\n"
  },
  {
    "path": "runtime/sanitizer_common/tests/sanitizer_allocator_test.cpp",
    "content": "//===-- sanitizer_allocator_test.cpp --------------------------------------===//\n//\n// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n// See https://llvm.org/LICENSE.txt for license information.\n// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n//\n//===----------------------------------------------------------------------===//\n//\n// This file is a part of ThreadSanitizer/AddressSanitizer runtime.\n// Tests for sanitizer_allocator.h.\n//\n//===----------------------------------------------------------------------===//\n#include \"sanitizer_common/sanitizer_allocator.h\"\n#include \"sanitizer_common/sanitizer_allocator_internal.h\"\n#include \"sanitizer_common/sanitizer_common.h\"\n\n#include \"sanitizer_test_utils.h\"\n#include \"sanitizer_pthread_wrappers.h\"\n\n#include \"gtest/gtest.h\"\n\n#include <stdio.h>\n#include <stdlib.h>\n#include <algorithm>\n#include <vector>\n#include <random>\n#include <set>\n\nusing namespace __sanitizer;\n\n#if SANITIZER_SOLARIS && defined(__sparcv9)\n// FIXME: These tests probably fail because Solaris/sparcv9 uses the full\n// 64-bit address space.  Needs more investigation\n#define SKIP_ON_SOLARIS_SPARCV9(x) DISABLED_##x\n#else\n#define SKIP_ON_SOLARIS_SPARCV9(x) x\n#endif\n\n// On 64-bit systems with small virtual address spaces (e.g. 39-bit) we can't\n// use size class maps with a large number of classes, as that will make the\n// SizeClassAllocator64 region size too small (< 2^32).\n#if SANITIZER_ANDROID && defined(__aarch64__)\n#define ALLOCATOR64_SMALL_SIZE 1\n#elif SANITIZER_RISCV64\n#define ALLOCATOR64_SMALL_SIZE 1\n#else\n#define ALLOCATOR64_SMALL_SIZE 0\n#endif\n\n// Too slow for debug build\n#if !SANITIZER_DEBUG\n\n#if SANITIZER_CAN_USE_ALLOCATOR64\n#if SANITIZER_WINDOWS\n// On Windows 64-bit there is no easy way to find a large enough fixed address\n// space that is always available. Thus, a dynamically allocated address space\n// is used instead (i.e. 
~(uptr)0).\nstatic const uptr kAllocatorSpace = ~(uptr)0;\nstatic const uptr kAllocatorSize  =  0x8000000000ULL;  // 500G\nstatic const u64 kAddressSpaceSize = 1ULL << 47;\ntypedef DefaultSizeClassMap SizeClassMap;\n#elif SANITIZER_ANDROID && defined(__aarch64__)\nstatic const uptr kAllocatorSpace = 0x3000000000ULL;\nstatic const uptr kAllocatorSize  = 0x2000000000ULL;\nstatic const u64 kAddressSpaceSize = 1ULL << 39;\ntypedef VeryCompactSizeClassMap SizeClassMap;\n#elif SANITIZER_RISCV64\nconst uptr kAllocatorSpace = ~(uptr)0;\nconst uptr kAllocatorSize = 0x2000000000ULL;  // 128G.\nstatic const u64 kAddressSpaceSize = 1ULL << 38;\ntypedef VeryDenseSizeClassMap SizeClassMap;\n#else\nstatic const uptr kAllocatorSpace = 0x700000000000ULL;\nstatic const uptr kAllocatorSize  = 0x010000000000ULL;  // 1T.\nstatic const u64 kAddressSpaceSize = 1ULL << 47;\ntypedef DefaultSizeClassMap SizeClassMap;\n#endif\n\ntemplate <typename AddressSpaceViewTy>\nstruct AP64 {  // Allocator Params. Short name for shorter demangled names..\n  static const uptr kSpaceBeg = kAllocatorSpace;\n  static const uptr kSpaceSize = kAllocatorSize;\n  static const uptr kMetadataSize = 16;\n  typedef ::SizeClassMap SizeClassMap;\n  typedef NoOpMapUnmapCallback MapUnmapCallback;\n  static const uptr kFlags = 0;\n  using AddressSpaceView = AddressSpaceViewTy;\n};\n\ntemplate <typename AddressSpaceViewTy>\nstruct AP64Dyn {\n  static const uptr kSpaceBeg = ~(uptr)0;\n  static const uptr kSpaceSize = kAllocatorSize;\n  static const uptr kMetadataSize = 16;\n  typedef ::SizeClassMap SizeClassMap;\n  typedef NoOpMapUnmapCallback MapUnmapCallback;\n  static const uptr kFlags = 0;\n  using AddressSpaceView = AddressSpaceViewTy;\n};\n\ntemplate <typename AddressSpaceViewTy>\nstruct AP64Compact {\n  static const uptr kSpaceBeg = ~(uptr)0;\n  static const uptr kSpaceSize = kAllocatorSize;\n  static const uptr kMetadataSize = 16;\n  typedef CompactSizeClassMap SizeClassMap;\n  typedef NoOpMapUnmapCallback 
MapUnmapCallback;\n  static const uptr kFlags = 0;\n  using AddressSpaceView = AddressSpaceViewTy;\n};\n\ntemplate <typename AddressSpaceViewTy>\nstruct AP64VeryCompact {\n  static const uptr kSpaceBeg = ~(uptr)0;\n  static const uptr kSpaceSize = 1ULL << 37;\n  static const uptr kMetadataSize = 16;\n  typedef VeryCompactSizeClassMap SizeClassMap;\n  typedef NoOpMapUnmapCallback MapUnmapCallback;\n  static const uptr kFlags = 0;\n  using AddressSpaceView = AddressSpaceViewTy;\n};\n\ntemplate <typename AddressSpaceViewTy>\nstruct AP64Dense {\n  static const uptr kSpaceBeg = kAllocatorSpace;\n  static const uptr kSpaceSize = kAllocatorSize;\n  static const uptr kMetadataSize = 16;\n  typedef DenseSizeClassMap SizeClassMap;\n  typedef NoOpMapUnmapCallback MapUnmapCallback;\n  static const uptr kFlags = 0;\n  using AddressSpaceView = AddressSpaceViewTy;\n};\n\ntemplate <typename AddressSpaceView>\nusing Allocator64ASVT = SizeClassAllocator64<AP64<AddressSpaceView>>;\nusing Allocator64 = Allocator64ASVT<LocalAddressSpaceView>;\n\ntemplate <typename AddressSpaceView>\nusing Allocator64DynamicASVT = SizeClassAllocator64<AP64Dyn<AddressSpaceView>>;\nusing Allocator64Dynamic = Allocator64DynamicASVT<LocalAddressSpaceView>;\n\ntemplate <typename AddressSpaceView>\nusing Allocator64CompactASVT =\n    SizeClassAllocator64<AP64Compact<AddressSpaceView>>;\nusing Allocator64Compact = Allocator64CompactASVT<LocalAddressSpaceView>;\n\ntemplate <typename AddressSpaceView>\nusing Allocator64VeryCompactASVT =\n    SizeClassAllocator64<AP64VeryCompact<AddressSpaceView>>;\nusing Allocator64VeryCompact =\n    Allocator64VeryCompactASVT<LocalAddressSpaceView>;\n\ntemplate <typename AddressSpaceView>\nusing Allocator64DenseASVT = SizeClassAllocator64<AP64Dense<AddressSpaceView>>;\nusing Allocator64Dense = Allocator64DenseASVT<LocalAddressSpaceView>;\n\n#elif defined(__mips64)\nstatic const u64 kAddressSpaceSize = 1ULL << 40;\n#elif defined(__aarch64__)\nstatic const u64 kAddressSpaceSize = 
1ULL << 39;\n#elif defined(__s390x__)\nstatic const u64 kAddressSpaceSize = 1ULL << 53;\n#elif defined(__s390__)\nstatic const u64 kAddressSpaceSize = 1ULL << 31;\n#else\nstatic const u64 kAddressSpaceSize = 1ULL << 32;\n#endif\n\nstatic const uptr kRegionSizeLog = FIRST_32_SECOND_64(20, 24);\n\ntemplate <typename AddressSpaceViewTy>\nstruct AP32Compact {\n  static const uptr kSpaceBeg = 0;\n  static const u64 kSpaceSize = kAddressSpaceSize;\n  static const uptr kMetadataSize = 16;\n  typedef CompactSizeClassMap SizeClassMap;\n  static const uptr kRegionSizeLog = ::kRegionSizeLog;\n  using AddressSpaceView = AddressSpaceViewTy;\n  typedef NoOpMapUnmapCallback MapUnmapCallback;\n  static const uptr kFlags = 0;\n};\ntemplate <typename AddressSpaceView>\nusing Allocator32CompactASVT =\n    SizeClassAllocator32<AP32Compact<AddressSpaceView>>;\nusing Allocator32Compact = Allocator32CompactASVT<LocalAddressSpaceView>;\n\ntemplate <class SizeClassMap>\nvoid TestSizeClassMap() {\n  typedef SizeClassMap SCMap;\n  SCMap::Print();\n  SCMap::Validate();\n}\n\nTEST(SanitizerCommon, DefaultSizeClassMap) {\n  TestSizeClassMap<DefaultSizeClassMap>();\n}\n\nTEST(SanitizerCommon, CompactSizeClassMap) {\n  TestSizeClassMap<CompactSizeClassMap>();\n}\n\nTEST(SanitizerCommon, VeryCompactSizeClassMap) {\n  TestSizeClassMap<VeryCompactSizeClassMap>();\n}\n\nTEST(SanitizerCommon, InternalSizeClassMap) {\n  TestSizeClassMap<InternalSizeClassMap>();\n}\n\nTEST(SanitizerCommon, DenseSizeClassMap) {\n  TestSizeClassMap<VeryCompactSizeClassMap>();\n}\n\ntemplate <class Allocator>\nvoid TestSizeClassAllocator(uptr premapped_heap = 0) {\n  Allocator *a = new Allocator;\n  a->Init(kReleaseToOSIntervalNever, premapped_heap);\n  typename Allocator::AllocatorCache cache;\n  memset(&cache, 0, sizeof(cache));\n  cache.Init(0);\n\n  static const uptr sizes[] = {\n    1, 16,  30, 40, 100, 1000, 10000,\n    50000, 60000, 100000, 120000, 300000, 500000, 1000000, 2000000\n  };\n\n  std::vector<void *> 
allocated;\n\n  uptr last_total_allocated = 0;\n  for (int i = 0; i < 3; i++) {\n    // Allocate a bunch of chunks.\n    for (uptr s = 0; s < ARRAY_SIZE(sizes); s++) {\n      uptr size = sizes[s];\n      if (!a->CanAllocate(size, 1)) continue;\n      // printf(\"s = %ld\\n\", size);\n      uptr n_iter = std::max((uptr)6, 4000000 / size);\n      // fprintf(stderr, \"size: %ld iter: %ld\\n\", size, n_iter);\n      for (uptr i = 0; i < n_iter; i++) {\n        uptr class_id0 = Allocator::SizeClassMapT::ClassID(size);\n        char *x = (char*)cache.Allocate(a, class_id0);\n        x[0] = 0;\n        x[size - 1] = 0;\n        x[size / 2] = 0;\n        allocated.push_back(x);\n        CHECK_EQ(x, a->GetBlockBegin(x));\n        CHECK_EQ(x, a->GetBlockBegin(x + size - 1));\n        CHECK(a->PointerIsMine(x));\n        CHECK(a->PointerIsMine(x + size - 1));\n        CHECK(a->PointerIsMine(x + size / 2));\n        CHECK_GE(a->GetActuallyAllocatedSize(x), size);\n        uptr class_id = a->GetSizeClass(x);\n        CHECK_EQ(class_id, Allocator::SizeClassMapT::ClassID(size));\n        uptr *metadata = reinterpret_cast<uptr*>(a->GetMetaData(x));\n        metadata[0] = reinterpret_cast<uptr>(x) + 1;\n        metadata[1] = 0xABCD;\n      }\n    }\n    // Deallocate all.\n    for (uptr i = 0; i < allocated.size(); i++) {\n      void *x = allocated[i];\n      uptr *metadata = reinterpret_cast<uptr*>(a->GetMetaData(x));\n      CHECK_EQ(metadata[0], reinterpret_cast<uptr>(x) + 1);\n      CHECK_EQ(metadata[1], 0xABCD);\n      cache.Deallocate(a, a->GetSizeClass(x), x);\n    }\n    allocated.clear();\n    uptr total_allocated = a->TotalMemoryUsed();\n    if (last_total_allocated == 0)\n      last_total_allocated = total_allocated;\n    CHECK_EQ(last_total_allocated, total_allocated);\n  }\n\n  // Check that GetBlockBegin never crashes.\n  for (uptr x = 0, step = kAddressSpaceSize / 100000;\n       x < kAddressSpaceSize - step; x += step)\n    if (a->PointerIsMine(reinterpret_cast<void 
*>(x)))\n      Ident(a->GetBlockBegin(reinterpret_cast<void *>(x)));\n\n  a->TestOnlyUnmap();\n  delete a;\n}\n\n#if SANITIZER_CAN_USE_ALLOCATOR64\n\n// Allocates kAllocatorSize aligned bytes on construction and frees it on\n// destruction.\nclass ScopedPremappedHeap {\n public:\n  ScopedPremappedHeap() {\n    BasePtr = MmapNoReserveOrDie(2 * kAllocatorSize, \"preallocated heap\");\n    AlignedAddr = RoundUpTo(reinterpret_cast<uptr>(BasePtr), kAllocatorSize);\n  }\n\n  ~ScopedPremappedHeap() { UnmapOrDie(BasePtr, kAllocatorSize); }\n\n  uptr Addr() { return AlignedAddr; }\n\n private:\n  void *BasePtr;\n  uptr AlignedAddr;\n};\n\n// These tests can fail on Windows if memory is somewhat full and lit happens\n// to run them all at the same time. FIXME: Make them not flaky and reenable.\n#if !SANITIZER_WINDOWS\nTEST(SanitizerCommon, SizeClassAllocator64) {\n  TestSizeClassAllocator<Allocator64>();\n}\n\nTEST(SanitizerCommon, SizeClassAllocator64Dynamic) {\n  TestSizeClassAllocator<Allocator64Dynamic>();\n}\n\n#if !ALLOCATOR64_SMALL_SIZE\n// Android only has 39-bit address space, so mapping 2 * kAllocatorSize\n// sometimes fails.\nTEST(SanitizerCommon, SizeClassAllocator64DynamicPremapped) {\n  ScopedPremappedHeap h;\n  TestSizeClassAllocator<Allocator64Dynamic>(h.Addr());\n}\n\nTEST(SanitizerCommon, SizeClassAllocator64Compact) {\n  TestSizeClassAllocator<Allocator64Compact>();\n}\n\nTEST(SanitizerCommon, SizeClassAllocator64Dense) {\n  TestSizeClassAllocator<Allocator64Dense>();\n}\n#endif\n\nTEST(SanitizerCommon, SizeClassAllocator64VeryCompact) {\n  TestSizeClassAllocator<Allocator64VeryCompact>();\n}\n#endif\n#endif\n\nTEST(SanitizerCommon, SizeClassAllocator32Compact) {\n  TestSizeClassAllocator<Allocator32Compact>();\n}\n\ntemplate <typename AddressSpaceViewTy>\nstruct AP32SeparateBatches {\n  static const uptr kSpaceBeg = 0;\n  static const u64 kSpaceSize = kAddressSpaceSize;\n  static const uptr kMetadataSize = 16;\n  typedef DefaultSizeClassMap 
SizeClassMap;\n  static const uptr kRegionSizeLog = ::kRegionSizeLog;\n  using AddressSpaceView = AddressSpaceViewTy;\n  typedef NoOpMapUnmapCallback MapUnmapCallback;\n  static const uptr kFlags =\n      SizeClassAllocator32FlagMasks::kUseSeparateSizeClassForBatch;\n};\ntemplate <typename AddressSpaceView>\nusing Allocator32SeparateBatchesASVT =\n    SizeClassAllocator32<AP32SeparateBatches<AddressSpaceView>>;\nusing Allocator32SeparateBatches =\n    Allocator32SeparateBatchesASVT<LocalAddressSpaceView>;\n\nTEST(SanitizerCommon, SizeClassAllocator32SeparateBatches) {\n  TestSizeClassAllocator<Allocator32SeparateBatches>();\n}\n\ntemplate <class Allocator>\nvoid SizeClassAllocatorMetadataStress(uptr premapped_heap = 0) {\n  Allocator *a = new Allocator;\n  a->Init(kReleaseToOSIntervalNever, premapped_heap);\n  typename Allocator::AllocatorCache cache;\n  memset(&cache, 0, sizeof(cache));\n  cache.Init(0);\n\n  const uptr kNumAllocs = 1 << 13;\n  void *allocated[kNumAllocs];\n  void *meta[kNumAllocs];\n  for (uptr i = 0; i < kNumAllocs; i++) {\n    void *x = cache.Allocate(a, 1 + i % (Allocator::kNumClasses - 1));\n    allocated[i] = x;\n    meta[i] = a->GetMetaData(x);\n  }\n  // Get Metadata kNumAllocs^2 times.\n  for (uptr i = 0; i < kNumAllocs * kNumAllocs; i++) {\n    uptr idx = i % kNumAllocs;\n    void *m = a->GetMetaData(allocated[idx]);\n    EXPECT_EQ(m, meta[idx]);\n  }\n  for (uptr i = 0; i < kNumAllocs; i++) {\n    cache.Deallocate(a, 1 + i % (Allocator::kNumClasses - 1), allocated[i]);\n  }\n\n  a->TestOnlyUnmap();\n  delete a;\n}\n\n#if SANITIZER_CAN_USE_ALLOCATOR64\n// These tests can fail on Windows if memory is somewhat full and lit happens\n// to run them all at the same time. 
FIXME: Make them not flaky and reenable.\n#if !SANITIZER_WINDOWS\nTEST(SanitizerCommon, SizeClassAllocator64MetadataStress) {\n  SizeClassAllocatorMetadataStress<Allocator64>();\n}\n\nTEST(SanitizerCommon, SizeClassAllocator64DynamicMetadataStress) {\n  SizeClassAllocatorMetadataStress<Allocator64Dynamic>();\n}\n\n#if !ALLOCATOR64_SMALL_SIZE\nTEST(SanitizerCommon, SizeClassAllocator64DynamicPremappedMetadataStress) {\n  ScopedPremappedHeap h;\n  SizeClassAllocatorMetadataStress<Allocator64Dynamic>(h.Addr());\n}\n\nTEST(SanitizerCommon, SizeClassAllocator64CompactMetadataStress) {\n  SizeClassAllocatorMetadataStress<Allocator64Compact>();\n}\n#endif\n\n#endif\n#endif  // SANITIZER_CAN_USE_ALLOCATOR64\nTEST(SanitizerCommon, SizeClassAllocator32CompactMetadataStress) {\n  SizeClassAllocatorMetadataStress<Allocator32Compact>();\n}\n\ntemplate <class Allocator>\nvoid SizeClassAllocatorGetBlockBeginStress(u64 TotalSize,\n                                           uptr premapped_heap = 0) {\n  Allocator *a = new Allocator;\n  a->Init(kReleaseToOSIntervalNever, premapped_heap);\n  typename Allocator::AllocatorCache cache;\n  memset(&cache, 0, sizeof(cache));\n  cache.Init(0);\n\n  uptr max_size_class = Allocator::SizeClassMapT::kLargestClassID;\n  uptr size = Allocator::SizeClassMapT::Size(max_size_class);\n  // Make sure we correctly compute GetBlockBegin() w/o overflow.\n  for (size_t i = 0; i <= TotalSize / size; i++) {\n    void *x = cache.Allocate(a, max_size_class);\n    void *beg = a->GetBlockBegin(x);\n    // if ((i & (i - 1)) == 0)\n    //   fprintf(stderr, \"[%zd] %p %p\\n\", i, x, beg);\n    EXPECT_EQ(x, beg);\n  }\n\n  a->TestOnlyUnmap();\n  delete a;\n}\n\n#if SANITIZER_CAN_USE_ALLOCATOR64\n// These tests can fail on Windows if memory is somewhat full and lit happens\n// to run them all at the same time. 
FIXME: Make them not flaky and reenable.\n#if !SANITIZER_WINDOWS\nTEST(SanitizerCommon, SizeClassAllocator64GetBlockBegin) {\n  SizeClassAllocatorGetBlockBeginStress<Allocator64>(\n      1ULL << (SANITIZER_ANDROID ? 31 : 33));\n}\nTEST(SanitizerCommon, SizeClassAllocator64DynamicGetBlockBegin) {\n  SizeClassAllocatorGetBlockBeginStress<Allocator64Dynamic>(\n      1ULL << (SANITIZER_ANDROID ? 31 : 33));\n}\n#if !ALLOCATOR64_SMALL_SIZE\nTEST(SanitizerCommon, SizeClassAllocator64DynamicPremappedGetBlockBegin) {\n  ScopedPremappedHeap h;\n  SizeClassAllocatorGetBlockBeginStress<Allocator64Dynamic>(\n      1ULL << (SANITIZER_ANDROID ? 31 : 33), h.Addr());\n}\nTEST(SanitizerCommon, SizeClassAllocator64CompactGetBlockBegin) {\n  SizeClassAllocatorGetBlockBeginStress<Allocator64Compact>(1ULL << 33);\n}\n#endif\nTEST(SanitizerCommon, SizeClassAllocator64VeryCompactGetBlockBegin) {\n  // Does not have > 4Gb for each class.\n  SizeClassAllocatorGetBlockBeginStress<Allocator64VeryCompact>(1ULL << 31);\n}\nTEST(SanitizerCommon, SizeClassAllocator32CompactGetBlockBegin) {\n  SizeClassAllocatorGetBlockBeginStress<Allocator32Compact>(1ULL << 33);\n}\n#endif\n#endif  // SANITIZER_CAN_USE_ALLOCATOR64\n\nstruct TestMapUnmapCallback {\n  static int map_count, unmap_count;\n  void OnMap(uptr p, uptr size) const { map_count++; }\n  void OnUnmap(uptr p, uptr size) const { unmap_count++; }\n};\nint TestMapUnmapCallback::map_count;\nint TestMapUnmapCallback::unmap_count;\n\n#if SANITIZER_CAN_USE_ALLOCATOR64\n// These tests can fail on Windows if memory is somewhat full and lit happens\n// to run them all at the same time. 
FIXME: Make them not flaky and reenable.\n#if !SANITIZER_WINDOWS\n\ntemplate <typename AddressSpaceViewTy = LocalAddressSpaceView>\nstruct AP64WithCallback {\n  static const uptr kSpaceBeg = kAllocatorSpace;\n  static const uptr kSpaceSize = kAllocatorSize;\n  static const uptr kMetadataSize = 16;\n  typedef ::SizeClassMap SizeClassMap;\n  typedef TestMapUnmapCallback MapUnmapCallback;\n  static const uptr kFlags = 0;\n  using AddressSpaceView = AddressSpaceViewTy;\n};\n\nTEST(SanitizerCommon, SizeClassAllocator64MapUnmapCallback) {\n  TestMapUnmapCallback::map_count = 0;\n  TestMapUnmapCallback::unmap_count = 0;\n  typedef SizeClassAllocator64<AP64WithCallback<>> Allocator64WithCallBack;\n  Allocator64WithCallBack *a = new Allocator64WithCallBack;\n  a->Init(kReleaseToOSIntervalNever);\n  EXPECT_EQ(TestMapUnmapCallback::map_count, 1);  // Allocator state.\n  typename Allocator64WithCallBack::AllocatorCache cache;\n  memset(&cache, 0, sizeof(cache));\n  cache.Init(0);\n  AllocatorStats stats;\n  stats.Init();\n  const size_t kNumChunks = 128;\n  uint32_t chunks[kNumChunks];\n  a->GetFromAllocator(&stats, 30, chunks, kNumChunks);\n  // State + alloc + metadata + freearray.\n  EXPECT_EQ(TestMapUnmapCallback::map_count, 4);\n  a->TestOnlyUnmap();\n  EXPECT_EQ(TestMapUnmapCallback::unmap_count, 1);  // The whole thing.\n  delete a;\n}\n#endif\n#endif\n\ntemplate <typename AddressSpaceViewTy = LocalAddressSpaceView>\nstruct AP32WithCallback {\n  static const uptr kSpaceBeg = 0;\n  static const u64 kSpaceSize = kAddressSpaceSize;\n  static const uptr kMetadataSize = 16;\n  typedef CompactSizeClassMap SizeClassMap;\n  static const uptr kRegionSizeLog = ::kRegionSizeLog;\n  using AddressSpaceView = AddressSpaceViewTy;\n  typedef TestMapUnmapCallback MapUnmapCallback;\n  static const uptr kFlags = 0;\n};\n\nTEST(SanitizerCommon, SizeClassAllocator32MapUnmapCallback) {\n  TestMapUnmapCallback::map_count = 0;\n  TestMapUnmapCallback::unmap_count = 0;\n  typedef 
SizeClassAllocator32<AP32WithCallback<>> Allocator32WithCallBack;\n  Allocator32WithCallBack *a = new Allocator32WithCallBack;\n  a->Init(kReleaseToOSIntervalNever);\n  EXPECT_EQ(TestMapUnmapCallback::map_count, 0);\n  Allocator32WithCallBack::AllocatorCache cache;\n  memset(&cache, 0, sizeof(cache));\n  cache.Init(0);\n  AllocatorStats stats;\n  stats.Init();\n  a->AllocateBatch(&stats, &cache, 32);\n  EXPECT_EQ(TestMapUnmapCallback::map_count, 1);\n  a->TestOnlyUnmap();\n  EXPECT_EQ(TestMapUnmapCallback::unmap_count, 1);\n  delete a;\n  // fprintf(stderr, \"Map: %d Unmap: %d\\n\",\n  //         TestMapUnmapCallback::map_count,\n  //         TestMapUnmapCallback::unmap_count);\n}\n\nTEST(SanitizerCommon, LargeMmapAllocatorMapUnmapCallback) {\n  TestMapUnmapCallback::map_count = 0;\n  TestMapUnmapCallback::unmap_count = 0;\n  LargeMmapAllocator<TestMapUnmapCallback> a;\n  a.Init();\n  AllocatorStats stats;\n  stats.Init();\n  void *x = a.Allocate(&stats, 1 << 20, 1);\n  EXPECT_EQ(TestMapUnmapCallback::map_count, 1);\n  a.Deallocate(&stats, x);\n  EXPECT_EQ(TestMapUnmapCallback::unmap_count, 1);\n}\n\n// Don't test OOM conditions on Win64 because it causes other tests on the same\n// machine to OOM.\n#if SANITIZER_CAN_USE_ALLOCATOR64 && !SANITIZER_WINDOWS64\nTEST(SanitizerCommon, SizeClassAllocator64Overflow) {\n  Allocator64 a;\n  a.Init(kReleaseToOSIntervalNever);\n  Allocator64::AllocatorCache cache;\n  memset(&cache, 0, sizeof(cache));\n  cache.Init(0);\n  AllocatorStats stats;\n  stats.Init();\n\n  const size_t kNumChunks = 128;\n  uint32_t chunks[kNumChunks];\n  bool allocation_failed = false;\n  for (int i = 0; i < 1000000; i++) {\n    uptr class_id = a.kNumClasses - 1;\n    if (!a.GetFromAllocator(&stats, class_id, chunks, kNumChunks)) {\n      allocation_failed = true;\n      break;\n    }\n  }\n  EXPECT_EQ(allocation_failed, true);\n\n  a.TestOnlyUnmap();\n}\n#endif\n\nTEST(SanitizerCommon, LargeMmapAllocator) {\n  LargeMmapAllocator<NoOpMapUnmapCallback> 
a;\n  a.Init();\n  AllocatorStats stats;\n  stats.Init();\n\n  static const int kNumAllocs = 1000;\n  char *allocated[kNumAllocs];\n  static const uptr size = 4000;\n  // Allocate some.\n  for (int i = 0; i < kNumAllocs; i++) {\n    allocated[i] = (char *)a.Allocate(&stats, size, 1);\n    CHECK(a.PointerIsMine(allocated[i]));\n  }\n  // Deallocate all.\n  CHECK_GT(a.TotalMemoryUsed(), size * kNumAllocs);\n  for (int i = 0; i < kNumAllocs; i++) {\n    char *p = allocated[i];\n    CHECK(a.PointerIsMine(p));\n    a.Deallocate(&stats, p);\n  }\n  // Check that non left.\n  CHECK_EQ(a.TotalMemoryUsed(), 0);\n\n  // Allocate some more, also add metadata.\n  for (int i = 0; i < kNumAllocs; i++) {\n    char *x = (char *)a.Allocate(&stats, size, 1);\n    CHECK_GE(a.GetActuallyAllocatedSize(x), size);\n    uptr *meta = reinterpret_cast<uptr*>(a.GetMetaData(x));\n    *meta = i;\n    allocated[i] = x;\n  }\n  for (int i = 0; i < kNumAllocs * kNumAllocs; i++) {\n    char *p = allocated[i % kNumAllocs];\n    CHECK(a.PointerIsMine(p));\n    CHECK(a.PointerIsMine(p + 2000));\n  }\n  CHECK_GT(a.TotalMemoryUsed(), size * kNumAllocs);\n  // Deallocate all in reverse order.\n  for (int i = 0; i < kNumAllocs; i++) {\n    int idx = kNumAllocs - i - 1;\n    char *p = allocated[idx];\n    uptr *meta = reinterpret_cast<uptr*>(a.GetMetaData(p));\n    CHECK_EQ(*meta, idx);\n    CHECK(a.PointerIsMine(p));\n    a.Deallocate(&stats, p);\n  }\n  CHECK_EQ(a.TotalMemoryUsed(), 0);\n\n  // Test alignments. Test with 512MB alignment on x64 non-Windows machines.\n  // Windows doesn't overcommit, and many machines do not have 51.2GB of swap.\n  uptr max_alignment =\n      (SANITIZER_WORDSIZE == 64 && !SANITIZER_WINDOWS) ? 
(1 << 28) : (1 << 24);\n  for (uptr alignment = 8; alignment <= max_alignment; alignment *= 2) {\n    const uptr kNumAlignedAllocs = 100;\n    for (uptr i = 0; i < kNumAlignedAllocs; i++) {\n      uptr size = ((i % 10) + 1) * 4096;\n      char *p = allocated[i] = (char *)a.Allocate(&stats, size, alignment);\n      CHECK_EQ(p, a.GetBlockBegin(p));\n      CHECK_EQ(p, a.GetBlockBegin(p + size - 1));\n      CHECK_EQ(p, a.GetBlockBegin(p + size / 2));\n      CHECK_EQ(0, (uptr)allocated[i] % alignment);\n      p[0] = p[size - 1] = 0;\n    }\n    for (uptr i = 0; i < kNumAlignedAllocs; i++) {\n      a.Deallocate(&stats, allocated[i]);\n    }\n  }\n\n  // Regression test for boundary condition in GetBlockBegin().\n  uptr page_size = GetPageSizeCached();\n  char *p = (char *)a.Allocate(&stats, page_size, 1);\n  CHECK_EQ(p, a.GetBlockBegin(p));\n  CHECK_EQ(p, (char *)a.GetBlockBegin(p + page_size - 1));\n  CHECK_NE(p, (char *)a.GetBlockBegin(p + page_size));\n  a.Deallocate(&stats, p);\n}\n\ntemplate <class PrimaryAllocator>\nvoid TestCombinedAllocator(uptr premapped_heap = 0) {\n  typedef CombinedAllocator<PrimaryAllocator> Allocator;\n  Allocator *a = new Allocator;\n  a->Init(kReleaseToOSIntervalNever, premapped_heap);\n  std::mt19937 r;\n\n  typename Allocator::AllocatorCache cache;\n  memset(&cache, 0, sizeof(cache));\n  a->InitCache(&cache);\n\n  EXPECT_EQ(a->Allocate(&cache, -1, 1), (void*)0);\n  EXPECT_EQ(a->Allocate(&cache, -1, 1024), (void*)0);\n  EXPECT_EQ(a->Allocate(&cache, (uptr)-1 - 1024, 1), (void*)0);\n  EXPECT_EQ(a->Allocate(&cache, (uptr)-1 - 1024, 1024), (void*)0);\n  EXPECT_EQ(a->Allocate(&cache, (uptr)-1 - 1023, 1024), (void*)0);\n  EXPECT_EQ(a->Allocate(&cache, -1, 1), (void*)0);\n\n  const uptr kNumAllocs = 100000;\n  const uptr kNumIter = 10;\n  for (uptr iter = 0; iter < kNumIter; iter++) {\n    std::vector<void*> allocated;\n    for (uptr i = 0; i < kNumAllocs; i++) {\n      uptr size = (i % (1 << 14)) + 1;\n      if ((i % 1024) == 0)\n        size 
= 1 << (10 + (i % 14));\n      void *x = a->Allocate(&cache, size, 1);\n      uptr *meta = reinterpret_cast<uptr*>(a->GetMetaData(x));\n      CHECK_EQ(*meta, 0);\n      *meta = size;\n      allocated.push_back(x);\n    }\n\n    std::shuffle(allocated.begin(), allocated.end(), r);\n\n    // Test ForEachChunk(...)\n    {\n      std::set<void *> reported_chunks;\n      auto cb = [](uptr chunk, void *arg) {\n        auto reported_chunks_ptr = reinterpret_cast<std::set<void *> *>(arg);\n        auto pair =\n            reported_chunks_ptr->insert(reinterpret_cast<void *>(chunk));\n        // Check chunk is never reported more than once.\n        ASSERT_TRUE(pair.second);\n      };\n      a->ForEachChunk(cb, reinterpret_cast<void *>(&reported_chunks));\n      for (const auto &allocated_ptr : allocated) {\n        ASSERT_NE(reported_chunks.find(allocated_ptr), reported_chunks.end());\n      }\n    }\n\n    for (uptr i = 0; i < kNumAllocs; i++) {\n      void *x = allocated[i];\n      uptr *meta = reinterpret_cast<uptr*>(a->GetMetaData(x));\n      CHECK_NE(*meta, 0);\n      CHECK(a->PointerIsMine(x));\n      *meta = 0;\n      a->Deallocate(&cache, x);\n    }\n    allocated.clear();\n    a->SwallowCache(&cache);\n  }\n  a->DestroyCache(&cache);\n  a->TestOnlyUnmap();\n}\n\n#if SANITIZER_CAN_USE_ALLOCATOR64\nTEST(SanitizerCommon, CombinedAllocator64) {\n  TestCombinedAllocator<Allocator64>();\n}\n\nTEST(SanitizerCommon, CombinedAllocator64Dynamic) {\n  TestCombinedAllocator<Allocator64Dynamic>();\n}\n\n#if !ALLOCATOR64_SMALL_SIZE\n#if !SANITIZER_WINDOWS\n// Windows fails to map 1TB, so disable this test.\nTEST(SanitizerCommon, CombinedAllocator64DynamicPremapped) {\n  ScopedPremappedHeap h;\n  TestCombinedAllocator<Allocator64Dynamic>(h.Addr());\n}\n#endif\n\nTEST(SanitizerCommon, CombinedAllocator64Compact) {\n  TestCombinedAllocator<Allocator64Compact>();\n}\n#endif\n\nTEST(SanitizerCommon, CombinedAllocator64VeryCompact) {\n  
TestCombinedAllocator<Allocator64VeryCompact>();\n}\n#endif\n\nTEST(SanitizerCommon, SKIP_ON_SOLARIS_SPARCV9(CombinedAllocator32Compact)) {\n  TestCombinedAllocator<Allocator32Compact>();\n}\n\ntemplate <class Allocator>\nvoid TestSizeClassAllocatorLocalCache(uptr premapped_heap = 0) {\n  using AllocatorCache = typename Allocator::AllocatorCache;\n  AllocatorCache cache;\n  Allocator *a = new Allocator();\n\n  a->Init(kReleaseToOSIntervalNever, premapped_heap);\n  memset(&cache, 0, sizeof(cache));\n  cache.Init(0);\n\n  const uptr kNumAllocs = 10000;\n  const int kNumIter = 100;\n  uptr saved_total = 0;\n  for (int class_id = 1; class_id <= 5; class_id++) {\n    for (int it = 0; it < kNumIter; it++) {\n      void *allocated[kNumAllocs];\n      for (uptr i = 0; i < kNumAllocs; i++) {\n        allocated[i] = cache.Allocate(a, class_id);\n      }\n      for (uptr i = 0; i < kNumAllocs; i++) {\n        cache.Deallocate(a, class_id, allocated[i]);\n      }\n      cache.Drain(a);\n      uptr total_allocated = a->TotalMemoryUsed();\n      if (it)\n        CHECK_EQ(saved_total, total_allocated);\n      saved_total = total_allocated;\n    }\n  }\n\n  a->TestOnlyUnmap();\n  delete a;\n}\n\n#if SANITIZER_CAN_USE_ALLOCATOR64\n// These tests can fail on Windows if memory is somewhat full and lit happens\n// to run them all at the same time. 
FIXME: Make them not flaky and reenable.\n#if !SANITIZER_WINDOWS\nTEST(SanitizerCommon, SizeClassAllocator64LocalCache) {\n  TestSizeClassAllocatorLocalCache<Allocator64>();\n}\n\nTEST(SanitizerCommon, SizeClassAllocator64DynamicLocalCache) {\n  TestSizeClassAllocatorLocalCache<Allocator64Dynamic>();\n}\n\n#if !ALLOCATOR64_SMALL_SIZE\nTEST(SanitizerCommon, SizeClassAllocator64DynamicPremappedLocalCache) {\n  ScopedPremappedHeap h;\n  TestSizeClassAllocatorLocalCache<Allocator64Dynamic>(h.Addr());\n}\n\nTEST(SanitizerCommon, SizeClassAllocator64CompactLocalCache) {\n  TestSizeClassAllocatorLocalCache<Allocator64Compact>();\n}\n#endif\nTEST(SanitizerCommon, SizeClassAllocator64VeryCompactLocalCache) {\n  TestSizeClassAllocatorLocalCache<Allocator64VeryCompact>();\n}\n#endif\n#endif\n\nTEST(SanitizerCommon, SizeClassAllocator32CompactLocalCache) {\n  TestSizeClassAllocatorLocalCache<Allocator32Compact>();\n}\n\n#if SANITIZER_CAN_USE_ALLOCATOR64\ntypedef Allocator64::AllocatorCache AllocatorCache;\nstatic AllocatorCache static_allocator_cache;\n\nvoid *AllocatorLeakTestWorker(void *arg) {\n  typedef AllocatorCache::Allocator Allocator;\n  Allocator *a = (Allocator*)(arg);\n  static_allocator_cache.Allocate(a, 10);\n  static_allocator_cache.Drain(a);\n  return 0;\n}\n\nTEST(SanitizerCommon, AllocatorLeakTest) {\n  typedef AllocatorCache::Allocator Allocator;\n  Allocator a;\n  a.Init(kReleaseToOSIntervalNever);\n  uptr total_used_memory = 0;\n  for (int i = 0; i < 100; i++) {\n    pthread_t t;\n    PTHREAD_CREATE(&t, 0, AllocatorLeakTestWorker, &a);\n    PTHREAD_JOIN(t, 0);\n    if (i == 0)\n      total_used_memory = a.TotalMemoryUsed();\n    EXPECT_EQ(a.TotalMemoryUsed(), total_used_memory);\n  }\n\n  a.TestOnlyUnmap();\n}\n\n// Struct which is allocated to pass info to new threads.  The new thread frees\n// it.\nstruct NewThreadParams {\n  AllocatorCache *thread_cache;\n  AllocatorCache::Allocator *allocator;\n  uptr class_id;\n};\n\n// Called in a new thread.  
Just frees its argument.\nstatic void *DeallocNewThreadWorker(void *arg) {\n  NewThreadParams *params = reinterpret_cast<NewThreadParams*>(arg);\n  params->thread_cache->Deallocate(params->allocator, params->class_id, params);\n  return NULL;\n}\n\n// The allocator cache is supposed to be POD and zero initialized.  We should be\n// able to call Deallocate on a zeroed cache, and it will self-initialize.\nTEST(Allocator, AllocatorCacheDeallocNewThread) {\n  AllocatorCache::Allocator allocator;\n  allocator.Init(kReleaseToOSIntervalNever);\n  AllocatorCache main_cache;\n  AllocatorCache child_cache;\n  memset(&main_cache, 0, sizeof(main_cache));\n  memset(&child_cache, 0, sizeof(child_cache));\n\n  uptr class_id = DefaultSizeClassMap::ClassID(sizeof(NewThreadParams));\n  NewThreadParams *params = reinterpret_cast<NewThreadParams*>(\n      main_cache.Allocate(&allocator, class_id));\n  params->thread_cache = &child_cache;\n  params->allocator = &allocator;\n  params->class_id = class_id;\n  pthread_t t;\n  PTHREAD_CREATE(&t, 0, DeallocNewThreadWorker, params);\n  PTHREAD_JOIN(t, 0);\n\n  allocator.TestOnlyUnmap();\n}\n#endif\n\nTEST(Allocator, Basic) {\n  char *p = (char*)InternalAlloc(10);\n  EXPECT_NE(p, (char*)0);\n  char *p2 = (char*)InternalAlloc(20);\n  EXPECT_NE(p2, (char*)0);\n  EXPECT_NE(p2, p);\n  InternalFree(p);\n  InternalFree(p2);\n}\n\nTEST(Allocator, Stress) {\n  const int kCount = 1000;\n  char *ptrs[kCount];\n  unsigned rnd = 42;\n  for (int i = 0; i < kCount; i++) {\n    uptr sz = my_rand_r(&rnd) % 1000;\n    char *p = (char*)InternalAlloc(sz);\n    EXPECT_NE(p, (char*)0);\n    ptrs[i] = p;\n  }\n  for (int i = 0; i < kCount; i++) {\n    InternalFree(ptrs[i]);\n  }\n}\n\nTEST(Allocator, LargeAlloc) {\n  void *p = InternalAlloc(10 << 20);\n  InternalFree(p);\n}\n\nTEST(Allocator, ScopedBuffer) {\n  const int kSize = 512;\n  {\n    InternalMmapVector<int> int_buf(kSize);\n    EXPECT_EQ((uptr)kSize, int_buf.size());\n  }\n  InternalMmapVector<char> 
char_buf(kSize);\n  EXPECT_EQ((uptr)kSize, char_buf.size());\n  internal_memset(char_buf.data(), 'c', kSize);\n  for (int i = 0; i < kSize; i++) {\n    EXPECT_EQ('c', char_buf[i]);\n  }\n}\n\nvoid IterationTestCallback(uptr chunk, void *arg) {\n  reinterpret_cast<std::set<uptr> *>(arg)->insert(chunk);\n}\n\ntemplate <class Allocator>\nvoid TestSizeClassAllocatorIteration(uptr premapped_heap = 0) {\n  Allocator *a = new Allocator;\n  a->Init(kReleaseToOSIntervalNever, premapped_heap);\n  typename Allocator::AllocatorCache cache;\n  memset(&cache, 0, sizeof(cache));\n  cache.Init(0);\n\n  static const uptr sizes[] = {1, 16, 30, 40, 100, 1000, 10000,\n    50000, 60000, 100000, 120000, 300000, 500000, 1000000, 2000000};\n\n  std::vector<void *> allocated;\n\n  // Allocate a bunch of chunks.\n  for (uptr s = 0; s < ARRAY_SIZE(sizes); s++) {\n    uptr size = sizes[s];\n    if (!a->CanAllocate(size, 1)) continue;\n    // printf(\"s = %ld\\n\", size);\n    uptr n_iter = std::max((uptr)6, 80000 / size);\n    // fprintf(stderr, \"size: %ld iter: %ld\\n\", size, n_iter);\n    for (uptr j = 0; j < n_iter; j++) {\n      uptr class_id0 = Allocator::SizeClassMapT::ClassID(size);\n      void *x = cache.Allocate(a, class_id0);\n      allocated.push_back(x);\n    }\n  }\n\n  std::set<uptr> reported_chunks;\n  a->ForceLock();\n  a->ForEachChunk(IterationTestCallback, &reported_chunks);\n  a->ForceUnlock();\n\n  for (uptr i = 0; i < allocated.size(); i++) {\n    // Don't use EXPECT_NE. Reporting the first mismatch is enough.\n    ASSERT_NE(reported_chunks.find(reinterpret_cast<uptr>(allocated[i])),\n              reported_chunks.end());\n  }\n\n  a->TestOnlyUnmap();\n  delete a;\n}\n\n#if SANITIZER_CAN_USE_ALLOCATOR64\n// These tests can fail on Windows if memory is somewhat full and lit happens\n// to run them all at the same time. 
FIXME: Make them not flaky and reenable.\n#if !SANITIZER_WINDOWS\nTEST(SanitizerCommon, SizeClassAllocator64Iteration) {\n  TestSizeClassAllocatorIteration<Allocator64>();\n}\nTEST(SanitizerCommon, SizeClassAllocator64DynamicIteration) {\n  TestSizeClassAllocatorIteration<Allocator64Dynamic>();\n}\n#if !ALLOCATOR64_SMALL_SIZE\nTEST(SanitizerCommon, SizeClassAllocator64DynamicPremappedIteration) {\n  ScopedPremappedHeap h;\n  TestSizeClassAllocatorIteration<Allocator64Dynamic>(h.Addr());\n}\n#endif\n#endif\n#endif\n\nTEST(SanitizerCommon, SKIP_ON_SOLARIS_SPARCV9(SizeClassAllocator32Iteration)) {\n  TestSizeClassAllocatorIteration<Allocator32Compact>();\n}\n\nTEST(SanitizerCommon, LargeMmapAllocatorIteration) {\n  LargeMmapAllocator<NoOpMapUnmapCallback> a;\n  a.Init();\n  AllocatorStats stats;\n  stats.Init();\n\n  static const uptr kNumAllocs = 1000;\n  char *allocated[kNumAllocs];\n  static const uptr size = 40;\n  // Allocate some.\n  for (uptr i = 0; i < kNumAllocs; i++)\n    allocated[i] = (char *)a.Allocate(&stats, size, 1);\n\n  std::set<uptr> reported_chunks;\n  a.ForceLock();\n  a.ForEachChunk(IterationTestCallback, &reported_chunks);\n  a.ForceUnlock();\n\n  for (uptr i = 0; i < kNumAllocs; i++) {\n    // Don't use EXPECT_NE. 
Reporting the first mismatch is enough.\n    ASSERT_NE(reported_chunks.find(reinterpret_cast<uptr>(allocated[i])),\n              reported_chunks.end());\n  }\n  for (uptr i = 0; i < kNumAllocs; i++)\n    a.Deallocate(&stats, allocated[i]);\n}\n\nTEST(SanitizerCommon, LargeMmapAllocatorBlockBegin) {\n  LargeMmapAllocator<NoOpMapUnmapCallback> a;\n  a.Init();\n  AllocatorStats stats;\n  stats.Init();\n\n  static const uptr kNumAllocs = 1024;\n  static const uptr kNumExpectedFalseLookups = 10000000;\n  char *allocated[kNumAllocs];\n  static const uptr size = 4096;\n  // Allocate some.\n  for (uptr i = 0; i < kNumAllocs; i++) {\n    allocated[i] = (char *)a.Allocate(&stats, size, 1);\n  }\n\n  a.ForceLock();\n  for (uptr i = 0; i < kNumAllocs  * kNumAllocs; i++) {\n    // if ((i & (i - 1)) == 0) fprintf(stderr, \"[%zd]\\n\", i);\n    char *p1 = allocated[i % kNumAllocs];\n    EXPECT_EQ(p1, a.GetBlockBeginFastLocked(p1));\n    EXPECT_EQ(p1, a.GetBlockBeginFastLocked(p1 + size / 2));\n    EXPECT_EQ(p1, a.GetBlockBeginFastLocked(p1 + size - 1));\n    EXPECT_EQ(p1, a.GetBlockBeginFastLocked(p1 - 100));\n  }\n\n  for (uptr i = 0; i < kNumExpectedFalseLookups; i++) {\n    void *p = reinterpret_cast<void *>(i % 1024);\n    EXPECT_EQ((void *)0, a.GetBlockBeginFastLocked(p));\n    p = reinterpret_cast<void *>(~0L - (i % 1024));\n    EXPECT_EQ((void *)0, a.GetBlockBeginFastLocked(p));\n  }\n  a.ForceUnlock();\n\n  for (uptr i = 0; i < kNumAllocs; i++)\n    a.Deallocate(&stats, allocated[i]);\n}\n\n\n// Don't test OOM conditions on Win64 because it causes other tests on the same\n// machine to OOM.\n#if SANITIZER_CAN_USE_ALLOCATOR64 && !SANITIZER_WINDOWS64\ntypedef __sanitizer::SizeClassMap<2, 22, 22, 34, 128, 16> SpecialSizeClassMap;\ntemplate <typename AddressSpaceViewTy = LocalAddressSpaceView>\nstruct AP64_SpecialSizeClassMap {\n  static const uptr kSpaceBeg = kAllocatorSpace;\n  static const uptr kSpaceSize = kAllocatorSize;\n  static const uptr kMetadataSize = 0;\n  
typedef SpecialSizeClassMap SizeClassMap;\n  typedef NoOpMapUnmapCallback MapUnmapCallback;\n  static const uptr kFlags = 0;\n  using AddressSpaceView = AddressSpaceViewTy;\n};\n\n// Regression test for out-of-memory condition in PopulateFreeList().\nTEST(SanitizerCommon, SizeClassAllocator64PopulateFreeListOOM) {\n  // In a world where regions are small and chunks are huge...\n  typedef SizeClassAllocator64<AP64_SpecialSizeClassMap<>> SpecialAllocator64;\n  const uptr kRegionSize =\n      kAllocatorSize / SpecialSizeClassMap::kNumClassesRounded;\n  SpecialAllocator64 *a = new SpecialAllocator64;\n  a->Init(kReleaseToOSIntervalNever);\n  SpecialAllocator64::AllocatorCache cache;\n  memset(&cache, 0, sizeof(cache));\n  cache.Init(0);\n\n  // ...one man is on a mission to overflow a region with a series of\n  // successive allocations.\n\n  const uptr kClassID = ALLOCATOR64_SMALL_SIZE ? 18 : 24;\n  const uptr kAllocationSize = SpecialSizeClassMap::Size(kClassID);\n  ASSERT_LT(2 * kAllocationSize, kRegionSize);\n  ASSERT_GT(3 * kAllocationSize, kRegionSize);\n  EXPECT_NE(cache.Allocate(a, kClassID), nullptr);\n  EXPECT_NE(cache.Allocate(a, kClassID), nullptr);\n  EXPECT_EQ(cache.Allocate(a, kClassID), nullptr);\n\n  const uptr Class2 = ALLOCATOR64_SMALL_SIZE ? 
15 : 21;\n  const uptr Size2 = SpecialSizeClassMap::Size(Class2);\n  ASSERT_EQ(Size2 * 8, kRegionSize);\n  char *p[7];\n  for (int i = 0; i < 7; i++) {\n    p[i] = (char*)cache.Allocate(a, Class2);\n    EXPECT_NE(p[i], nullptr);\n    fprintf(stderr, \"p[%d] %p s = %lx\\n\", i, (void*)p[i], Size2);\n    p[i][Size2 - 1] = 42;\n    if (i) ASSERT_LT(p[i - 1], p[i]);\n  }\n  EXPECT_EQ(cache.Allocate(a, Class2), nullptr);\n  cache.Deallocate(a, Class2, p[0]);\n  cache.Drain(a);\n  ASSERT_EQ(p[6][Size2 - 1], 42);\n  a->TestOnlyUnmap();\n  delete a;\n}\n\n#endif\n\n#if SANITIZER_CAN_USE_ALLOCATOR64\n\nclass NoMemoryMapper {\n public:\n  uptr last_request_buffer_size = 0;\n\n  u64 *MapPackedCounterArrayBuffer(uptr buffer_size) {\n    last_request_buffer_size = buffer_size * sizeof(u64);\n    return nullptr;\n  }\n};\n\nclass RedZoneMemoryMapper {\n public:\n  RedZoneMemoryMapper() {\n    const auto page_size = GetPageSize();\n    buffer = MmapOrDie(3ULL * page_size, \"\");\n    MprotectNoAccess(reinterpret_cast<uptr>(buffer), page_size);\n    MprotectNoAccess(reinterpret_cast<uptr>(buffer) + page_size * 2, page_size);\n  }\n  ~RedZoneMemoryMapper() { UnmapOrDie(buffer, 3 * GetPageSize()); }\n\n  u64 *MapPackedCounterArrayBuffer(uptr buffer_size) {\n    buffer_size *= sizeof(u64);\n    const auto page_size = GetPageSize();\n    CHECK_EQ(buffer_size, page_size);\n    u64 *p =\n        reinterpret_cast<u64 *>(reinterpret_cast<uptr>(buffer) + page_size);\n    memset(p, 0, page_size);\n    return p;\n  }\n\n private:\n  void *buffer;\n};\n\nTEST(SanitizerCommon, SizeClassAllocator64PackedCounterArray) {\n  NoMemoryMapper no_memory_mapper;\n  for (int i = 0; i < 64; i++) {\n    // Various valid counter's max values packed into one word.\n    Allocator64::PackedCounterArray counters_2n(1, 1ULL << i,\n                                                &no_memory_mapper);\n    EXPECT_EQ(8ULL, no_memory_mapper.last_request_buffer_size);\n\n    // Check the \"all bit set\" values too.\n  
  Allocator64::PackedCounterArray counters_2n1_1(1, ~0ULL >> i,\n                                                   &no_memory_mapper);\n    EXPECT_EQ(8ULL, no_memory_mapper.last_request_buffer_size);\n\n    // Verify the packing ratio, the counter is expected to be packed into the\n    // closest power of 2 bits.\n    Allocator64::PackedCounterArray counters(64, 1ULL << i, &no_memory_mapper);\n    EXPECT_EQ(8ULL * RoundUpToPowerOfTwo(i + 1),\n              no_memory_mapper.last_request_buffer_size);\n  }\n\n  RedZoneMemoryMapper memory_mapper;\n  // Go through 1, 2, 4, 8, .. 64 bits per counter.\n  for (int i = 0; i < 7; i++) {\n    // Make sure counters request one memory page for the buffer.\n    const u64 kNumCounters = (GetPageSize() / 8) * (64 >> i);\n    Allocator64::PackedCounterArray counters(\n        kNumCounters, 1ULL << ((1 << i) - 1), &memory_mapper);\n    counters.Inc(0);\n    for (u64 c = 1; c < kNumCounters - 1; c++) {\n      ASSERT_EQ(0ULL, counters.Get(c));\n      counters.Inc(c);\n      ASSERT_EQ(1ULL, counters.Get(c - 1));\n    }\n    ASSERT_EQ(0ULL, counters.Get(kNumCounters - 1));\n    counters.Inc(kNumCounters - 1);\n\n    if (i > 0) {\n      counters.IncRange(0, kNumCounters - 1);\n      for (u64 c = 0; c < kNumCounters; c++)\n        ASSERT_EQ(2ULL, counters.Get(c));\n    }\n  }\n}\n\nclass RangeRecorder {\n public:\n  std::string reported_pages;\n\n  RangeRecorder()\n      : page_size_scaled_log(\n            Log2(GetPageSizeCached() >> Allocator64::kCompactPtrScale)),\n        last_page_reported(0) {}\n\n  void ReleasePageRangeToOS(u32 class_id, u32 from, u32 to) {\n    from >>= page_size_scaled_log;\n    to >>= page_size_scaled_log;\n    ASSERT_LT(from, to);\n    if (!reported_pages.empty())\n      ASSERT_LT(last_page_reported, from);\n    reported_pages.append(from - last_page_reported, '.');\n    reported_pages.append(to - from, 'x');\n    last_page_reported = to;\n  }\n\n private:\n  const uptr page_size_scaled_log;\n  u32 
last_page_reported;\n};\n\nTEST(SanitizerCommon, SizeClassAllocator64FreePagesRangeTracker) {\n  typedef Allocator64::FreePagesRangeTracker<RangeRecorder> RangeTracker;\n\n  // 'x' denotes a page to be released, '.' denotes a page to be kept around.\n  const char* test_cases[] = {\n      \"\",\n      \".\",\n      \"x\",\n      \"........\",\n      \"xxxxxxxxxxx\",\n      \"..............xxxxx\",\n      \"xxxxxxxxxxxxxxxxxx.....\",\n      \"......xxxxxxxx........\",\n      \"xxx..........xxxxxxxxxxxxxxx\",\n      \"......xxxx....xxxx........\",\n      \"xxx..........xxxxxxxx....xxxxxxx\",\n      \"x.x.x.x.x.x.x.x.x.x.x.x.\",\n      \".x.x.x.x.x.x.x.x.x.x.x.x\",\n      \".x.x.x.x.x.x.x.x.x.x.x.x.\",\n      \"x.x.x.x.x.x.x.x.x.x.x.x.x\",\n  };\n\n  for (auto test_case : test_cases) {\n    RangeRecorder range_recorder;\n    RangeTracker tracker(&range_recorder, 1);\n    for (int i = 0; test_case[i] != 0; i++)\n      tracker.NextPage(test_case[i] == 'x');\n    tracker.Done();\n    // Strip trailing '.'-pages before comparing the results as they are not\n    // going to be reported to range_recorder anyway.\n    const char* last_x = strrchr(test_case, 'x');\n    std::string expected(\n        test_case,\n        last_x == nullptr ? 
0 : (last_x - test_case + 1));\n    EXPECT_STREQ(expected.c_str(), range_recorder.reported_pages.c_str());\n  }\n}\n\nclass ReleasedPagesTrackingMemoryMapper {\n public:\n  std::set<u32> reported_pages;\n  std::vector<u64> buffer;\n\n  u64 *MapPackedCounterArrayBuffer(uptr buffer_size) {\n    reported_pages.clear();\n    buffer.assign(buffer_size, 0);\n    return buffer.data();\n  }\n  void ReleasePageRangeToOS(u32 class_id, u32 from, u32 to) {\n    uptr page_size_scaled =\n        GetPageSizeCached() >> Allocator64::kCompactPtrScale;\n    for (u32 i = from; i < to; i += page_size_scaled)\n      reported_pages.insert(i);\n  }\n};\n\ntemplate <class Allocator>\nvoid TestReleaseFreeMemoryToOS() {\n  ReleasedPagesTrackingMemoryMapper memory_mapper;\n  const uptr kAllocatedPagesCount = 1024;\n  const uptr page_size = GetPageSizeCached();\n  const uptr page_size_scaled = page_size >> Allocator::kCompactPtrScale;\n  std::mt19937 r;\n  uint32_t rnd_state = 42;\n\n  for (uptr class_id = 1; class_id <= Allocator::SizeClassMapT::kLargestClassID;\n      class_id++) {\n    const uptr chunk_size = Allocator::SizeClassMapT::Size(class_id);\n    const uptr chunk_size_scaled = chunk_size >> Allocator::kCompactPtrScale;\n    const uptr max_chunks =\n        kAllocatedPagesCount * GetPageSizeCached() / chunk_size;\n\n    // Generate the random free list.\n    std::vector<u32> free_array;\n    bool in_free_range = false;\n    uptr current_range_end = 0;\n    for (uptr i = 0; i < max_chunks; i++) {\n      if (i == current_range_end) {\n        in_free_range = (my_rand_r(&rnd_state) & 1U) == 1;\n        current_range_end += my_rand_r(&rnd_state) % 100 + 1;\n      }\n      if (in_free_range)\n        free_array.push_back(i * chunk_size_scaled);\n    }\n    if (free_array.empty())\n      continue;\n    // Shuffle free_list to verify that ReleaseFreeMemoryToOS does not depend on\n    // the list ordering.\n    std::shuffle(free_array.begin(), free_array.end(), r);\n\n    
Allocator::ReleaseFreeMemoryToOS(&free_array[0], free_array.size(),\n                                     chunk_size, kAllocatedPagesCount,\n                                     &memory_mapper, class_id);\n\n    // Verify that there are no released pages touched by used chunks and all\n    // ranges of free chunks big enough to contain the entire memory pages had\n    // these pages released.\n    uptr verified_released_pages = 0;\n    std::set<u32> free_chunks(free_array.begin(), free_array.end());\n\n    u32 current_chunk = 0;\n    in_free_range = false;\n    u32 current_free_range_start = 0;\n    for (uptr i = 0; i <= max_chunks; i++) {\n      bool is_free_chunk = free_chunks.find(current_chunk) != free_chunks.end();\n\n      if (is_free_chunk) {\n        if (!in_free_range) {\n          in_free_range = true;\n          current_free_range_start = current_chunk;\n        }\n      } else {\n        // Verify that this used chunk does not touch any released page.\n        for (uptr i_page = current_chunk / page_size_scaled;\n             i_page <= (current_chunk + chunk_size_scaled - 1) /\n                       page_size_scaled;\n             i_page++) {\n          bool page_released =\n              memory_mapper.reported_pages.find(i_page * page_size_scaled) !=\n              memory_mapper.reported_pages.end();\n          ASSERT_EQ(false, page_released);\n        }\n\n        if (in_free_range) {\n          in_free_range = false;\n          // Verify that all entire memory pages covered by this range of free\n          // chunks were released.\n          u32 page = RoundUpTo(current_free_range_start, page_size_scaled);\n          while (page + page_size_scaled <= current_chunk) {\n            bool page_released =\n                memory_mapper.reported_pages.find(page) !=\n                memory_mapper.reported_pages.end();\n            ASSERT_EQ(true, page_released);\n            verified_released_pages++;\n            page += page_size_scaled;\n          }\n   
     }\n      }\n\n      current_chunk += chunk_size_scaled;\n    }\n\n    ASSERT_EQ(memory_mapper.reported_pages.size(), verified_released_pages);\n  }\n}\n\nTEST(SanitizerCommon, SizeClassAllocator64ReleaseFreeMemoryToOS) {\n  TestReleaseFreeMemoryToOS<Allocator64>();\n}\n\n#if !ALLOCATOR64_SMALL_SIZE\nTEST(SanitizerCommon, SizeClassAllocator64CompactReleaseFreeMemoryToOS) {\n  TestReleaseFreeMemoryToOS<Allocator64Compact>();\n}\n\nTEST(SanitizerCommon, SizeClassAllocator64VeryCompactReleaseFreeMemoryToOS) {\n  TestReleaseFreeMemoryToOS<Allocator64VeryCompact>();\n}\n#endif  // !ALLOCATOR64_SMALL_SIZE\n\n#endif  // SANITIZER_CAN_USE_ALLOCATOR64\n\nTEST(SanitizerCommon, LowLevelAllocatorShouldRoundUpSizeOnAlloc) {\n  // When allocating a memory block slightly bigger than a memory page and\n  // LowLevelAllocator calls MmapOrDie for the internal buffer, it should round\n  // the size up to the page size, so that subsequent calls to the allocator\n  // can use the remaining space in the last allocated page.\n  static LowLevelAllocator allocator;\n  char *ptr1 = (char *)allocator.Allocate(GetPageSizeCached() + 16);\n  char *ptr2 = (char *)allocator.Allocate(16);\n  EXPECT_EQ(ptr2, ptr1 + GetPageSizeCached() + 16);\n}\n\n#endif  // #if !SANITIZER_DEBUG\n"
  },
  {
    "path": "runtime/sanitizer_common/tests/sanitizer_allocator_testlib.cpp",
    "content": "//===-- sanitizer_allocator_testlib.cpp -----------------------------------===//\n//\n// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n// See https://llvm.org/LICENSE.txt for license information.\n// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n//\n//===----------------------------------------------------------------------===//\n// Malloc replacement library based on CombinedAllocator.\n// The primary purpose of this file is an end-to-end integration test\n// for CombinedAllocator.\n//===----------------------------------------------------------------------===//\n/* Usage:\nclang++ -std=c++11 -fno-exceptions  -g -fPIC -I. -I../include -Isanitizer \\\n sanitizer_common/tests/sanitizer_allocator_testlib.cpp \\\n $(\\ls sanitizer_common/sanitizer_*.cpp | grep -v sanitizer_common_nolibc.cpp) \\\n  sanitizer_common/sanitizer_linux_x86_64.S \\\n -shared -lpthread -o testmalloc.so\nLD_PRELOAD=`pwd`/testmalloc.so /your/app\n*/\n#include \"sanitizer_common/sanitizer_allocator.h\"\n#include \"sanitizer_common/sanitizer_common.h\"\n#include <stddef.h>\n#include <stdio.h>\n#include <unistd.h>\n#include <string.h>\n#include <pthread.h>\n\n#ifndef SANITIZER_MALLOC_HOOK\n# define SANITIZER_MALLOC_HOOK(p, s)\n#endif\n\n#ifndef SANITIZER_FREE_HOOK\n# define SANITIZER_FREE_HOOK(p)\n#endif\n\nstatic const uptr kAllocatorSpace = 0x600000000000ULL;\nstatic const uptr kAllocatorSize  =  0x10000000000ULL;  // 1T.\n\nstruct __AP64 {\n  static const uptr kSpaceBeg = ~(uptr)0;\n  static const uptr kSpaceSize = kAllocatorSize;\n  static const uptr kMetadataSize = 0;\n  typedef CompactSizeClassMap SizeClassMap;\n  typedef NoOpMapUnmapCallback MapUnmapCallback;\n  static const uptr kFlags =\n      SizeClassAllocator64FlagMasks::kRandomShuffleChunks;\n};\n\nnamespace {\n\ntypedef SizeClassAllocator64<__AP64> PrimaryAllocator;\ntypedef CombinedAllocator<PrimaryAllocator> Allocator;\ntypedef Allocator::AllocatorCache 
AllocatorCache;\n\nstatic Allocator allocator;\nstatic bool global_inited;\nstatic THREADLOCAL AllocatorCache cache;\nstatic THREADLOCAL bool thread_inited;\nstatic pthread_key_t pkey;\n\nstatic void thread_dtor(void *v) {\n  if ((uptr)v != 3) {\n    pthread_setspecific(pkey, (void*)((uptr)v + 1));\n    return;\n  }\n  allocator.SwallowCache(&cache);\n}\n\nstatic size_t GetRss() {\n  if (FILE *f = fopen(\"/proc/self/statm\", \"r\")) {\n    size_t size = 0, rss = 0;\n    fscanf(f, \"%zd %zd\", &size, &rss);\n    fclose(f);\n    return rss << 12;  // rss is in pages.\n  }\n  return 0;\n}\n\nstruct AtExit {\n  ~AtExit() {\n    allocator.PrintStats();\n    Printf(\"RSS: %zdM\\n\", GetRss() >> 20);\n  }\n};\n\nstatic AtExit at_exit;\n\nstatic void NOINLINE thread_init() {\n  if (!global_inited) {\n    global_inited = true;\n    allocator.Init(false /*may_return_null*/);\n    pthread_key_create(&pkey, thread_dtor);\n  }\n  thread_inited = true;\n  pthread_setspecific(pkey, (void*)1);\n  cache.Init(nullptr);\n}\n}  // namespace\n\nextern \"C\" {\nvoid *malloc(size_t size) {\n  if (UNLIKELY(!thread_inited))\n    thread_init();\n  void *p = allocator.Allocate(&cache, size, 8);\n  SANITIZER_MALLOC_HOOK(p, size);\n  return p;\n}\n\nvoid free(void *p) {\n  if (UNLIKELY(!thread_inited))\n    thread_init();\n  SANITIZER_FREE_HOOK(p);\n  allocator.Deallocate(&cache, p);\n}\n\nvoid *calloc(size_t nmemb, size_t size) {\n  if (UNLIKELY(!thread_inited))\n    thread_init();\n  size *= nmemb;\n  void *p = allocator.Allocate(&cache, size, 8, false);\n  memset(p, 0, size);\n  SANITIZER_MALLOC_HOOK(p, size);\n  return p;\n}\n\nvoid *realloc(void *p, size_t size) {\n  if (UNLIKELY(!thread_inited))\n    thread_init();\n  if (p) {\n    SANITIZER_FREE_HOOK(p);\n  }\n  p = allocator.Reallocate(&cache, p, size, 8);\n  if (p) {\n    SANITIZER_MALLOC_HOOK(p, size);\n  }\n  return p;\n}\n\n#if SANITIZER_INTERCEPT_MEMALIGN\nvoid *memalign(size_t alignment, size_t size) {\n  if 
(UNLIKELY(!thread_inited))\n    thread_init();\n  void *p = allocator.Allocate(&cache, size, alignment);\n  SANITIZER_MALLOC_HOOK(p, size);\n  return p;\n}\n#endif // SANITIZER_INTERCEPT_MEMALIGN\n\nint posix_memalign(void **memptr, size_t alignment, size_t size) {\n  if (UNLIKELY(!thread_inited))\n    thread_init();\n  *memptr = allocator.Allocate(&cache, size, alignment);\n  SANITIZER_MALLOC_HOOK(*memptr, size);\n  return 0;\n}\n\nvoid *valloc(size_t size) {\n  if (UNLIKELY(!thread_inited))\n    thread_init();\n  if (size == 0)\n    size = GetPageSizeCached();\n  void *p = allocator.Allocate(&cache, size, GetPageSizeCached());\n  SANITIZER_MALLOC_HOOK(p, size);\n  return p;\n}\n\n#if SANITIZER_INTERCEPT_CFREE\nvoid cfree(void *p) ALIAS(\"free\");\n#endif // SANITIZER_INTERCEPT_CFREE\n#if SANITIZER_INTERCEPT_PVALLOC\nvoid *pvalloc(size_t size) ALIAS(\"valloc\");\n#endif // SANITIZER_INTERCEPT_PVALLOC\n#if SANITIZER_INTERCEPT_MEMALIGN\nvoid *__libc_memalign(size_t alignment, size_t size) ALIAS(\"memalign\");\n#endif // SANITIZER_INTERCEPT_MEMALIGN\n\nvoid malloc_usable_size() {\n}\n\n#if SANITIZER_INTERCEPT_MALLOPT_AND_MALLINFO\nvoid mallinfo() {\n}\n\nvoid mallopt() {\n}\n#endif // SANITIZER_INTERCEPT_MALLOPT_AND_MALLINFO\n}  // extern \"C\"\n\nnamespace std {\n  struct nothrow_t;\n}\n\nvoid *operator new(size_t size) ALIAS(\"malloc\");\nvoid *operator new[](size_t size) ALIAS(\"malloc\");\nvoid *operator new(size_t size, std::nothrow_t const&) ALIAS(\"malloc\");\nvoid *operator new[](size_t size, std::nothrow_t const&) ALIAS(\"malloc\");\nvoid operator delete(void *ptr) throw() ALIAS(\"free\");\nvoid operator delete[](void *ptr) throw() ALIAS(\"free\");\nvoid operator delete(void *ptr, std::nothrow_t const&) ALIAS(\"free\");\nvoid operator delete[](void *ptr, std::nothrow_t const&) ALIAS(\"free\");\n"
  },
  {
    "path": "runtime/sanitizer_common/tests/sanitizer_atomic_test.cpp",
    "content": "//===-- sanitizer_atomic_test.cpp -----------------------------------------===//\n//\n// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n// See https://llvm.org/LICENSE.txt for license information.\n// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n//\n//===----------------------------------------------------------------------===//\n//\n// This file is a part of ThreadSanitizer/AddressSanitizer runtime.\n//\n//===----------------------------------------------------------------------===//\n#include \"sanitizer_common/sanitizer_atomic.h\"\n#include \"gtest/gtest.h\"\n\n#ifndef __has_extension\n#define __has_extension(x) 0\n#endif\n\n#ifndef ATOMIC_LLONG_LOCK_FREE\n#  if __has_extension(c_atomic) || __has_extension(cxx_atomic)\n#    define ATOMIC_LLONG_LOCK_FREE __CLANG_ATOMIC_LLONG_LOCK_FREE\n#  elif __GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 7)\n#    define ATOMIC_LLONG_LOCK_FREE __GCC_ATOMIC_LLONG_LOCK_FREE\n#  else\n#    error Unsupported compiler.\n#  endif\n#endif\n\nnamespace __sanitizer {\n\ntemplate<typename T>\nstruct ValAndMagic {\n  typename T::Type magic0;\n  T a;\n  typename T::Type magic1;\n\n  static ValAndMagic<T> *sink;\n};\n\ntemplate<typename T>\nValAndMagic<T> *ValAndMagic<T>::sink;\n\ntemplate<typename T, memory_order load_mo, memory_order store_mo>\nvoid CheckStoreLoad() {\n  typedef typename T::Type Type;\n  ValAndMagic<T> val;\n  // Prevent the compiler from scalarizing the struct.\n  ValAndMagic<T>::sink = &val;\n  // Ensure that surrounding memory is not overwritten.\n  val.magic0 = val.magic1 = (Type)-3;\n  for (u64 i = 0; i < 100; i++) {\n    // Generate a value that occupies all bytes of the variable.\n    u64 v = i;\n    v |= v << 8;\n    v |= v << 16;\n    v |= v << 32;\n    val.a.val_dont_use = (Type)v;\n    EXPECT_EQ(atomic_load(&val.a, load_mo), (Type)v);\n    val.a.val_dont_use = (Type)-1;\n    atomic_store(&val.a, (Type)v, store_mo);\n    
EXPECT_EQ(val.a.val_dont_use, (Type)v);\n  }\n  EXPECT_EQ(val.magic0, (Type)-3);\n  EXPECT_EQ(val.magic1, (Type)-3);\n}\n\nTEST(SanitizerCommon, AtomicStoreLoad) {\n  CheckStoreLoad<atomic_uint8_t, memory_order_relaxed, memory_order_relaxed>();\n  CheckStoreLoad<atomic_uint8_t, memory_order_consume, memory_order_relaxed>();\n  CheckStoreLoad<atomic_uint8_t, memory_order_acquire, memory_order_relaxed>();\n  CheckStoreLoad<atomic_uint8_t, memory_order_relaxed, memory_order_release>();\n  CheckStoreLoad<atomic_uint8_t, memory_order_seq_cst, memory_order_seq_cst>();\n\n  CheckStoreLoad<atomic_uint16_t, memory_order_relaxed, memory_order_relaxed>();\n  CheckStoreLoad<atomic_uint16_t, memory_order_consume, memory_order_relaxed>();\n  CheckStoreLoad<atomic_uint16_t, memory_order_acquire, memory_order_relaxed>();\n  CheckStoreLoad<atomic_uint16_t, memory_order_relaxed, memory_order_release>();\n  CheckStoreLoad<atomic_uint16_t, memory_order_seq_cst, memory_order_seq_cst>();\n\n  CheckStoreLoad<atomic_uint32_t, memory_order_relaxed, memory_order_relaxed>();\n  CheckStoreLoad<atomic_uint32_t, memory_order_consume, memory_order_relaxed>();\n  CheckStoreLoad<atomic_uint32_t, memory_order_acquire, memory_order_relaxed>();\n  CheckStoreLoad<atomic_uint32_t, memory_order_relaxed, memory_order_release>();\n  CheckStoreLoad<atomic_uint32_t, memory_order_seq_cst, memory_order_seq_cst>();\n\n  // Avoid fallbacking to software emulated compiler atomics, that are usually\n  // provided by libatomic, which is not always present.\n#if ATOMIC_LLONG_LOCK_FREE == 2\n  CheckStoreLoad<atomic_uint64_t, memory_order_relaxed, memory_order_relaxed>();\n  CheckStoreLoad<atomic_uint64_t, memory_order_consume, memory_order_relaxed>();\n  CheckStoreLoad<atomic_uint64_t, memory_order_acquire, memory_order_relaxed>();\n  CheckStoreLoad<atomic_uint64_t, memory_order_relaxed, memory_order_release>();\n  CheckStoreLoad<atomic_uint64_t, memory_order_seq_cst, memory_order_seq_cst>();\n#endif\n\n  
CheckStoreLoad<atomic_uintptr_t, memory_order_relaxed, memory_order_relaxed>\n      ();\n  CheckStoreLoad<atomic_uintptr_t, memory_order_consume, memory_order_relaxed>\n      ();\n  CheckStoreLoad<atomic_uintptr_t, memory_order_acquire, memory_order_relaxed>\n      ();\n  CheckStoreLoad<atomic_uintptr_t, memory_order_relaxed, memory_order_release>\n      ();\n  CheckStoreLoad<atomic_uintptr_t, memory_order_seq_cst, memory_order_seq_cst>\n      ();\n}\n\n// Clang crashes while compiling this test for Android:\n// http://llvm.org/bugs/show_bug.cgi?id=15587\n#if !SANITIZER_ANDROID\ntemplate<typename T>\nvoid CheckAtomicCompareExchange() {\n  typedef typename T::Type Type;\n  {\n    Type old_val = 42;\n    Type new_val = 24;\n    Type var = old_val;\n    EXPECT_TRUE(atomic_compare_exchange_strong((T*)&var, &old_val, new_val,\n                                               memory_order_relaxed));\n    EXPECT_FALSE(atomic_compare_exchange_strong((T*)&var, &old_val, new_val,\n                                                memory_order_relaxed));\n    EXPECT_EQ(new_val, old_val);\n  }\n  {\n    Type old_val = 42;\n    Type new_val = 24;\n    Type var = old_val;\n    EXPECT_TRUE(atomic_compare_exchange_weak((T*)&var, &old_val, new_val,\n                                             memory_order_relaxed));\n    EXPECT_FALSE(atomic_compare_exchange_weak((T*)&var, &old_val, new_val,\n                                              memory_order_relaxed));\n    EXPECT_EQ(new_val, old_val);\n  }\n}\n\nTEST(SanitizerCommon, AtomicCompareExchangeTest) {\n  CheckAtomicCompareExchange<atomic_uint8_t>();\n  CheckAtomicCompareExchange<atomic_uint16_t>();\n  CheckAtomicCompareExchange<atomic_uint32_t>();\n#if ATOMIC_LLONG_LOCK_FREE == 2\n  CheckAtomicCompareExchange<atomic_uint64_t>();\n#endif\n  CheckAtomicCompareExchange<atomic_uintptr_t>();\n}\n#endif  //!SANITIZER_ANDROID\n\n}  // namespace __sanitizer\n"
  },
  {
    "path": "runtime/sanitizer_common/tests/sanitizer_bitvector_test.cpp",
    "content": "//===-- sanitizer_bitvector_test.cpp --------------------------------------===//\n//\n// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n// See https://llvm.org/LICENSE.txt for license information.\n// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n//\n//===----------------------------------------------------------------------===//\n//\n// This file is a part of Sanitizer runtime.\n// Tests for sanitizer_bitvector.h.\n//\n//===----------------------------------------------------------------------===//\n#include \"sanitizer_common/sanitizer_bitvector.h\"\n\n#include \"sanitizer_test_utils.h\"\n\n#include \"gtest/gtest.h\"\n\n#include <algorithm>\n#include <vector>\n#include <random>\n#include <set>\n\nusing namespace __sanitizer;\nusing namespace std;\n\n\n// Check the 'bv' == 's' and that the indexes go in increasing order.\n// Also check the BV::Iterator\ntemplate <class BV>\nstatic void CheckBV(const BV &bv, const set<uptr> &s) {\n  BV t;\n  t.copyFrom(bv);\n  set<uptr> t_s(s);\n  uptr last_idx = bv.size();\n  uptr count = 0;\n  for (typename BV::Iterator it(bv); it.hasNext();) {\n    uptr idx = it.next();\n    count++;\n    if (last_idx != bv.size())\n      EXPECT_LT(last_idx, idx);\n    last_idx = idx;\n    EXPECT_TRUE(s.count(idx));\n  }\n  EXPECT_EQ(count, s.size());\n\n  last_idx = bv.size();\n  while (!t.empty()) {\n    uptr idx = t.getAndClearFirstOne();\n    if (last_idx != bv.size())\n      EXPECT_LT(last_idx, idx);\n    last_idx = idx;\n    EXPECT_TRUE(t_s.erase(idx));\n  }\n  EXPECT_TRUE(t_s.empty());\n}\n\ntemplate <class BV>\nvoid Print(const BV &bv) {\n  BV t;\n  t.copyFrom(bv);\n  while (!t.empty()) {\n    uptr idx = t.getAndClearFirstOne();\n    fprintf(stderr, \"%lu \", idx);\n  }\n  fprintf(stderr, \"\\n\");\n}\n\nvoid Print(const set<uptr> &s) {\n  for (set<uptr>::iterator it = s.begin(); it != s.end(); ++it) {\n#if defined(_WIN64)\n    fprintf(stderr, \"%llu \", *it);\n#else\n    
fprintf(stderr, \"%zu \", *it);\n#endif\n  }\n  fprintf(stderr, \"\\n\");\n}\n\ntemplate <class BV>\nvoid TestBitVector(uptr expected_size) {\n  std::mt19937 r;\n  BV bv, bv1, t_bv;\n  EXPECT_EQ(expected_size, BV::kSize);\n  bv.clear();\n  EXPECT_TRUE(bv.empty());\n  bv.setBit(5);\n  EXPECT_FALSE(bv.empty());\n  EXPECT_FALSE(bv.getBit(4));\n  EXPECT_FALSE(bv.getBit(6));\n  EXPECT_TRUE(bv.getBit(5));\n  bv.clearBit(5);\n  EXPECT_FALSE(bv.getBit(5));\n\n  // test random bits\n  bv.clear();\n  set<uptr> s;\n  for (uptr it = 0; it < 1000; it++) {\n    uptr bit = ((uptr)my_rand() % bv.size());\n    EXPECT_EQ(bv.getBit(bit), s.count(bit) == 1);\n    switch (my_rand() % 2) {\n      case 0:\n        EXPECT_EQ(bv.setBit(bit), s.insert(bit).second);\n        break;\n      case 1:\n        size_t old_size = s.size();\n        s.erase(bit);\n        EXPECT_EQ(bv.clearBit(bit), old_size > s.size());\n        break;\n    }\n    EXPECT_EQ(bv.getBit(bit), s.count(bit) == 1);\n  }\n\n  vector<uptr>bits(bv.size());\n  // Test setUnion, setIntersection, setDifference,\n  // intersectsWith, and getAndClearFirstOne.\n  for (uptr it = 0; it < 30; it++) {\n    // iota\n    for (size_t j = 0; j < bits.size(); j++) bits[j] = j;\n    std::shuffle(bits.begin(), bits.end(), r);\n    set<uptr> s, s1, t_s;\n    bv.clear();\n    bv1.clear();\n    uptr n_bits = ((uptr)my_rand() % bv.size()) + 1;\n    uptr n_bits1 = (uptr)my_rand() % (bv.size() / 2);\n    EXPECT_TRUE(n_bits > 0 && n_bits <= bv.size());\n    EXPECT_TRUE(n_bits1 < bv.size() / 2);\n    for (uptr i = 0; i < n_bits; i++) {\n      bv.setBit(bits[i]);\n      s.insert(bits[i]);\n    }\n    CheckBV(bv, s);\n    for (uptr i = 0; i < n_bits1; i++) {\n      bv1.setBit(bits[bv.size() / 2 + i]);\n      s1.insert(bits[bv.size() / 2 + i]);\n    }\n    CheckBV(bv1, s1);\n\n    vector<uptr> vec;\n    set_intersection(s.begin(), s.end(), s1.begin(), s1.end(),\n                     back_insert_iterator<vector<uptr> >(vec));\n    
EXPECT_EQ(bv.intersectsWith(bv1), !vec.empty());\n\n    // setUnion\n    t_s = s;\n    t_bv.copyFrom(bv);\n    t_s.insert(s1.begin(), s1.end());\n    EXPECT_EQ(t_bv.setUnion(bv1), s.size() != t_s.size());\n    CheckBV(t_bv, t_s);\n\n    // setIntersection\n    t_s = set<uptr>(vec.begin(), vec.end());\n    t_bv.copyFrom(bv);\n    EXPECT_EQ(t_bv.setIntersection(bv1), s.size() != t_s.size());\n    CheckBV(t_bv, t_s);\n\n    // setDifference\n    vec.clear();\n    set_difference(s.begin(), s.end(), s1.begin(), s1.end(),\n                     back_insert_iterator<vector<uptr> >(vec));\n    t_s = set<uptr>(vec.begin(), vec.end());\n    t_bv.copyFrom(bv);\n    EXPECT_EQ(t_bv.setDifference(bv1), s.size() != t_s.size());\n    CheckBV(t_bv, t_s);\n  }\n}\n\nTEST(SanitizerCommon, BasicBitVector) {\n  TestBitVector<BasicBitVector<u8> >(8);\n  TestBitVector<BasicBitVector<u16> >(16);\n  TestBitVector<BasicBitVector<> >(SANITIZER_WORDSIZE);\n}\n\nTEST(SanitizerCommon, TwoLevelBitVector) {\n  uptr ws = SANITIZER_WORDSIZE;\n  TestBitVector<TwoLevelBitVector<1, BasicBitVector<u8> > >(8 * 8);\n  TestBitVector<TwoLevelBitVector<> >(ws * ws);\n  TestBitVector<TwoLevelBitVector<2> >(ws * ws * 2);\n  TestBitVector<TwoLevelBitVector<3> >(ws * ws * 3);\n  TestBitVector<TwoLevelBitVector<3, BasicBitVector<u16> > >(16 * 16 * 3);\n}\n"
  },
  {
    "path": "runtime/sanitizer_common/tests/sanitizer_bvgraph_test.cpp",
    "content": "//===-- sanitizer_bvgraph_test.cpp ----------------------------------------===//\n//\n// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n// See https://llvm.org/LICENSE.txt for license information.\n// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n//\n//===----------------------------------------------------------------------===//\n//\n// This file is a part of Sanitizer runtime.\n// Tests for sanitizer_bvgraph.h.\n//\n//===----------------------------------------------------------------------===//\n#include \"sanitizer_common/sanitizer_bvgraph.h\"\n\n#include \"sanitizer_test_utils.h\"\n\n#include \"gtest/gtest.h\"\n\n#include <algorithm>\n#include <vector>\n#include <set>\n\nusing namespace __sanitizer;\nusing namespace std;\n\ntypedef BasicBitVector<u8> BV1;\ntypedef BasicBitVector<> BV2;\ntypedef TwoLevelBitVector<> BV3;\ntypedef TwoLevelBitVector<3, BasicBitVector<u8> > BV4;\n\ntemplate<class G>\nvoid PrintGraph(const G &g) {\n  for (uptr i = 0; i < g.size(); i++) {\n    for (uptr j = 0; j < g.size(); j++) {\n      fprintf(stderr, \"%d\", g.hasEdge(i, j));\n    }\n    fprintf(stderr, \"\\n\");\n  }\n}\n\n\nclass SimpleGraph {\n public:\n  void clear() { s_.clear(); }\n  bool addEdge(uptr from, uptr to) {\n    return s_.insert(idx(from, to)).second;\n  }\n  bool removeEdge(uptr from, uptr to) {\n    return s_.erase(idx(from, to));\n  }\n  template <class G>\n  void checkSameAs(G *g) {\n    for (set<uptr>::iterator it = s_.begin(); it != s_.end(); ++it) {\n      uptr from = *it >> 16;\n      uptr to = *it & ((1 << 16) - 1);\n      EXPECT_TRUE(g->removeEdge(from, to));\n    }\n    EXPECT_TRUE(g->empty());\n  }\n private:\n  uptr idx(uptr from, uptr to) {\n    CHECK_LE(from|to, 1 << 16);\n    return (from << 16) + to;\n  }\n  set<uptr> s_;\n};\n\ntemplate <class BV>\nvoid BasicTest() {\n  BVGraph<BV> g;\n  g.clear();\n  BV target;\n  SimpleGraph s_g;\n  set<uptr> s;\n  set<uptr> s_target;\n  int 
num_reachable = 0;\n  for (int it = 0; it < 1000; it++) {\n    target.clear();\n    s_target.clear();\n    for (int t = 0; t < 4; t++) {\n      uptr idx = (uptr)my_rand() % g.size();\n      EXPECT_EQ(target.setBit(idx), s_target.insert(idx).second);\n    }\n    uptr from = my_rand() % g.size();\n    uptr to = my_rand() % g.size();\n    EXPECT_EQ(g.addEdge(from, to), s_g.addEdge(from, to));\n    EXPECT_TRUE(g.hasEdge(from, to));\n    for (int i = 0; i < 10; i++) {\n      from = my_rand() % g.size();\n      bool is_reachable = g.isReachable(from, target);\n      if (is_reachable) {\n        uptr path[BV::kSize];\n        uptr len;\n        for (len = 1; len < BV::kSize; len++) {\n          if (g.findPath(from, target, path, len) == len)\n            break;\n        }\n        EXPECT_LT(len, BV::kSize);\n        EXPECT_TRUE(target.getBit(path[len - 1]));\n        // fprintf(stderr, \"reachable: %zd; path %zd {%zd %zd %zd}\\n\",\n        //        from, len, path[0], path[1], path[2]);\n        num_reachable++;\n      }\n    }\n  }\n  EXPECT_GT(num_reachable, 0);\n}\n\nTEST(BVGraph, BasicTest) {\n  BasicTest<BV1>();\n  BasicTest<BV2>();\n  BasicTest<BV3>();\n  BasicTest<BV4>();\n}\n\ntemplate <class BV>\nvoid RemoveEdges() {\n  SimpleGraph s_g;\n  BVGraph<BV> g;\n  g.clear();\n  BV bv;\n  set<uptr> s;\n  for (int it = 0; it < 100; it++) {\n    s.clear();\n    bv.clear();\n    s_g.clear();\n    g.clear();\n    for (uptr j = 0; j < g.size() * 2; j++) {\n      uptr from = my_rand() % g.size();\n      uptr to = my_rand() % g.size();\n      EXPECT_EQ(g.addEdge(from, to), s_g.addEdge(from, to));\n    }\n    for (uptr j = 0; j < 5; j++) {\n      uptr idx = my_rand() % g.size();\n      s.insert(idx);\n      bv.setBit(idx);\n    }\n\n    if (it % 2) {\n      g.removeEdgesFrom(bv);\n      for (set<uptr>::iterator from = s.begin(); from != s.end(); ++from) {\n        for (uptr to = 0; to < g.size(); to++)\n          s_g.removeEdge(*from, to);\n      }\n    } else {\n      
g.removeEdgesTo(bv);\n      for (set<uptr>::iterator to = s.begin(); to != s.end(); ++to) {\n        for (uptr from = 0; from < g.size(); from++)\n          s_g.removeEdge(from, *to);\n      }\n    }\n    s_g.checkSameAs(&g);\n  }\n}\n\nTEST(BVGraph, RemoveEdges) {\n  RemoveEdges<BV1>();\n  RemoveEdges<BV2>();\n  RemoveEdges<BV3>();\n  RemoveEdges<BV4>();\n}\n\ntemplate <class BV>\nvoid Test_isReachable() {\n  uptr path[5];\n  BVGraph<BV> g;\n  g.clear();\n  BV target;\n  target.clear();\n  uptr t0 = 0;\n  uptr t1 = g.size() - 1;\n  target.setBit(t0);\n  target.setBit(t1);\n\n  uptr f0 = 1;\n  uptr f1 = 2;\n  uptr f2 = g.size() / 2;\n  uptr f3 = g.size() - 2;\n\n  EXPECT_FALSE(g.isReachable(f0, target));\n  EXPECT_FALSE(g.isReachable(f1, target));\n  EXPECT_FALSE(g.isReachable(f2, target));\n  EXPECT_FALSE(g.isReachable(f3, target));\n\n  g.addEdge(f0, f1);\n  g.addEdge(f1, f2);\n  g.addEdge(f2, f3);\n  EXPECT_FALSE(g.isReachable(f0, target));\n  EXPECT_FALSE(g.isReachable(f1, target));\n  EXPECT_FALSE(g.isReachable(f2, target));\n  EXPECT_FALSE(g.isReachable(f3, target));\n\n  g.addEdge(f1, t0);\n  EXPECT_TRUE(g.isReachable(f0, target));\n  EXPECT_TRUE(g.isReachable(f1, target));\n  EXPECT_FALSE(g.isReachable(f2, target));\n  EXPECT_FALSE(g.isReachable(f3, target));\n  EXPECT_EQ(g.findPath(f0, target, path, ARRAY_SIZE(path)), 3U);\n  EXPECT_EQ(path[0], f0);\n  EXPECT_EQ(path[1], f1);\n  EXPECT_EQ(path[2], t0);\n  EXPECT_EQ(g.findPath(f1, target, path, ARRAY_SIZE(path)), 2U);\n  EXPECT_EQ(path[0], f1);\n  EXPECT_EQ(path[1], t0);\n\n  g.addEdge(f3, t1);\n  EXPECT_TRUE(g.isReachable(f0, target));\n  EXPECT_TRUE(g.isReachable(f1, target));\n  EXPECT_TRUE(g.isReachable(f2, target));\n  EXPECT_TRUE(g.isReachable(f3, target));\n}\n\nTEST(BVGraph, isReachable) {\n  Test_isReachable<BV1>();\n  Test_isReachable<BV2>();\n  Test_isReachable<BV3>();\n  Test_isReachable<BV4>();\n}\n\ntemplate <class BV>\nvoid LongCycle() {\n  BVGraph<BV> g;\n  g.clear();\n  vector<uptr> 
path_vec(g.size());\n  uptr *path = path_vec.data();\n  uptr start = 5;\n  for (uptr i = start; i < g.size() - 1; i++) {\n    g.addEdge(i, i + 1);\n    for (uptr j = 0; j < start; j++)\n      g.addEdge(i, j);\n  }\n  //  Bad graph that looks like this:\n  // 00000000000000\n  // 00000000000000\n  // 00000000000000\n  // 00000000000000\n  // 00000000000000\n  // 11111010000000\n  // 11111001000000\n  // 11111000100000\n  // 11111000010000\n  // 11111000001000\n  // 11111000000100\n  // 11111000000010\n  // 11111000000001\n  // if (g.size() <= 64) PrintGraph(g);\n  BV target;\n  for (uptr i = start + 1; i < g.size(); i += 11) {\n    // if ((i & (i - 1)) == 0) fprintf(stderr, \"Path: : %zd\\n\", i);\n    target.clear();\n    target.setBit(i);\n    EXPECT_TRUE(g.isReachable(start, target));\n    EXPECT_EQ(g.findPath(start, target, path, g.size()), i - start + 1);\n  }\n}\n\nTEST(BVGraph, LongCycle) {\n  LongCycle<BV1>();\n  LongCycle<BV2>();\n  LongCycle<BV3>();\n  LongCycle<BV4>();\n}\n\ntemplate <class BV>\nvoid ShortestPath() {\n  uptr path[8];\n  BVGraph<BV> g;\n  g.clear();\n  BV t7;\n  t7.clear();\n  t7.setBit(7);\n  // 1=>2=>3=>4=>5=>6=>7\n  // 1=>7\n  g.addEdge(1, 2);\n  g.addEdge(2, 3);\n  g.addEdge(3, 4);\n  g.addEdge(4, 5);\n  g.addEdge(5, 6);\n  g.addEdge(6, 7);\n  g.addEdge(1, 7);\n  EXPECT_TRUE(g.isReachable(1, t7));\n  // No path of length 1.\n  EXPECT_EQ(0U, g.findPath(1, t7, path, 1));\n  // Trying to find a path of len 2..6 gives path of len 2.\n  EXPECT_EQ(2U, g.findPath(1, t7, path, 2));\n  EXPECT_EQ(2U, g.findPath(1, t7, path, 3));\n  EXPECT_EQ(2U, g.findPath(1, t7, path, 4));\n  EXPECT_EQ(2U, g.findPath(1, t7, path, 5));\n  EXPECT_EQ(2U, g.findPath(1, t7, path, 6));\n  // Trying to find a path of len 7 gives path of len 7, because this is DFS.\n  EXPECT_EQ(7U, g.findPath(1, t7, path, 7));\n  // But findShortestPath will find the shortest path.\n  EXPECT_EQ(2U, g.findShortestPath(1, t7, path, 2));\n  EXPECT_EQ(2U, g.findShortestPath(1, t7, path, 
7));\n}\n\nTEST(BVGraph, ShortestPath) {\n  ShortestPath<BV1>();\n  ShortestPath<BV2>();\n  ShortestPath<BV3>();\n  ShortestPath<BV4>();\n}\n\ntemplate <class BV>\nvoid RunAddEdgesTest() {\n  BVGraph<BV> g;\n  BV from;\n  const int kMaxEdges = 10;\n  uptr added_edges[kMaxEdges];\n  g.clear();\n  from.clear();\n  EXPECT_EQ(0U, g.addEdges(from, 0, added_edges, kMaxEdges));\n  EXPECT_EQ(0U, g.addEdges(from, 1, added_edges, kMaxEdges));\n  from.setBit(0);\n  EXPECT_EQ(1U, g.addEdges(from, 1, added_edges, kMaxEdges));\n  EXPECT_EQ(0U, added_edges[0]);\n  EXPECT_EQ(0U, g.addEdges(from, 1, added_edges, kMaxEdges));\n\n  from.clear();\n  from.setBit(1);\n  EXPECT_EQ(1U, g.addEdges(from, 4, added_edges, kMaxEdges));\n  EXPECT_TRUE(g.hasEdge(1, 4));\n  EXPECT_FALSE(g.hasEdge(1, 5));\n  EXPECT_EQ(1U, added_edges[0]);\n  from.setBit(2);\n  from.setBit(3);\n  EXPECT_EQ(2U, g.addEdges(from, 4, added_edges, kMaxEdges));\n  EXPECT_TRUE(g.hasEdge(2, 4));\n  EXPECT_FALSE(g.hasEdge(2, 5));\n  EXPECT_TRUE(g.hasEdge(3, 4));\n  EXPECT_FALSE(g.hasEdge(3, 5));\n  EXPECT_EQ(2U, added_edges[0]);\n  EXPECT_EQ(3U, added_edges[1]);\n}\n\nTEST(BVGraph, AddEdgesTest) {\n  RunAddEdgesTest<BV2>();\n}\n"
  },
  {
    "path": "runtime/sanitizer_common/tests/sanitizer_chained_origin_depot_test.cpp",
    "content": "//===-- sanitizer_chained_origin_depot_test.cpp ---------------------------===//\n//\n// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n// See https://llvm.org/LICENSE.txt for license information.\n// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n//\n//===----------------------------------------------------------------------===//\n//\n// This file is a part of Sanitizer runtime.\n// Tests for sanitizer_chained_origin_depot.h.\n//\n//===----------------------------------------------------------------------===//\n\n#include \"sanitizer_common/sanitizer_chained_origin_depot.h\"\n\n#include \"gtest/gtest.h\"\n#include \"sanitizer_common/sanitizer_internal_defs.h\"\n#include \"sanitizer_common/sanitizer_libc.h\"\n\nnamespace __sanitizer {\n\nstatic ChainedOriginDepot chainedOriginDepot;\n\nTEST(SanitizerCommon, ChainedOriginDepotBasic) {\n  u32 new_id;\n  EXPECT_TRUE(chainedOriginDepot.Put(1, 2, &new_id));\n  u32 prev_id;\n  EXPECT_EQ(chainedOriginDepot.Get(new_id, &prev_id), 1U);\n  EXPECT_EQ(prev_id, 2U);\n}\n\nTEST(SanitizerCommon, ChainedOriginDepotAbsent) {\n  u32 prev_id;\n  EXPECT_EQ(0U, chainedOriginDepot.Get(99, &prev_id));\n  EXPECT_EQ(0U, prev_id);\n}\n\nTEST(SanitizerCommon, ChainedOriginDepotZeroId) {\n  u32 prev_id;\n  EXPECT_EQ(0U, chainedOriginDepot.Get(0, &prev_id));\n  EXPECT_EQ(0U, prev_id);\n}\n\nTEST(SanitizerCommon, ChainedOriginDepotSame) {\n  u32 new_id1;\n  EXPECT_TRUE(chainedOriginDepot.Put(11, 12, &new_id1));\n  u32 new_id2;\n  EXPECT_FALSE(chainedOriginDepot.Put(11, 12, &new_id2));\n  EXPECT_EQ(new_id1, new_id2);\n\n  u32 prev_id;\n  EXPECT_EQ(chainedOriginDepot.Get(new_id1, &prev_id), 11U);\n  EXPECT_EQ(prev_id, 12U);\n}\n\nTEST(SanitizerCommon, ChainedOriginDepotDifferent) {\n  u32 new_id1;\n  EXPECT_TRUE(chainedOriginDepot.Put(21, 22, &new_id1));\n  u32 new_id2;\n  EXPECT_TRUE(chainedOriginDepot.Put(21, 23, &new_id2));\n  EXPECT_NE(new_id1, new_id2);\n\n  u32 prev_id;\n  
EXPECT_EQ(chainedOriginDepot.Get(new_id1, &prev_id), 21U);\n  EXPECT_EQ(prev_id, 22U);\n  EXPECT_EQ(chainedOriginDepot.Get(new_id2, &prev_id), 21U);\n  EXPECT_EQ(prev_id, 23U);\n}\n\nTEST(SanitizerCommon, ChainedOriginDepotStats) {\n  StackDepotStats stats0 = chainedOriginDepot.GetStats();\n\n  u32 new_id;\n  EXPECT_TRUE(chainedOriginDepot.Put(33, 34, &new_id));\n  StackDepotStats stats1 = chainedOriginDepot.GetStats();\n  EXPECT_EQ(stats1.n_uniq_ids, stats0.n_uniq_ids + 1);\n  EXPECT_GT(stats1.allocated, stats0.allocated);\n\n  EXPECT_FALSE(chainedOriginDepot.Put(33, 34, &new_id));\n  StackDepotStats stats2 = chainedOriginDepot.GetStats();\n  EXPECT_EQ(stats2.n_uniq_ids, stats1.n_uniq_ids);\n  EXPECT_EQ(stats2.allocated, stats1.allocated);\n\n  for (int i = 0; i < 100000; ++i) {\n    ASSERT_TRUE(chainedOriginDepot.Put(35, i, &new_id));\n    StackDepotStats stats3 = chainedOriginDepot.GetStats();\n    ASSERT_EQ(stats3.n_uniq_ids, stats2.n_uniq_ids + 1 + i);\n  }\n  EXPECT_GT(chainedOriginDepot.GetStats().allocated, stats2.allocated);\n}\n\n}  // namespace __sanitizer\n"
  },
  {
    "path": "runtime/sanitizer_common/tests/sanitizer_common_test.cpp",
    "content": "//===-- sanitizer_common_test.cpp -----------------------------------------===//\n//\n// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n// See https://llvm.org/LICENSE.txt for license information.\n// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n//\n//===----------------------------------------------------------------------===//\n//\n// This file is a part of ThreadSanitizer/AddressSanitizer runtime.\n//\n//===----------------------------------------------------------------------===//\n#include <algorithm>\n\n#include \"sanitizer_common/sanitizer_allocator_internal.h\"\n#include \"sanitizer_common/sanitizer_common.h\"\n#include \"sanitizer_common/sanitizer_file.h\"\n#include \"sanitizer_common/sanitizer_flags.h\"\n#include \"sanitizer_common/sanitizer_libc.h\"\n#include \"sanitizer_common/sanitizer_platform.h\"\n\n#include \"sanitizer_pthread_wrappers.h\"\n\n#include \"gtest/gtest.h\"\n\nnamespace __sanitizer {\n\nstatic bool IsSorted(const uptr *array, uptr n) {\n  for (uptr i = 1; i < n; i++) {\n    if (array[i] < array[i - 1]) return false;\n  }\n  return true;\n}\n\nTEST(SanitizerCommon, SortTest) {\n  uptr array[100];\n  uptr n = 100;\n  // Already sorted.\n  for (uptr i = 0; i < n; i++) {\n    array[i] = i;\n  }\n  Sort(array, n);\n  EXPECT_TRUE(IsSorted(array, n));\n  // Reverse order.\n  for (uptr i = 0; i < n; i++) {\n    array[i] = n - 1 - i;\n  }\n  Sort(array, n);\n  EXPECT_TRUE(IsSorted(array, n));\n  // Mixed order.\n  for (uptr i = 0; i < n; i++) {\n    array[i] = (i % 2 == 0) ? 
i : n - 1 - i;\n  }\n  Sort(array, n);\n  EXPECT_TRUE(IsSorted(array, n));\n  // All equal.\n  for (uptr i = 0; i < n; i++) {\n    array[i] = 42;\n  }\n  Sort(array, n);\n  EXPECT_TRUE(IsSorted(array, n));\n  // All but one sorted.\n  for (uptr i = 0; i < n - 1; i++) {\n    array[i] = i;\n  }\n  array[n - 1] = 42;\n  Sort(array, n);\n  EXPECT_TRUE(IsSorted(array, n));\n  // Minimal case - sort three elements.\n  array[0] = 1;\n  array[1] = 0;\n  Sort(array, 2);\n  EXPECT_TRUE(IsSorted(array, 2));\n}\n\nTEST(SanitizerCommon, MmapAlignedOrDieOnFatalError) {\n  uptr PageSize = GetPageSizeCached();\n  for (uptr size = 1; size <= 32; size *= 2) {\n    for (uptr alignment = 1; alignment <= 32; alignment *= 2) {\n      for (int iter = 0; iter < 100; iter++) {\n        uptr res = (uptr)MmapAlignedOrDieOnFatalError(\n            size * PageSize, alignment * PageSize, \"MmapAlignedOrDieTest\");\n        EXPECT_EQ(0U, res % (alignment * PageSize));\n        internal_memset((void*)res, 1, size * PageSize);\n        UnmapOrDie((void*)res, size * PageSize);\n      }\n    }\n  }\n}\n\nTEST(SanitizerCommon, Mprotect) {\n  uptr PageSize = GetPageSizeCached();\n  u8 *mem = reinterpret_cast<u8 *>(MmapOrDie(PageSize, \"MprotectTest\"));\n  for (u8 *p = mem; p < mem + PageSize; ++p) ++(*p);\n\n  MprotectReadOnly(reinterpret_cast<uptr>(mem), PageSize);\n  for (u8 *p = mem; p < mem + PageSize; ++p) EXPECT_EQ(1u, *p);\n  EXPECT_DEATH(++mem[0], \"\");\n  EXPECT_DEATH(++mem[PageSize / 2], \"\");\n  EXPECT_DEATH(++mem[PageSize - 1], \"\");\n\n  MprotectNoAccess(reinterpret_cast<uptr>(mem), PageSize);\n  volatile u8 t;\n  (void)t;\n  EXPECT_DEATH(t = mem[0], \"\");\n  EXPECT_DEATH(t = mem[PageSize / 2], \"\");\n  EXPECT_DEATH(t = mem[PageSize - 1], \"\");\n}\n\nTEST(SanitizerCommon, InternalMmapVectorRoundUpCapacity) {\n  InternalMmapVector<uptr> v;\n  v.reserve(1);\n  CHECK_EQ(v.capacity(), GetPageSizeCached() / sizeof(uptr));\n}\n\nTEST(SanitizerCommon, InternalMmapVectorReize) {\n  
InternalMmapVector<uptr> v;\n  CHECK_EQ(0U, v.size());\n  CHECK_GE(v.capacity(), v.size());\n\n  v.reserve(1000);\n  CHECK_EQ(0U, v.size());\n  CHECK_GE(v.capacity(), 1000U);\n\n  v.resize(10000);\n  CHECK_EQ(10000U, v.size());\n  CHECK_GE(v.capacity(), v.size());\n  uptr cap = v.capacity();\n\n  v.resize(100);\n  CHECK_EQ(100U, v.size());\n  CHECK_EQ(v.capacity(), cap);\n\n  v.reserve(10);\n  CHECK_EQ(100U, v.size());\n  CHECK_EQ(v.capacity(), cap);\n}\n\nTEST(SanitizerCommon, InternalMmapVector) {\n  InternalMmapVector<uptr> vector;\n  for (uptr i = 0; i < 100; i++) {\n    EXPECT_EQ(i, vector.size());\n    vector.push_back(i);\n  }\n  for (uptr i = 0; i < 100; i++) {\n    EXPECT_EQ(i, vector[i]);\n  }\n  for (int i = 99; i >= 0; i--) {\n    EXPECT_EQ((uptr)i, vector.back());\n    vector.pop_back();\n    EXPECT_EQ((uptr)i, vector.size());\n  }\n  InternalMmapVector<uptr> empty_vector;\n  CHECK_EQ(empty_vector.capacity(), 0U);\n  CHECK_EQ(0U, empty_vector.size());\n}\n\nTEST(SanitizerCommon, InternalMmapVectorEq) {\n  InternalMmapVector<uptr> vector1;\n  InternalMmapVector<uptr> vector2;\n  for (uptr i = 0; i < 100; i++) {\n    vector1.push_back(i);\n    vector2.push_back(i);\n  }\n  EXPECT_TRUE(vector1 == vector2);\n  EXPECT_FALSE(vector1 != vector2);\n\n  vector1.push_back(1);\n  EXPECT_FALSE(vector1 == vector2);\n  EXPECT_TRUE(vector1 != vector2);\n\n  vector2.push_back(1);\n  EXPECT_TRUE(vector1 == vector2);\n  EXPECT_FALSE(vector1 != vector2);\n\n  vector1[55] = 1;\n  EXPECT_FALSE(vector1 == vector2);\n  EXPECT_TRUE(vector1 != vector2);\n}\n\nTEST(SanitizerCommon, InternalMmapVectorSwap) {\n  InternalMmapVector<uptr> vector1;\n  InternalMmapVector<uptr> vector2;\n  InternalMmapVector<uptr> vector3;\n  InternalMmapVector<uptr> vector4;\n  for (uptr i = 0; i < 100; i++) {\n    vector1.push_back(i);\n    vector2.push_back(i);\n    vector3.push_back(-i);\n    vector4.push_back(-i);\n  }\n  EXPECT_NE(vector2, vector3);\n  EXPECT_NE(vector1, vector4);\n  
vector1.swap(vector3);\n  EXPECT_EQ(vector2, vector3);\n  EXPECT_EQ(vector1, vector4);\n}\n\nvoid TestThreadInfo(bool main) {\n  uptr stk_addr = 0;\n  uptr stk_size = 0;\n  uptr tls_addr = 0;\n  uptr tls_size = 0;\n  GetThreadStackAndTls(main, &stk_addr, &stk_size, &tls_addr, &tls_size);\n\n  int stack_var;\n  EXPECT_NE(stk_addr, (uptr)0);\n  EXPECT_NE(stk_size, (uptr)0);\n  EXPECT_GT((uptr)&stack_var, stk_addr);\n  EXPECT_LT((uptr)&stack_var, stk_addr + stk_size);\n\n#if SANITIZER_LINUX && defined(__x86_64__)\n  static __thread int thread_var;\n  EXPECT_NE(tls_addr, (uptr)0);\n  EXPECT_NE(tls_size, (uptr)0);\n  EXPECT_GT((uptr)&thread_var, tls_addr);\n  EXPECT_LT((uptr)&thread_var, tls_addr + tls_size);\n\n  // Ensure that tls and stack do not intersect.\n  uptr tls_end = tls_addr + tls_size;\n  EXPECT_TRUE(tls_addr < stk_addr || tls_addr >= stk_addr + stk_size);\n  EXPECT_TRUE(tls_end  < stk_addr || tls_end  >=  stk_addr + stk_size);\n  EXPECT_TRUE((tls_addr < stk_addr) == (tls_end  < stk_addr));\n#endif\n}\n\nstatic void *WorkerThread(void *arg) {\n  TestThreadInfo(false);\n  return 0;\n}\n\nTEST(SanitizerCommon, ThreadStackTlsMain) {\n  InitTlsSize();\n  TestThreadInfo(true);\n}\n\nTEST(SanitizerCommon, ThreadStackTlsWorker) {\n  InitTlsSize();\n  pthread_t t;\n  PTHREAD_CREATE(&t, 0, WorkerThread, 0);\n  PTHREAD_JOIN(t, 0);\n}\n\nbool UptrLess(uptr a, uptr b) {\n  return a < b;\n}\n\nTEST(SanitizerCommon, InternalLowerBound) {\n  std::vector<int> arr = {1, 3, 5, 7, 11};\n\n  EXPECT_EQ(0u, InternalLowerBound(arr, 0));\n  EXPECT_EQ(0u, InternalLowerBound(arr, 1));\n  EXPECT_EQ(1u, InternalLowerBound(arr, 2));\n  EXPECT_EQ(1u, InternalLowerBound(arr, 3));\n  EXPECT_EQ(2u, InternalLowerBound(arr, 4));\n  EXPECT_EQ(2u, InternalLowerBound(arr, 5));\n  EXPECT_EQ(3u, InternalLowerBound(arr, 6));\n  EXPECT_EQ(3u, InternalLowerBound(arr, 7));\n  EXPECT_EQ(4u, InternalLowerBound(arr, 8));\n  EXPECT_EQ(4u, InternalLowerBound(arr, 9));\n  EXPECT_EQ(4u, 
InternalLowerBound(arr, 10));\n  EXPECT_EQ(4u, InternalLowerBound(arr, 11));\n  EXPECT_EQ(5u, InternalLowerBound(arr, 12));\n}\n\nTEST(SanitizerCommon, InternalLowerBoundVsStdLowerBound) {\n  std::vector<int> data;\n  auto create_item = [] (size_t i, size_t j) {\n    auto v = i * 10000 + j;\n    return ((v << 6) + (v >> 6) + 0x9e3779b9) % 100;\n  };\n  for (size_t i = 0; i < 1000; ++i) {\n    data.resize(i);\n    for (size_t j = 0; j < i; ++j) {\n      data[j] = create_item(i, j);\n    }\n\n    std::sort(data.begin(), data.end());\n\n    for (size_t j = 0; j < i; ++j) {\n      int val = create_item(i, j);\n      for (auto to_find : {val - 1, val, val + 1}) {\n        uptr expected =\n            std::lower_bound(data.begin(), data.end(), to_find) - data.begin();\n        EXPECT_EQ(expected,\n                  InternalLowerBound(data, to_find, std::less<int>()));\n      }\n    }\n  }\n}\n\nclass SortAndDedupTest : public ::testing::TestWithParam<std::vector<int>> {};\n\nTEST_P(SortAndDedupTest, SortAndDedup) {\n  std::vector<int> v_std = GetParam();\n  std::sort(v_std.begin(), v_std.end());\n  v_std.erase(std::unique(v_std.begin(), v_std.end()), v_std.end());\n\n  std::vector<int> v = GetParam();\n  SortAndDedup(v);\n\n  EXPECT_EQ(v_std, v);\n}\n\nconst std::vector<int> kSortAndDedupTests[] = {\n    {},\n    {1},\n    {1, 1},\n    {1, 1, 1},\n    {1, 2, 3},\n    {3, 2, 1},\n    {1, 2, 2, 3},\n    {3, 3, 2, 1, 2},\n    {3, 3, 2, 1, 2},\n    {1, 2, 1, 1, 2, 1, 1, 1, 2, 2},\n    {1, 3, 3, 2, 3, 1, 3, 1, 4, 4, 2, 1, 4, 1, 1, 2, 2},\n};\nINSTANTIATE_TEST_SUITE_P(SortAndDedupTest, SortAndDedupTest,\n                         ::testing::ValuesIn(kSortAndDedupTests));\n\n#if SANITIZER_LINUX && !SANITIZER_ANDROID\nTEST(SanitizerCommon, FindPathToBinary) {\n  char *true_path = FindPathToBinary(\"true\");\n  EXPECT_NE((char*)0, internal_strstr(true_path, \"/bin/true\"));\n  InternalFree(true_path);\n  EXPECT_EQ(0, FindPathToBinary(\"unexisting_binary.ergjeorj\"));\n}\n#elif 
SANITIZER_WINDOWS\nTEST(SanitizerCommon, FindPathToBinary) {\n  // ntdll.dll should be on PATH in all supported test environments on all\n  // supported Windows versions.\n  char *ntdll_path = FindPathToBinary(\"ntdll.dll\");\n  EXPECT_NE((char*)0, internal_strstr(ntdll_path, \"ntdll.dll\"));\n  InternalFree(ntdll_path);\n  EXPECT_EQ(0, FindPathToBinary(\"unexisting_binary.ergjeorj\"));\n}\n#endif\n\nTEST(SanitizerCommon, StripPathPrefix) {\n  EXPECT_EQ(0, StripPathPrefix(0, \"prefix\"));\n  EXPECT_STREQ(\"foo\", StripPathPrefix(\"foo\", 0));\n  EXPECT_STREQ(\"dir/file.cc\",\n               StripPathPrefix(\"/usr/lib/dir/file.cc\", \"/usr/lib/\"));\n  EXPECT_STREQ(\"/file.cc\", StripPathPrefix(\"/usr/myroot/file.cc\", \"/myroot\"));\n  EXPECT_STREQ(\"file.h\", StripPathPrefix(\"/usr/lib/./file.h\", \"/usr/lib/\"));\n}\n\nTEST(SanitizerCommon, RemoveANSIEscapeSequencesFromString) {\n  RemoveANSIEscapeSequencesFromString(nullptr);\n  const char *buffs[22] = {\n    \"Default\",                                \"Default\",\n    \"\\033[95mLight magenta\",                  \"Light magenta\",\n    \"\\033[30mBlack\\033[32mGreen\\033[90mGray\", \"BlackGreenGray\",\n    \"\\033[106mLight cyan \\033[107mWhite \",    \"Light cyan White \",\n    \"\\033[31mHello\\033[0m World\",             \"Hello World\",\n    \"\\033[38;5;82mHello \\033[38;5;198mWorld\", \"Hello World\",\n    \"123[653456789012\",                       \"123[653456789012\",\n    \"Normal \\033[5mBlink \\033[25mNormal\",     \"Normal Blink Normal\",\n    \"\\033[106m\\033[107m\",                     \"\",\n    \"\",                                       \"\",\n    \" \",                                      \" \",\n  };\n\n  for (size_t i = 0; i < ARRAY_SIZE(buffs); i+=2) {\n    char *buffer_copy = internal_strdup(buffs[i]);\n    RemoveANSIEscapeSequencesFromString(buffer_copy);\n    EXPECT_STREQ(buffer_copy, buffs[i+1]);\n    InternalFree(buffer_copy);\n  }\n}\n\nTEST(SanitizerCommon, InternalScopedString) 
{\n  InternalScopedString str;\n  EXPECT_EQ(0U, str.length());\n  EXPECT_STREQ(\"\", str.data());\n\n  str.append(\"foo\");\n  EXPECT_EQ(3U, str.length());\n  EXPECT_STREQ(\"foo\", str.data());\n\n  int x = 1234;\n  str.append(\"%d\", x);\n  EXPECT_EQ(7U, str.length());\n  EXPECT_STREQ(\"foo1234\", str.data());\n\n  str.append(\"%d\", x);\n  EXPECT_EQ(11U, str.length());\n  EXPECT_STREQ(\"foo12341234\", str.data());\n\n  str.clear();\n  EXPECT_EQ(0U, str.length());\n  EXPECT_STREQ(\"\", str.data());\n}\n\nTEST(SanitizerCommon, InternalScopedStringLarge) {\n  InternalScopedString str;\n  std::string expected;\n  for (int i = 0; i < 1000; ++i) {\n    std::string append(i, 'a' + i % 26);\n    expected += append;\n    str.append(\"%s\", append.c_str());\n    EXPECT_EQ(expected, str.data());\n  }\n}\n\nTEST(SanitizerCommon, InternalScopedStringLargeFormat) {\n  InternalScopedString str;\n  std::string expected;\n  for (int i = 0; i < 1000; ++i) {\n    std::string append(i, 'a' + i % 26);\n    expected += append;\n    str.append(\"%s\", append.c_str());\n    EXPECT_EQ(expected, str.data());\n  }\n}\n\n#if SANITIZER_LINUX || SANITIZER_FREEBSD || SANITIZER_MAC || SANITIZER_IOS\nTEST(SanitizerCommon, GetRandom) {\n  u8 buffer_1[32], buffer_2[32];\n  for (bool blocking : { false, true }) {\n    EXPECT_FALSE(GetRandom(nullptr, 32, blocking));\n    EXPECT_FALSE(GetRandom(buffer_1, 0, blocking));\n    EXPECT_FALSE(GetRandom(buffer_1, 512, blocking));\n    EXPECT_EQ(ARRAY_SIZE(buffer_1), ARRAY_SIZE(buffer_2));\n    for (uptr size = 4; size <= ARRAY_SIZE(buffer_1); size += 4) {\n      for (uptr i = 0; i < 100; i++) {\n        EXPECT_TRUE(GetRandom(buffer_1, size, blocking));\n        EXPECT_TRUE(GetRandom(buffer_2, size, blocking));\n        EXPECT_NE(internal_memcmp(buffer_1, buffer_2, size), 0);\n      }\n    }\n  }\n}\n#endif\n\nTEST(SanitizerCommon, ReservedAddressRangeInit) {\n  uptr init_size = 0xffff;\n  ReservedAddressRange address_range;\n  uptr res = 
address_range.Init(init_size);\n  CHECK_NE(res, (void*)-1);\n  UnmapOrDie((void*)res, init_size);\n  // Should be able to map into the same space now.\n  ReservedAddressRange address_range2;\n  uptr res2 = address_range2.Init(init_size, nullptr, res);\n  CHECK_EQ(res, res2);\n\n  // TODO(flowerhack): Once this is switched to the \"real\" implementation\n  // (rather than passing through to MmapNoAccess*), enforce and test \"no\n  // double initializations allowed\"\n}\n\nTEST(SanitizerCommon, ReservedAddressRangeMap) {\n  constexpr uptr init_size = 0xffff;\n  ReservedAddressRange address_range;\n  uptr res = address_range.Init(init_size);\n  CHECK_NE(res, (void*) -1);\n\n  // Valid mappings should succeed.\n  CHECK_EQ(res, address_range.Map(res, init_size));\n\n  // Valid mappings should be readable.\n  unsigned char buffer[init_size];\n  memcpy(buffer, reinterpret_cast<void *>(res), init_size);\n\n  // TODO(flowerhack): Once this is switched to the \"real\" implementation, make\n  // sure you can only mmap into offsets in the Init range.\n}\n\nTEST(SanitizerCommon, ReservedAddressRangeUnmap) {\n  uptr PageSize = GetPageSizeCached();\n  uptr init_size = PageSize * 8;\n  ReservedAddressRange address_range;\n  uptr base_addr = address_range.Init(init_size);\n  CHECK_NE(base_addr, (void*)-1);\n  CHECK_EQ(base_addr, address_range.Map(base_addr, init_size));\n\n  // Unmapping the entire range should succeed.\n  address_range.Unmap(base_addr, init_size);\n\n  // Map a new range.\n  base_addr = address_range.Init(init_size);\n  CHECK_EQ(base_addr, address_range.Map(base_addr, init_size));\n\n  // Windows doesn't allow partial unmappings.\n  #if !SANITIZER_WINDOWS\n\n  // Unmapping at the beginning should succeed.\n  address_range.Unmap(base_addr, PageSize);\n\n  // Unmapping at the end should succeed.\n  uptr new_start = reinterpret_cast<uptr>(address_range.base()) +\n                   address_range.size() - PageSize;\n  address_range.Unmap(new_start, PageSize);\n\n  
#endif\n\n  // Unmapping in the middle of the ReservedAddressRange should fail.\n  EXPECT_DEATH(address_range.Unmap(base_addr + (PageSize * 2), PageSize), \".*\");\n}\n\nTEST(SanitizerCommon, ReadBinaryNameCached) {\n  char buf[256];\n  EXPECT_NE((uptr)0, ReadBinaryNameCached(buf, sizeof(buf)));\n}\n\n}  // namespace __sanitizer\n"
  },
  {
    "path": "runtime/sanitizer_common/tests/sanitizer_deadlock_detector_test.cpp",
    "content": "//===-- sanitizer_deadlock_detector_test.cpp ------------------------------===//\n//\n// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n// See https://llvm.org/LICENSE.txt for license information.\n// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n//\n//===----------------------------------------------------------------------===//\n//\n// This file is a part of Sanitizer runtime.\n// Tests for sanitizer_deadlock_detector.h\n//\n//===----------------------------------------------------------------------===//\n#include \"sanitizer_common/sanitizer_deadlock_detector.h\"\n\n#include \"sanitizer_test_utils.h\"\n\n#include \"gtest/gtest.h\"\n\n#include <algorithm>\n#include <vector>\n#include <set>\n\nusing namespace __sanitizer;\nusing namespace std;\n\ntypedef BasicBitVector<u8> BV1;\ntypedef BasicBitVector<> BV2;\ntypedef TwoLevelBitVector<> BV3;\ntypedef TwoLevelBitVector<3, BasicBitVector<u8> > BV4;\n\n// Poor man's unique_ptr.\ntemplate<class BV>\nstruct ScopedDD {\n  ScopedDD() {\n    dp = new DeadlockDetector<BV>;\n    dp->clear();\n    dtls.clear();\n  }\n  ~ScopedDD() { delete dp; }\n  DeadlockDetector<BV> *dp;\n  DeadlockDetectorTLS<BV> dtls;\n};\n\ntemplate <class BV>\nvoid RunBasicTest() {\n  uptr path[10];\n  ScopedDD<BV> sdd;\n  DeadlockDetector<BV> &d = *sdd.dp;\n  DeadlockDetectorTLS<BV> &dtls = sdd.dtls;\n  set<uptr> s;\n  for (size_t i = 0; i < d.size() * 3; i++) {\n    uptr node = d.newNode(0);\n    EXPECT_TRUE(s.insert(node).second);\n  }\n\n  d.clear();\n  s.clear();\n  // Add size() nodes.\n  for (size_t i = 0; i < d.size(); i++) {\n    uptr node = d.newNode(0);\n    EXPECT_TRUE(s.insert(node).second);\n  }\n  // Remove all nodes.\n  for (set<uptr>::iterator it = s.begin(); it != s.end(); ++it)\n    d.removeNode(*it);\n  // The nodes should be reused.\n  for (size_t i = 0; i < d.size(); i++) {\n    uptr node = d.newNode(0);\n    EXPECT_FALSE(s.insert(node).second);\n  }\n\n  // Cycle: 
n1->n2->n1\n  {\n    d.clear();\n    dtls.clear();\n    uptr n1 = d.newNode(1);\n    uptr n2 = d.newNode(2);\n    EXPECT_FALSE(d.onLock(&dtls, n1));\n    EXPECT_FALSE(d.onLock(&dtls, n2));\n    d.onUnlock(&dtls, n2);\n    d.onUnlock(&dtls, n1);\n\n    EXPECT_FALSE(d.onLock(&dtls, n2));\n    EXPECT_EQ(0U, d.findPathToLock(&dtls, n1, path, 1));\n    EXPECT_EQ(2U, d.findPathToLock(&dtls, n1, path, 10));\n    EXPECT_EQ(2U, d.findPathToLock(&dtls, n1, path, 2));\n    EXPECT_TRUE(d.onLock(&dtls, n1));\n    EXPECT_EQ(path[0], n1);\n    EXPECT_EQ(path[1], n2);\n    EXPECT_EQ(d.getData(n1), 1U);\n    EXPECT_EQ(d.getData(n2), 2U);\n    d.onUnlock(&dtls, n1);\n    d.onUnlock(&dtls, n2);\n  }\n\n  // Cycle: n1->n2->n3->n1\n  {\n    d.clear();\n    dtls.clear();\n    uptr n1 = d.newNode(1);\n    uptr n2 = d.newNode(2);\n    uptr n3 = d.newNode(3);\n\n    EXPECT_FALSE(d.onLock(&dtls, n1));\n    EXPECT_FALSE(d.onLock(&dtls, n2));\n    d.onUnlock(&dtls, n2);\n    d.onUnlock(&dtls, n1);\n\n    EXPECT_FALSE(d.onLock(&dtls, n2));\n    EXPECT_FALSE(d.onLock(&dtls, n3));\n    d.onUnlock(&dtls, n3);\n    d.onUnlock(&dtls, n2);\n\n    EXPECT_FALSE(d.onLock(&dtls, n3));\n    EXPECT_EQ(0U, d.findPathToLock(&dtls, n1, path, 2));\n    EXPECT_EQ(3U, d.findPathToLock(&dtls, n1, path, 10));\n    EXPECT_TRUE(d.onLock(&dtls, n1));\n    EXPECT_EQ(path[0], n1);\n    EXPECT_EQ(path[1], n2);\n    EXPECT_EQ(path[2], n3);\n    EXPECT_EQ(d.getData(n1), 1U);\n    EXPECT_EQ(d.getData(n2), 2U);\n    EXPECT_EQ(d.getData(n3), 3U);\n    d.onUnlock(&dtls, n1);\n    d.onUnlock(&dtls, n3);\n  }\n}\n\nTEST(DeadlockDetector, BasicTest) {\n  RunBasicTest<BV1>();\n  RunBasicTest<BV2>();\n  RunBasicTest<BV3>();\n  RunBasicTest<BV4>();\n}\n\ntemplate <class BV>\nvoid RunRemoveNodeTest() {\n  ScopedDD<BV> sdd;\n  DeadlockDetector<BV> &d = *sdd.dp;\n  DeadlockDetectorTLS<BV> &dtls = sdd.dtls;\n\n  uptr l0 = d.newNode(0);\n  uptr l1 = d.newNode(1);\n  uptr l2 = d.newNode(2);\n  uptr l3 = d.newNode(3);\n  uptr l4 = 
d.newNode(4);\n  uptr l5 = d.newNode(5);\n\n  // l0=>l1=>l2\n  d.onLock(&dtls, l0);\n  d.onLock(&dtls, l1);\n  d.onLock(&dtls, l2);\n  d.onUnlock(&dtls, l1);\n  d.onUnlock(&dtls, l0);\n  d.onUnlock(&dtls, l2);\n  // l3=>l4=>l5\n  d.onLock(&dtls, l3);\n  d.onLock(&dtls, l4);\n  d.onLock(&dtls, l5);\n  d.onUnlock(&dtls, l4);\n  d.onUnlock(&dtls, l3);\n  d.onUnlock(&dtls, l5);\n\n  set<uptr> locks;\n  locks.insert(l0);\n  locks.insert(l1);\n  locks.insert(l2);\n  locks.insert(l3);\n  locks.insert(l4);\n  locks.insert(l5);\n  for (uptr i = 6; i < d.size(); i++) {\n    uptr lt = d.newNode(i);\n    locks.insert(lt);\n    d.onLock(&dtls, lt);\n    d.onUnlock(&dtls, lt);\n    d.removeNode(lt);\n  }\n  EXPECT_EQ(locks.size(), d.size());\n  // l2=>l0\n  EXPECT_FALSE(d.onLock(&dtls, l2));\n  EXPECT_TRUE(d.onLock(&dtls, l0));\n  d.onUnlock(&dtls, l2);\n  d.onUnlock(&dtls, l0);\n  // l4=>l3\n  EXPECT_FALSE(d.onLock(&dtls, l4));\n  EXPECT_TRUE(d.onLock(&dtls, l3));\n  d.onUnlock(&dtls, l4);\n  d.onUnlock(&dtls, l3);\n\n  EXPECT_EQ(d.size(), d.testOnlyGetEpoch());\n\n  d.removeNode(l2);\n  d.removeNode(l3);\n  locks.clear();\n  // make sure no edges from or to l0,l1,l4,l5 left.\n  for (uptr i = 4; i < d.size(); i++) {\n    uptr lt = d.newNode(i);\n    locks.insert(lt);\n    uptr a, b;\n    // l0 => lt?\n    a = l0; b = lt;\n    EXPECT_FALSE(d.onLock(&dtls, a));\n    EXPECT_FALSE(d.onLock(&dtls, b));\n    d.onUnlock(&dtls, a);\n    d.onUnlock(&dtls, b);\n    // l1 => lt?\n    a = l1; b = lt;\n    EXPECT_FALSE(d.onLock(&dtls, a));\n    EXPECT_FALSE(d.onLock(&dtls, b));\n    d.onUnlock(&dtls, a);\n    d.onUnlock(&dtls, b);\n    // lt => l4?\n    a = lt; b = l4;\n    EXPECT_FALSE(d.onLock(&dtls, a));\n    EXPECT_FALSE(d.onLock(&dtls, b));\n    d.onUnlock(&dtls, a);\n    d.onUnlock(&dtls, b);\n    // lt => l5?\n    a = lt; b = l5;\n    EXPECT_FALSE(d.onLock(&dtls, a));\n    EXPECT_FALSE(d.onLock(&dtls, b));\n    d.onUnlock(&dtls, a);\n    d.onUnlock(&dtls, b);\n\n    
d.removeNode(lt);\n  }\n  // Still the same epoch.\n  EXPECT_EQ(d.size(), d.testOnlyGetEpoch());\n  EXPECT_EQ(locks.size(), d.size() - 4);\n  // l2 and l3 should have ben reused.\n  EXPECT_EQ(locks.count(l2), 1U);\n  EXPECT_EQ(locks.count(l3), 1U);\n}\n\nTEST(DeadlockDetector, RemoveNodeTest) {\n  RunRemoveNodeTest<BV1>();\n  RunRemoveNodeTest<BV2>();\n  RunRemoveNodeTest<BV3>();\n  RunRemoveNodeTest<BV4>();\n}\n\ntemplate <class BV>\nvoid RunMultipleEpochsTest() {\n  ScopedDD<BV> sdd;\n  DeadlockDetector<BV> &d = *sdd.dp;\n  DeadlockDetectorTLS<BV> &dtls = sdd.dtls;\n\n  set<uptr> locks;\n  for (uptr i = 0; i < d.size(); i++) {\n    EXPECT_TRUE(locks.insert(d.newNode(i)).second);\n  }\n  EXPECT_EQ(d.testOnlyGetEpoch(), d.size());\n  for (uptr i = 0; i < d.size(); i++) {\n    EXPECT_TRUE(locks.insert(d.newNode(i)).second);\n    EXPECT_EQ(d.testOnlyGetEpoch(), d.size() * 2);\n  }\n  locks.clear();\n\n  uptr l0 = d.newNode(0);\n  uptr l1 = d.newNode(0);\n  d.onLock(&dtls, l0);\n  d.onLock(&dtls, l1);\n  d.onUnlock(&dtls, l0);\n  EXPECT_EQ(d.testOnlyGetEpoch(), 3 * d.size());\n  for (uptr i = 0; i < d.size(); i++) {\n    EXPECT_TRUE(locks.insert(d.newNode(i)).second);\n  }\n  EXPECT_EQ(d.testOnlyGetEpoch(), 4 * d.size());\n\n#if !SANITIZER_DEBUG\n  // EXPECT_DEATH clones a thread with 4K stack,\n  // which is overflown by tsan memory accesses functions in debug mode.\n\n  // Can not handle the locks from the previous epoch.\n  // The caller should update the lock id.\n  EXPECT_DEATH(d.onLock(&dtls, l0), \"CHECK failed.*current_epoch_\");\n#endif\n}\n\nTEST(DeadlockDetector, MultipleEpochsTest) {\n  RunMultipleEpochsTest<BV1>();\n  RunMultipleEpochsTest<BV2>();\n  RunMultipleEpochsTest<BV3>();\n  RunMultipleEpochsTest<BV4>();\n}\n\ntemplate <class BV>\nvoid RunCorrectEpochFlush() {\n  ScopedDD<BV> sdd;\n  DeadlockDetector<BV> &d = *sdd.dp;\n  DeadlockDetectorTLS<BV> &dtls = sdd.dtls;\n  vector<uptr> locks1;\n  for (uptr i = 0; i < d.size(); i++)\n    
locks1.push_back(d.newNode(i));\n  EXPECT_EQ(d.testOnlyGetEpoch(), d.size());\n  d.onLock(&dtls, locks1[3]);\n  d.onLock(&dtls, locks1[4]);\n  d.onLock(&dtls, locks1[5]);\n\n  // We have a new epoch, old locks in dtls will have to be forgotten.\n  uptr l0 = d.newNode(0);\n  EXPECT_EQ(d.testOnlyGetEpoch(), d.size() * 2);\n  uptr l1 = d.newNode(0);\n  EXPECT_EQ(d.testOnlyGetEpoch(), d.size() * 2);\n  d.onLock(&dtls, l0);\n  d.onLock(&dtls, l1);\n  EXPECT_TRUE(d.testOnlyHasEdgeRaw(0, 1));\n  EXPECT_FALSE(d.testOnlyHasEdgeRaw(1, 0));\n  EXPECT_FALSE(d.testOnlyHasEdgeRaw(3, 0));\n  EXPECT_FALSE(d.testOnlyHasEdgeRaw(4, 0));\n  EXPECT_FALSE(d.testOnlyHasEdgeRaw(5, 0));\n}\n\nTEST(DeadlockDetector, CorrectEpochFlush) {\n  RunCorrectEpochFlush<BV1>();\n  RunCorrectEpochFlush<BV2>();\n}\n\ntemplate <class BV>\nvoid RunTryLockTest() {\n  ScopedDD<BV> sdd;\n  DeadlockDetector<BV> &d = *sdd.dp;\n  DeadlockDetectorTLS<BV> &dtls = sdd.dtls;\n\n  uptr l0 = d.newNode(0);\n  uptr l1 = d.newNode(0);\n  uptr l2 = d.newNode(0);\n  EXPECT_FALSE(d.onLock(&dtls, l0));\n  EXPECT_FALSE(d.onTryLock(&dtls, l1));\n  EXPECT_FALSE(d.onLock(&dtls, l2));\n  EXPECT_TRUE(d.isHeld(&dtls, l0));\n  EXPECT_TRUE(d.isHeld(&dtls, l1));\n  EXPECT_TRUE(d.isHeld(&dtls, l2));\n  EXPECT_FALSE(d.testOnlyHasEdge(l0, l1));\n  EXPECT_TRUE(d.testOnlyHasEdge(l1, l2));\n  d.onUnlock(&dtls, l0);\n  d.onUnlock(&dtls, l1);\n  d.onUnlock(&dtls, l2);\n}\n\nTEST(DeadlockDetector, TryLockTest) {\n  RunTryLockTest<BV1>();\n  RunTryLockTest<BV2>();\n}\n\ntemplate <class BV>\nvoid RunOnFirstLockTest() {\n  ScopedDD<BV> sdd;\n  DeadlockDetector<BV> &d = *sdd.dp;\n  DeadlockDetectorTLS<BV> &dtls = sdd.dtls;\n\n  uptr l0 = d.newNode(0);\n  uptr l1 = d.newNode(0);\n  EXPECT_FALSE(d.onFirstLock(&dtls, l0));  // dtls has old epoch.\n  d.onLock(&dtls, l0);\n  d.onUnlock(&dtls, l0);\n\n  EXPECT_TRUE(d.onFirstLock(&dtls, l0));  // Ok, same ecpoch, first lock.\n  EXPECT_FALSE(d.onFirstLock(&dtls, l1));  // Second lock.\n  d.onLock(&dtls, 
l1);\n  d.onUnlock(&dtls, l1);\n  d.onUnlock(&dtls, l0);\n\n  EXPECT_TRUE(d.onFirstLock(&dtls, l0));  // Ok\n  d.onUnlock(&dtls, l0);\n\n  vector<uptr> locks1;\n  for (uptr i = 0; i < d.size(); i++)\n    locks1.push_back(d.newNode(i));\n\n  EXPECT_TRUE(d.onFirstLock(&dtls, l0));  // Epoch has changed, but not in dtls.\n\n  uptr l3 = d.newNode(0);\n  d.onLock(&dtls, l3);\n  d.onUnlock(&dtls, l3);\n\n  EXPECT_FALSE(d.onFirstLock(&dtls, l0));  // Epoch has changed in dtls.\n}\n\nTEST(DeadlockDetector, onFirstLockTest) {\n  RunOnFirstLockTest<BV2>();\n}\n\ntemplate <class BV>\nvoid RunRecusriveLockTest() {\n  ScopedDD<BV> sdd;\n  DeadlockDetector<BV> &d = *sdd.dp;\n  DeadlockDetectorTLS<BV> &dtls = sdd.dtls;\n\n  uptr l0 = d.newNode(0);\n  uptr l1 = d.newNode(0);\n  uptr l2 = d.newNode(0);\n  uptr l3 = d.newNode(0);\n\n  EXPECT_FALSE(d.onLock(&dtls, l0));\n  EXPECT_FALSE(d.onLock(&dtls, l1));\n  EXPECT_FALSE(d.onLock(&dtls, l0));  // Recurisve.\n  EXPECT_FALSE(d.onLock(&dtls, l2));\n  d.onUnlock(&dtls, l0);\n  EXPECT_FALSE(d.onLock(&dtls, l3));\n  d.onUnlock(&dtls, l0);\n  d.onUnlock(&dtls, l1);\n  d.onUnlock(&dtls, l2);\n  d.onUnlock(&dtls, l3);\n  EXPECT_TRUE(d.testOnlyHasEdge(l0, l1));\n  EXPECT_TRUE(d.testOnlyHasEdge(l0, l2));\n  EXPECT_TRUE(d.testOnlyHasEdge(l0, l3));\n}\n\nTEST(DeadlockDetector, RecusriveLockTest) {\n  RunRecusriveLockTest<BV2>();\n}\n\ntemplate <class BV>\nvoid RunLockContextTest() {\n  ScopedDD<BV> sdd;\n  DeadlockDetector<BV> &d = *sdd.dp;\n  DeadlockDetectorTLS<BV> &dtls = sdd.dtls;\n\n  uptr l0 = d.newNode(0);\n  uptr l1 = d.newNode(0);\n  uptr l2 = d.newNode(0);\n  uptr l3 = d.newNode(0);\n  uptr l4 = d.newNode(0);\n  EXPECT_FALSE(d.onLock(&dtls, l0, 10));\n  EXPECT_FALSE(d.onLock(&dtls, l1, 11));\n  EXPECT_FALSE(d.onLock(&dtls, l2, 12));\n  EXPECT_FALSE(d.onLock(&dtls, l3, 13));\n  EXPECT_EQ(10U, d.findLockContext(&dtls, l0));\n  EXPECT_EQ(11U, d.findLockContext(&dtls, l1));\n  EXPECT_EQ(12U, d.findLockContext(&dtls, l2));\n  
EXPECT_EQ(13U, d.findLockContext(&dtls, l3));\n  d.onUnlock(&dtls, l0);\n  EXPECT_EQ(0U, d.findLockContext(&dtls, l0));\n  EXPECT_EQ(11U, d.findLockContext(&dtls, l1));\n  EXPECT_EQ(12U, d.findLockContext(&dtls, l2));\n  EXPECT_EQ(13U, d.findLockContext(&dtls, l3));\n  d.onUnlock(&dtls, l2);\n  EXPECT_EQ(0U, d.findLockContext(&dtls, l0));\n  EXPECT_EQ(11U, d.findLockContext(&dtls, l1));\n  EXPECT_EQ(0U, d.findLockContext(&dtls, l2));\n  EXPECT_EQ(13U, d.findLockContext(&dtls, l3));\n\n  EXPECT_FALSE(d.onLock(&dtls, l4, 14));\n  EXPECT_EQ(14U, d.findLockContext(&dtls, l4));\n}\n\nTEST(DeadlockDetector, LockContextTest) {\n  RunLockContextTest<BV2>();\n}\n\ntemplate <class BV>\nvoid RunRemoveEdgesTest() {\n  ScopedDD<BV> sdd;\n  DeadlockDetector<BV> &d = *sdd.dp;\n  DeadlockDetectorTLS<BV> &dtls = sdd.dtls;\n  vector<uptr> node(BV::kSize);\n  u32 stk_from = 0, stk_to = 0;\n  int unique_tid = 0;\n  for (size_t i = 0; i < BV::kSize; i++)\n    node[i] = d.newNode(0);\n\n  for (size_t i = 0; i < BV::kSize; i++)\n    EXPECT_FALSE(d.onLock(&dtls, node[i], i + 1));\n  for (size_t i = 0; i < BV::kSize; i++) {\n    for (uptr j = i + 1; j < BV::kSize; j++) {\n      EXPECT_TRUE(\n          d.findEdge(node[i], node[j], &stk_from, &stk_to, &unique_tid));\n      EXPECT_EQ(stk_from, i + 1);\n      EXPECT_EQ(stk_to, j + 1);\n    }\n  }\n  EXPECT_EQ(d.testOnlyGetEpoch(), d.size());\n  // Remove and re-create half of the nodes.\n  for (uptr i = 1; i < BV::kSize; i += 2)\n    d.removeNode(node[i]);\n  for (uptr i = 1; i < BV::kSize; i += 2)\n    node[i] = d.newNode(0);\n  EXPECT_EQ(d.testOnlyGetEpoch(), d.size());\n  // The edges from or to the removed nodes should be gone.\n  for (size_t i = 0; i < BV::kSize; i++) {\n    for (uptr j = i + 1; j < BV::kSize; j++) {\n      if ((i % 2) || (j % 2))\n        EXPECT_FALSE(\n            d.findEdge(node[i], node[j], &stk_from, &stk_to, &unique_tid));\n      else\n        EXPECT_TRUE(\n            d.findEdge(node[i], node[j], &stk_from, 
&stk_to, &unique_tid));\n    }\n  }\n}\n\nTEST(DeadlockDetector, RemoveEdgesTest) {\n  RunRemoveEdgesTest<BV1>();\n}\n"
  },
  {
    "path": "runtime/sanitizer_common/tests/sanitizer_dense_map_test.cpp",
    "content": "//===- sanitizer_dense_map_test.cpp -----------------------------*- C++ -*-===//\n//\n// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n// See https://llvm.org/LICENSE.txt for license information.\n// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n//\n//===----------------------------------------------------------------------===//\n\n#include \"sanitizer_common/sanitizer_dense_map.h\"\n\n#include <initializer_list>\n#include <map>\n#include <set>\n\n#include \"gtest/gtest.h\"\n\nusing namespace __sanitizer;\n\nnamespace {\n\n// Helps to keep some tests.\ntemplate <typename KeyT, typename ValueT,\n          typename KeyInfoT = DenseMapInfo<KeyT>>\nclass TestDenseMap : public DenseMap<KeyT, ValueT, KeyInfoT> {\n  using BaseT = DenseMap<KeyT, ValueT, KeyInfoT>;\n\n public:\n  using BaseT::BaseT;\n\n  TestDenseMap(std::initializer_list<typename BaseT::value_type> Vals)\n      : BaseT(Vals.size()) {\n    for (const auto &V : Vals) this->BaseT::insert(V);\n  }\n\n  template <typename I>\n  TestDenseMap(I B, I E) : BaseT(std::distance(B, E)) {\n    for (; B != E; ++B) this->BaseT::insert(*B);\n  }\n};\n\ntemplate <typename... 
T>\nusing DenseMap = TestDenseMap<T...>;\n\nuint32_t getTestKey(int i, uint32_t *) { return i; }\nuint32_t getTestValue(int i, uint32_t *) { return 42 + i; }\n\nuint32_t *getTestKey(int i, uint32_t **) {\n  static uint32_t dummy_arr1[8192];\n  assert(i < 8192 && \"Only support 8192 dummy keys.\");\n  return &dummy_arr1[i];\n}\nuint32_t *getTestValue(int i, uint32_t **) {\n  static uint32_t dummy_arr1[8192];\n  assert(i < 8192 && \"Only support 8192 dummy keys.\");\n  return &dummy_arr1[i];\n}\n\n/// A test class that tries to check that construction and destruction\n/// occur correctly.\nclass CtorTester {\n  static std::set<CtorTester *> Constructed;\n  int Value;\n\n public:\n  explicit CtorTester(int Value = 0) : Value(Value) {\n    EXPECT_TRUE(Constructed.insert(this).second);\n  }\n  CtorTester(uint32_t Value) : Value(Value) {\n    EXPECT_TRUE(Constructed.insert(this).second);\n  }\n  CtorTester(const CtorTester &Arg) : Value(Arg.Value) {\n    EXPECT_TRUE(Constructed.insert(this).second);\n  }\n  CtorTester &operator=(const CtorTester &) = default;\n  ~CtorTester() { EXPECT_EQ(1u, Constructed.erase(this)); }\n  operator uint32_t() const { return Value; }\n\n  int getValue() const { return Value; }\n  bool operator==(const CtorTester &RHS) const { return Value == RHS.Value; }\n};\n\nstd::set<CtorTester *> CtorTester::Constructed;\n\nstruct CtorTesterMapInfo {\n  static inline CtorTester getEmptyKey() { return CtorTester(-1); }\n  static inline CtorTester getTombstoneKey() { return CtorTester(-2); }\n  static unsigned getHashValue(const CtorTester &Val) {\n    return Val.getValue() * 37u;\n  }\n  static bool isEqual(const CtorTester &LHS, const CtorTester &RHS) {\n    return LHS == RHS;\n  }\n};\n\nCtorTester getTestKey(int i, CtorTester *) { return CtorTester(i); }\nCtorTester getTestValue(int i, CtorTester *) { return CtorTester(42 + i); }\n\n// Test fixture, with helper functions implemented by forwarding to global\n// function overloads selected by component 
types of the type parameter. This\n// allows all of the map implementations to be tested with shared\n// implementations of helper routines.\ntemplate <typename T>\nclass DenseMapTest : public ::testing::Test {\n protected:\n  T Map;\n\n  static typename T::key_type *const dummy_key_ptr;\n  static typename T::mapped_type *const dummy_value_ptr;\n\n  typename T::key_type getKey(int i = 0) {\n    return getTestKey(i, dummy_key_ptr);\n  }\n  typename T::mapped_type getValue(int i = 0) {\n    return getTestValue(i, dummy_value_ptr);\n  }\n};\n\ntemplate <typename T>\ntypename T::key_type *const DenseMapTest<T>::dummy_key_ptr = nullptr;\ntemplate <typename T>\ntypename T::mapped_type *const DenseMapTest<T>::dummy_value_ptr = nullptr;\n\n// Register these types for testing.\ntypedef ::testing::Types<DenseMap<uint32_t, uint32_t>,\n                         DenseMap<uint32_t *, uint32_t *>,\n                         DenseMap<CtorTester, CtorTester, CtorTesterMapInfo>>\n    DenseMapTestTypes;\nTYPED_TEST_SUITE(DenseMapTest, DenseMapTestTypes, );\n\n// Empty map tests\nTYPED_TEST(DenseMapTest, EmptyIntMapTest) {\n  // Size tests\n  EXPECT_EQ(0u, this->Map.size());\n  EXPECT_TRUE(this->Map.empty());\n\n  // Lookup tests\n  EXPECT_FALSE(this->Map.count(this->getKey()));\n  EXPECT_EQ(nullptr, this->Map.find(this->getKey()));\n  EXPECT_EQ(typename TypeParam::mapped_type(),\n            this->Map.lookup(this->getKey()));\n}\n\n// Constant map tests\nTYPED_TEST(DenseMapTest, ConstEmptyMapTest) {\n  const TypeParam &ConstMap = this->Map;\n  EXPECT_EQ(0u, ConstMap.size());\n  EXPECT_TRUE(ConstMap.empty());\n}\n\n// A map with a single entry\nTYPED_TEST(DenseMapTest, SingleEntryMapTest) {\n  this->Map[this->getKey()] = this->getValue();\n\n  // Size tests\n  EXPECT_EQ(1u, this->Map.size());\n  EXPECT_FALSE(this->Map.empty());\n\n  // Lookup tests\n  EXPECT_TRUE(this->Map.count(this->getKey()));\n  EXPECT_NE(nullptr, this->Map.find(this->getKey()));\n  EXPECT_EQ(this->getValue(), 
this->Map.lookup(this->getKey()));\n  EXPECT_EQ(this->getValue(), this->Map[this->getKey()]);\n}\n\n// Test clear() method\nTYPED_TEST(DenseMapTest, ClearTest) {\n  this->Map[this->getKey()] = this->getValue();\n  this->Map.clear();\n\n  EXPECT_EQ(0u, this->Map.size());\n  EXPECT_TRUE(this->Map.empty());\n}\n\n// Test erase(iterator) method\nTYPED_TEST(DenseMapTest, EraseTest) {\n  this->Map[this->getKey()] = this->getValue();\n  this->Map.erase(this->Map.find(this->getKey()));\n\n  EXPECT_EQ(0u, this->Map.size());\n  EXPECT_TRUE(this->Map.empty());\n}\n\n// Test erase(value) method\nTYPED_TEST(DenseMapTest, EraseTest2) {\n  this->Map[this->getKey()] = this->getValue();\n  this->Map.erase(this->getKey());\n\n  EXPECT_EQ(0u, this->Map.size());\n  EXPECT_TRUE(this->Map.empty());\n}\n\n// Test insert() method\nTYPED_TEST(DenseMapTest, InsertTest) {\n  this->Map.insert(\n      typename TypeParam::value_type(this->getKey(), this->getValue()));\n  EXPECT_EQ(1u, this->Map.size());\n  EXPECT_EQ(this->getValue(), this->Map[this->getKey()]);\n}\n\n// Test copy constructor method\nTYPED_TEST(DenseMapTest, CopyConstructorTest) {\n  this->Map[this->getKey()] = this->getValue();\n  TypeParam copyMap(this->Map);\n\n  EXPECT_EQ(1u, copyMap.size());\n  EXPECT_EQ(this->getValue(), copyMap[this->getKey()]);\n}\n\n// Test copy constructor method where SmallDenseMap isn't small.\nTYPED_TEST(DenseMapTest, CopyConstructorNotSmallTest) {\n  for (int Key = 0; Key < 5; ++Key)\n    this->Map[this->getKey(Key)] = this->getValue(Key);\n  TypeParam copyMap(this->Map);\n\n  EXPECT_EQ(5u, copyMap.size());\n  for (int Key = 0; Key < 5; ++Key)\n    EXPECT_EQ(this->getValue(Key), copyMap[this->getKey(Key)]);\n}\n\n// Test copying from a default-constructed map.\nTYPED_TEST(DenseMapTest, CopyConstructorFromDefaultTest) {\n  TypeParam copyMap(this->Map);\n\n  EXPECT_TRUE(copyMap.empty());\n}\n\n// Test copying from an empty map where SmallDenseMap isn't small.\nTYPED_TEST(DenseMapTest, 
CopyConstructorFromEmptyTest) {\n  for (int Key = 0; Key < 5; ++Key)\n    this->Map[this->getKey(Key)] = this->getValue(Key);\n  this->Map.clear();\n  TypeParam copyMap(this->Map);\n\n  EXPECT_TRUE(copyMap.empty());\n}\n\n// Test assignment operator method\nTYPED_TEST(DenseMapTest, AssignmentTest) {\n  this->Map[this->getKey()] = this->getValue();\n  TypeParam copyMap = this->Map;\n\n  EXPECT_EQ(1u, copyMap.size());\n  EXPECT_EQ(this->getValue(), copyMap[this->getKey()]);\n\n  // test self-assignment.\n  copyMap = static_cast<TypeParam &>(copyMap);\n  EXPECT_EQ(1u, copyMap.size());\n  EXPECT_EQ(this->getValue(), copyMap[this->getKey()]);\n}\n\nTYPED_TEST(DenseMapTest, AssignmentTestNotSmall) {\n  for (int Key = 0; Key < 5; ++Key)\n    this->Map[this->getKey(Key)] = this->getValue(Key);\n  TypeParam copyMap = this->Map;\n\n  EXPECT_EQ(5u, copyMap.size());\n  for (int Key = 0; Key < 5; ++Key)\n    EXPECT_EQ(this->getValue(Key), copyMap[this->getKey(Key)]);\n\n  // test self-assignment.\n  copyMap = static_cast<TypeParam &>(copyMap);\n  EXPECT_EQ(5u, copyMap.size());\n  for (int Key = 0; Key < 5; ++Key)\n    EXPECT_EQ(this->getValue(Key), copyMap[this->getKey(Key)]);\n}\n\n// Test swap method\nTYPED_TEST(DenseMapTest, SwapTest) {\n  this->Map[this->getKey()] = this->getValue();\n  TypeParam otherMap;\n\n  this->Map.swap(otherMap);\n  EXPECT_EQ(0u, this->Map.size());\n  EXPECT_TRUE(this->Map.empty());\n  EXPECT_EQ(1u, otherMap.size());\n  EXPECT_EQ(this->getValue(), otherMap[this->getKey()]);\n\n  this->Map.swap(otherMap);\n  EXPECT_EQ(0u, otherMap.size());\n  EXPECT_TRUE(otherMap.empty());\n  EXPECT_EQ(1u, this->Map.size());\n  EXPECT_EQ(this->getValue(), this->Map[this->getKey()]);\n\n  // Make this more interesting by inserting 100 numbers into the map.\n  for (int i = 0; i < 100; ++i) this->Map[this->getKey(i)] = this->getValue(i);\n\n  this->Map.swap(otherMap);\n  EXPECT_EQ(0u, this->Map.size());\n  EXPECT_TRUE(this->Map.empty());\n  EXPECT_EQ(100u, 
otherMap.size());\n  for (int i = 0; i < 100; ++i)\n    EXPECT_EQ(this->getValue(i), otherMap[this->getKey(i)]);\n\n  this->Map.swap(otherMap);\n  EXPECT_EQ(0u, otherMap.size());\n  EXPECT_TRUE(otherMap.empty());\n  EXPECT_EQ(100u, this->Map.size());\n  for (int i = 0; i < 100; ++i)\n    EXPECT_EQ(this->getValue(i), this->Map[this->getKey(i)]);\n}\n\n// A more complex iteration test\nTYPED_TEST(DenseMapTest, IterationTest) {\n  int visited[100];\n  std::map<typename TypeParam::key_type, unsigned> visitedIndex;\n\n  // Insert 100 numbers into the map\n  for (int i = 0; i < 100; ++i) {\n    visited[i] = 0;\n    visitedIndex[this->getKey(i)] = i;\n\n    this->Map[this->getKey(i)] = this->getValue(i);\n  }\n\n  // Iterate over all numbers and mark each one found.\n  this->Map.forEach([&](const typename TypeParam::value_type &kv) {\n    ++visited[visitedIndex[kv.first]];\n    return true;\n  });\n\n  // Ensure every number was visited.\n  for (int i = 0; i < 100; ++i) ASSERT_EQ(1, visited[i]);\n}\n\nnamespace {\n// Simple class that counts how many moves and copy happens when growing a map\nstruct CountCopyAndMove {\n  static int Move;\n  static int Copy;\n  CountCopyAndMove() {}\n\n  CountCopyAndMove(const CountCopyAndMove &) { Copy++; }\n  CountCopyAndMove &operator=(const CountCopyAndMove &) {\n    Copy++;\n    return *this;\n  }\n  CountCopyAndMove(CountCopyAndMove &&) { Move++; }\n  CountCopyAndMove &operator=(const CountCopyAndMove &&) {\n    Move++;\n    return *this;\n  }\n};\nint CountCopyAndMove::Copy = 0;\nint CountCopyAndMove::Move = 0;\n\n}  // anonymous namespace\n\n// Test initializer list construction.\nTEST(DenseMapCustomTest, InitializerList) {\n  DenseMap<int, int> M({{0, 0}, {0, 1}, {1, 2}});\n  EXPECT_EQ(2u, M.size());\n  EXPECT_EQ(1u, M.count(0));\n  EXPECT_EQ(0, M[0]);\n  EXPECT_EQ(1u, M.count(1));\n  EXPECT_EQ(2, M[1]);\n}\n\n// Test initializer list construction.\nTEST(DenseMapCustomTest, EqualityComparison) {\n  DenseMap<int, int> M1({{0, 0}, 
{1, 2}});\n  DenseMap<int, int> M2({{0, 0}, {1, 2}});\n  DenseMap<int, int> M3({{0, 0}, {1, 3}});\n\n  EXPECT_EQ(M1, M2);\n  EXPECT_NE(M1, M3);\n}\n\n// Test for the default minimum size of a DenseMap\nTEST(DenseMapCustomTest, DefaultMinReservedSizeTest) {\n  // IF THIS VALUE CHANGE, please update InitialSizeTest, InitFromIterator, and\n  // ReserveTest as well!\n  const int ExpectedInitialBucketCount = 512;\n  // Formula from DenseMap::getMinBucketToReserveForEntries()\n  const int ExpectedMaxInitialEntries = ExpectedInitialBucketCount * 3 / 4 - 1;\n\n  DenseMap<int, CountCopyAndMove> Map;\n  // Will allocate 64 buckets\n  Map.reserve(1);\n  unsigned MemorySize = Map.getMemorySize();\n  CountCopyAndMove::Copy = 0;\n  CountCopyAndMove::Move = 0;\n  for (int i = 0; i < ExpectedMaxInitialEntries; ++i) {\n    detail::DenseMapPair<int, CountCopyAndMove> KV;\n    KV.first = i;\n    Map.insert(move(KV));\n  }\n  // Check that we didn't grow\n  EXPECT_EQ(MemorySize, Map.getMemorySize());\n  // Check that move was called the expected number of times\n  EXPECT_EQ(ExpectedMaxInitialEntries, CountCopyAndMove::Move);\n  // Check that no copy occurred\n  EXPECT_EQ(0, CountCopyAndMove::Copy);\n\n  // Adding one extra element should grow the map\n  detail::DenseMapPair<int, CountCopyAndMove> KV;\n  KV.first = ExpectedMaxInitialEntries;\n  Map.insert(move(KV));\n  // Check that we grew\n  EXPECT_NE(MemorySize, Map.getMemorySize());\n  // Check that move was called the expected number of times\n  //  This relies on move-construction elision, and cannot be reliably tested.\n  //   EXPECT_EQ(ExpectedMaxInitialEntries + 2, CountCopyAndMove::Move);\n  // Check that no copy occurred\n  EXPECT_EQ(0, CountCopyAndMove::Copy);\n}\n\n// Make sure creating the map with an initial size of N actually gives us enough\n// buckets to insert N items without increasing allocation size.\nTEST(DenseMapCustomTest, InitialSizeTest) {\n  // Test a few different size, 341 is *not* a random choice: we need 
a value\n  // that is 2/3 of a power of two to stress the grow() condition, and the power\n  // of two has to be at least 512 because of minimum size allocation in the\n  // DenseMap (see DefaultMinReservedSizeTest). 513 is a value just above the\n  // 512 default init.\n  for (auto Size : {1, 2, 48, 66, 341, 513}) {\n    DenseMap<int, CountCopyAndMove> Map(Size);\n    unsigned MemorySize = Map.getMemorySize();\n    CountCopyAndMove::Copy = 0;\n    CountCopyAndMove::Move = 0;\n    for (int i = 0; i < Size; ++i) {\n      detail::DenseMapPair<int, CountCopyAndMove> KV;\n      KV.first = i;\n      Map.insert(move(KV));\n    }\n    // Check that we didn't grow\n    EXPECT_EQ(MemorySize, Map.getMemorySize());\n    // Check that move was called the expected number of times\n    EXPECT_EQ(Size, CountCopyAndMove::Move);\n    // Check that no copy occurred\n    EXPECT_EQ(0, CountCopyAndMove::Copy);\n  }\n}\n\n// Make sure creating the map with a iterator range does not trigger grow()\nTEST(DenseMapCustomTest, InitFromIterator) {\n  std::vector<detail::DenseMapPair<int, CountCopyAndMove>> Values;\n  // The size is a random value greater than 64 (hardcoded DenseMap min init)\n  const int Count = 65;\n  for (int i = 0; i < Count; i++) Values.emplace_back(i, CountCopyAndMove());\n\n  CountCopyAndMove::Move = 0;\n  CountCopyAndMove::Copy = 0;\n  DenseMap<int, CountCopyAndMove> Map(Values.begin(), Values.end());\n  // Check that no move occurred\n  EXPECT_EQ(0, CountCopyAndMove::Move);\n  // Check that copy was called the expected number of times\n  EXPECT_EQ(Count, CountCopyAndMove::Copy);\n}\n\n// Make sure reserve actually gives us enough buckets to insert N items\n// without increasing allocation size.\nTEST(DenseMapCustomTest, ReserveTest) {\n  // Test a few different size, 341 is *not* a random choice: we need a value\n  // that is 2/3 of a power of two to stress the grow() condition, and the power\n  // of two has to be at least 512 because of minimum size allocation in 
the\n  // DenseMap (see DefaultMinReservedSizeTest). 513 is a value just above the\n  // 512 default init.\n  for (auto Size : {1, 2, 48, 66, 341, 513}) {\n    DenseMap<int, CountCopyAndMove> Map;\n    Map.reserve(Size);\n    unsigned MemorySize = Map.getMemorySize();\n    CountCopyAndMove::Copy = 0;\n    CountCopyAndMove::Move = 0;\n    for (int i = 0; i < Size; ++i) {\n      detail::DenseMapPair<int, CountCopyAndMove> KV;\n      KV.first = i;\n      Map.insert(move(KV));\n    }\n    // Check that we didn't grow\n    EXPECT_EQ(MemorySize, Map.getMemorySize());\n    // Check that move was called the expected number of times\n    EXPECT_EQ(Size, CountCopyAndMove::Move);\n    // Check that no copy occurred\n    EXPECT_EQ(0, CountCopyAndMove::Copy);\n  }\n}\n\n// Key traits that allows lookup with either an unsigned or char* key;\n// In the latter case, \"a\" == 0, \"b\" == 1 and so on.\nstruct TestDenseMapInfo {\n  static inline unsigned getEmptyKey() { return ~0; }\n  static inline unsigned getTombstoneKey() { return ~0U - 1; }\n  static unsigned getHashValue(const unsigned &Val) { return Val * 37U; }\n  static unsigned getHashValue(const char *Val) {\n    return (unsigned)(Val[0] - 'a') * 37U;\n  }\n  static bool isEqual(const unsigned &LHS, const unsigned &RHS) {\n    return LHS == RHS;\n  }\n  static bool isEqual(const char *LHS, const unsigned &RHS) {\n    return (unsigned)(LHS[0] - 'a') == RHS;\n  }\n};\n\n// find_as() tests\nTEST(DenseMapCustomTest, FindAsTest) {\n  DenseMap<unsigned, unsigned, TestDenseMapInfo> map;\n  map[0] = 1;\n  map[1] = 2;\n  map[2] = 3;\n\n  // Size tests\n  EXPECT_EQ(3u, map.size());\n\n  // Normal lookup tests\n  EXPECT_EQ(1u, map.count(1));\n  EXPECT_EQ(1u, map.find(0)->second);\n  EXPECT_EQ(2u, map.find(1)->second);\n  EXPECT_EQ(3u, map.find(2)->second);\n  EXPECT_EQ(nullptr, map.find(3));\n\n  // find_as() tests\n  EXPECT_EQ(1u, map.find_as(\"a\")->second);\n  EXPECT_EQ(2u, map.find_as(\"b\")->second);\n  EXPECT_EQ(3u, 
map.find_as(\"c\")->second);\n  EXPECT_EQ(nullptr, map.find_as(\"d\"));\n}\n\nTEST(DenseMapCustomTest, TryEmplaceTest) {\n  DenseMap<int, std::unique_ptr<int>> Map;\n  std::unique_ptr<int> P(new int(2));\n  auto Try1 = Map.try_emplace(0, new int(1));\n  EXPECT_TRUE(Try1.second);\n  auto Try2 = Map.try_emplace(0, std::move(P));\n  EXPECT_FALSE(Try2.second);\n  EXPECT_EQ(Try1.first, Try2.first);\n  EXPECT_NE(nullptr, P);\n}\n\nstruct IncompleteStruct;\n\nTEST(DenseMapCustomTest, OpaquePointerKey) {\n  // Test that we can use a pointer to an incomplete type as a DenseMap key.\n  // This is an important build time optimization, since many classes have\n  // DenseMap members.\n  DenseMap<IncompleteStruct *, int> Map;\n  int Keys[3] = {0, 0, 0};\n  IncompleteStruct *K1 = reinterpret_cast<IncompleteStruct *>(&Keys[0]);\n  IncompleteStruct *K2 = reinterpret_cast<IncompleteStruct *>(&Keys[1]);\n  IncompleteStruct *K3 = reinterpret_cast<IncompleteStruct *>(&Keys[2]);\n  Map.insert({K1, 1});\n  Map.insert({K2, 2});\n  Map.insert({K3, 3});\n  EXPECT_EQ(Map.count(K1), 1u);\n  EXPECT_EQ(Map[K1], 1);\n  EXPECT_EQ(Map[K2], 2);\n  EXPECT_EQ(Map[K3], 3);\n  Map.clear();\n  EXPECT_EQ(nullptr, Map.find(K1));\n  EXPECT_EQ(nullptr, Map.find(K2));\n  EXPECT_EQ(nullptr, Map.find(K3));\n}\n}  // namespace\n"
  },
  {
    "path": "runtime/sanitizer_common/tests/sanitizer_flags_test.cpp",
    "content": "//===-- sanitizer_flags_test.cpp ------------------------------------------===//\n//\n// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n// See https://llvm.org/LICENSE.txt for license information.\n// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n//\n//===----------------------------------------------------------------------===//\n//\n// This file is a part of ThreadSanitizer/AddressSanitizer runtime.\n//\n//===----------------------------------------------------------------------===//\n#include \"sanitizer_common/sanitizer_common.h\"\n#include \"sanitizer_common/sanitizer_flags.h\"\n#include \"sanitizer_common/sanitizer_flag_parser.h\"\n#include \"sanitizer_common/sanitizer_libc.h\"\n#include \"sanitizer_common/sanitizer_allocator_internal.h\"\n#include \"gtest/gtest.h\"\n\n#include <stdint.h>\n#include <string.h>\n\nnamespace __sanitizer {\n\nstatic const char kFlagName[] = \"flag_name\";\nstatic const char kFlagDesc[] = \"flag description\";\n\ntemplate <typename T>\nstatic void TestFlag(T start_value, const char *env, T final_value) {\n  T flag = start_value;\n\n  FlagParser parser;\n  RegisterFlag(&parser, kFlagName, kFlagDesc, &flag);\n\n  parser.ParseString(env);\n\n  EXPECT_EQ(final_value, flag);\n\n  // Reporting unrecognized flags is needed to reset them.\n  ReportUnrecognizedFlags();\n}\n\ntemplate <>\nvoid TestFlag(const char *start_value, const char *env,\n                     const char *final_value) {\n  const char *flag = start_value;\n\n  FlagParser parser;\n  RegisterFlag(&parser, kFlagName, kFlagDesc, &flag);\n\n  parser.ParseString(env);\n\n  EXPECT_EQ(0, internal_strcmp(final_value, flag));\n\n  // Reporting unrecognized flags is needed to reset them.\n  ReportUnrecognizedFlags();\n}\n\nTEST(SanitizerCommon, BooleanFlags) {\n  TestFlag(false, \"flag_name=1\", true);\n  TestFlag(false, \"flag_name=yes\", true);\n  TestFlag(false, \"flag_name=true\", true);\n  TestFlag(true, 
\"flag_name=0\", false);\n  TestFlag(true, \"flag_name=no\", false);\n  TestFlag(true, \"flag_name=false\", false);\n\n  EXPECT_DEATH(TestFlag(false, \"flag_name\", true), \"expected '='\");\n  EXPECT_DEATH(TestFlag(false, \"flag_name=\", true),\n               \"Invalid value for bool option: ''\");\n  EXPECT_DEATH(TestFlag(false, \"flag_name=2\", true),\n               \"Invalid value for bool option: '2'\");\n  EXPECT_DEATH(TestFlag(false, \"flag_name=-1\", true),\n               \"Invalid value for bool option: '-1'\");\n  EXPECT_DEATH(TestFlag(false, \"flag_name=on\", true),\n               \"Invalid value for bool option: 'on'\");\n}\n\nTEST(SanitizerCommon, HandleSignalMode) {\n  TestFlag(kHandleSignalNo, \"flag_name=1\", kHandleSignalYes);\n  TestFlag(kHandleSignalNo, \"flag_name=yes\", kHandleSignalYes);\n  TestFlag(kHandleSignalNo, \"flag_name=true\", kHandleSignalYes);\n  TestFlag(kHandleSignalYes, \"flag_name=0\", kHandleSignalNo);\n  TestFlag(kHandleSignalYes, \"flag_name=no\", kHandleSignalNo);\n  TestFlag(kHandleSignalYes, \"flag_name=false\", kHandleSignalNo);\n  TestFlag(kHandleSignalNo, \"flag_name=2\", kHandleSignalExclusive);\n  TestFlag(kHandleSignalYes, \"flag_name=exclusive\", kHandleSignalExclusive);\n\n  EXPECT_DEATH(TestFlag(kHandleSignalNo, \"flag_name\", kHandleSignalNo),\n               \"expected '='\");\n  EXPECT_DEATH(TestFlag(kHandleSignalNo, \"flag_name=\", kHandleSignalNo),\n               \"Invalid value for signal handler option: ''\");\n  EXPECT_DEATH(TestFlag(kHandleSignalNo, \"flag_name=3\", kHandleSignalNo),\n               \"Invalid value for signal handler option: '3'\");\n  EXPECT_DEATH(TestFlag(kHandleSignalNo, \"flag_name=-1\", kHandleSignalNo),\n               \"Invalid value for signal handler option: '-1'\");\n  EXPECT_DEATH(TestFlag(kHandleSignalNo, \"flag_name=on\", kHandleSignalNo),\n               \"Invalid value for signal handler option: 'on'\");\n}\n\nTEST(SanitizerCommon, IntFlags) {\n  TestFlag(-11, 0, 
-11);\n  TestFlag(-11, \"flag_name=0\", 0);\n  TestFlag(-11, \"flag_name=42\", 42);\n  TestFlag(-11, \"flag_name=-42\", -42);\n\n  // Unrecognized flags are ignored.\n  TestFlag(-11, \"--flag_name=42\", -11);\n  TestFlag(-11, \"zzzzzzz=42\", -11);\n\n  EXPECT_DEATH(TestFlag(-11, \"flag_name\", 0), \"expected '='\");\n  EXPECT_DEATH(TestFlag(-11, \"flag_name=42U\", 0),\n               \"Invalid value for int option\");\n}\n\nTEST(SanitizerCommon, LongLongIntFlags) {\n  s64 InitValue = -5;\n  s64 IntMin = INT64_MIN;\n  s64 IntMax = INT64_MAX;\n  TestFlag(InitValue, \"flag_name=0\", 0ll);\n  TestFlag(InitValue, \"flag_name=42\", 42ll);\n  TestFlag(InitValue, \"flag_name=-42\", -42ll);\n\n  TestFlag(InitValue, \"flag_name=-9223372036854775808\", IntMin);\n  TestFlag(InitValue, \"flag_name=9223372036854775807\", IntMax);\n\n  TestFlag(InitValue, \"flag_name=-92233720368547758080000\", IntMin);\n  TestFlag(InitValue, \"flag_name=92233720368547758070000\", IntMax);\n}\n\nTEST(SanitizerCommon, StrFlags) {\n  TestFlag(\"zzz\", 0, \"zzz\");\n  TestFlag(\"zzz\", \"flag_name=\", \"\");\n  TestFlag(\"zzz\", \"flag_name=abc\", \"abc\");\n  TestFlag(\"\", \"flag_name=abc\", \"abc\");\n  TestFlag(\"\", \"flag_name='abc zxc'\", \"abc zxc\");\n  // TestStrFlag(\"\", \"flag_name=\\\"abc qwe\\\" asd\", \"abc qwe\");\n}\n\nstatic void TestTwoFlags(const char *env, bool expected_flag1,\n                         const char *expected_flag2,\n                         const char *name1 = \"flag1\",\n                         const char *name2 = \"flag2\") {\n  bool flag1 = !expected_flag1;\n  const char *flag2 = \"\";\n\n  FlagParser parser;\n  RegisterFlag(&parser, name1, kFlagDesc, &flag1);\n  RegisterFlag(&parser, name2, kFlagDesc, &flag2);\n\n  parser.ParseString(env);\n\n  EXPECT_EQ(expected_flag1, flag1);\n  EXPECT_EQ(0, internal_strcmp(flag2, expected_flag2));\n\n  // Reporting unrecognized flags is needed to reset them.\n  ReportUnrecognizedFlags();\n}\n\nTEST(SanitizerCommon, 
MultipleFlags) {\n  TestTwoFlags(\"flag1=1 flag2='zzz'\", true, \"zzz\");\n  TestTwoFlags(\"flag2='qxx' flag1=0\", false, \"qxx\");\n  TestTwoFlags(\"flag1=false:flag2='zzz'\", false, \"zzz\");\n  TestTwoFlags(\"flag2=qxx:flag1=yes\", true, \"qxx\");\n  TestTwoFlags(\"flag2=qxx\\nflag1=yes\", true, \"qxx\");\n  TestTwoFlags(\"flag2=qxx\\r\\nflag1=yes\", true, \"qxx\");\n  TestTwoFlags(\"flag2=qxx\\tflag1=yes\", true, \"qxx\");\n}\n\nTEST(SanitizerCommon, CommonSuffixFlags) {\n  TestTwoFlags(\"flag=1 other_flag='zzz'\", true, \"zzz\", \"flag\", \"other_flag\");\n  TestTwoFlags(\"other_flag='zzz' flag=1\", true, \"zzz\", \"flag\", \"other_flag\");\n  TestTwoFlags(\"other_flag=' flag=0 ' flag=1\", true, \" flag=0 \", \"flag\",\n               \"other_flag\");\n  TestTwoFlags(\"flag=1 other_flag=' flag=0 '\", true, \" flag=0 \", \"flag\",\n               \"other_flag\");\n}\n\nTEST(SanitizerCommon, CommonFlags) {\n  CommonFlags cf;\n  FlagParser parser;\n  RegisterCommonFlags(&parser, &cf);\n\n  cf.SetDefaults();\n  EXPECT_TRUE(cf.symbolize);\n  EXPECT_STREQ(\".\", cf.coverage_dir);\n\n  cf.symbolize = false;\n  cf.coverage = true;\n  cf.heap_profile = true;\n  cf.log_path = \"path/one\";\n\n  parser.ParseString(\"symbolize=1:heap_profile=false log_path='path/two'\");\n  EXPECT_TRUE(cf.symbolize);\n  EXPECT_TRUE(cf.coverage);\n  EXPECT_FALSE(cf.heap_profile);\n  EXPECT_STREQ(\"path/two\", cf.log_path);\n}\n\n}  // namespace __sanitizer\n"
  },
  {
    "path": "runtime/sanitizer_common/tests/sanitizer_flat_map_test.cpp",
    "content": "//===-- sanitizer_flat_map_test.cpp ---------------------------------------===//\n//\n// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n// See https://llvm.org/LICENSE.txt for license information.\n// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n//\n//===----------------------------------------------------------------------===//\n\n#include \"sanitizer_common/sanitizer_flat_map.h\"\n\n#include \"gtest/gtest.h\"\n#include \"sanitizer_common/tests/sanitizer_pthread_wrappers.h\"\n\nusing namespace __sanitizer;\n\nnamespace {\nstruct TestMapUnmapCallback1 {\n  static int map_count, unmap_count;\n  void OnMap(uptr p, uptr size) const { map_count++; }\n  void OnUnmap(uptr p, uptr size) const { unmap_count++; }\n};\nint TestMapUnmapCallback1::map_count;\nint TestMapUnmapCallback1::unmap_count;\n\nstruct TestStruct {\n  int data[125] = {};\n  TestStruct(uptr v = 0) { data[11] = v; }\n  bool operator==(const TestStruct &other) const {\n    return 0 == memcmp(data, other.data, sizeof(data));\n  }\n};\n\ntemplate <typename T>\nclass FlatMapTest : public ::testing::Test {};\n\nusing FlatMapTestTypes = ::testing::Types<u8, u64, TestStruct>;\nTYPED_TEST_SUITE(FlatMapTest, FlatMapTestTypes, );\n\nTYPED_TEST(FlatMapTest, TwoLevelByteMap) {\n  const u64 kSize1 = 1 << 6, kSize2 = 1 << 12;\n  const u64 n = kSize1 * kSize2;\n  TwoLevelMap<TypeParam, kSize1, kSize2> m;\n  m.Init();\n\n  m[7] = {10};\n  for (u64 i = 0; i < kSize2; ++i) {\n    EXPECT_TRUE(m.contains(i));\n  }\n  EXPECT_FALSE(m.contains(kSize2));\n\n  for (u64 i = 0; i < n; i += 7) {\n    m[i] = TypeParam((i % 100) + 1);\n  }\n  for (u64 j = 0; j < n; j++) {\n    EXPECT_TRUE(m.contains(j));\n    if (j % 7)\n      EXPECT_EQ(m[j], TypeParam());\n    else\n      EXPECT_EQ(m[j], TypeParam((j % 100) + 1));\n  }\n\n  m.TestOnlyUnmap();\n}\n\ntemplate <typename TypeParam, typename AddressSpaceView>\nusing TestMapASVT = TwoLevelMap<TypeParam, 1 << 8, 1 << 7, 
AddressSpaceView,\n                                TestMapUnmapCallback1>;\ntemplate <typename TypeParam>\nusing TestMap = TestMapASVT<TypeParam, LocalAddressSpaceView>;\n\ntemplate <typename TypeParam>\nstruct TestMapParam {\n  TestMap<TypeParam> *m;\n  size_t shard;\n  size_t num_shards;\n};\n\ntemplate <typename TypeParam>\nstatic void *TwoLevelMapUserThread(void *param) {\n  TestMapParam<TypeParam> *p = (TestMapParam<TypeParam> *)param;\n  for (size_t i = p->shard; i < p->m->size(); i += p->num_shards) {\n    TypeParam val = (i % 100) + 1;\n    (*p->m)[i] = val;\n    EXPECT_EQ((*p->m)[i], val);\n  }\n  return 0;\n}\n\nTYPED_TEST(FlatMapTest, ThreadedTwoLevelByteMap) {\n  TestMap<TypeParam> m;\n  m.Init();\n  TestMapUnmapCallback1::map_count = 0;\n  TestMapUnmapCallback1::unmap_count = 0;\n  static const int kNumThreads = 4;\n  pthread_t t[kNumThreads];\n  TestMapParam<TypeParam> p[kNumThreads];\n  for (int i = 0; i < kNumThreads; i++) {\n    p[i].m = &m;\n    p[i].shard = i;\n    p[i].num_shards = kNumThreads;\n    PTHREAD_CREATE(&t[i], 0, TwoLevelMapUserThread<TypeParam>, &p[i]);\n  }\n  for (int i = 0; i < kNumThreads; i++) {\n    PTHREAD_JOIN(t[i], 0);\n  }\n  EXPECT_EQ((uptr)TestMapUnmapCallback1::map_count, m.size1());\n  EXPECT_EQ((uptr)TestMapUnmapCallback1::unmap_count, 0UL);\n  m.TestOnlyUnmap();\n  EXPECT_EQ((uptr)TestMapUnmapCallback1::map_count, m.size1());\n  EXPECT_EQ((uptr)TestMapUnmapCallback1::unmap_count, m.size1());\n}\n\n}  // namespace\n"
  },
  {
    "path": "runtime/sanitizer_common/tests/sanitizer_format_interceptor_test.cpp",
    "content": "//===-- sanitizer_format_interceptor_test.cpp -----------------------------===//\n//\n// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n// See https://llvm.org/LICENSE.txt for license information.\n// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n//\n//===----------------------------------------------------------------------===//\n//\n// Tests for *scanf interceptors implementation in sanitizer_common.\n//\n//===----------------------------------------------------------------------===//\n#include <algorithm>\n#include <vector>\n\n#include \"interception/interception.h\"\n#include \"sanitizer_test_utils.h\"\n#include \"sanitizer_common/sanitizer_libc.h\"\n#include \"sanitizer_common/sanitizer_common.h\"\n#include \"gtest/gtest.h\"\n\nusing namespace __sanitizer;\n\n#define COMMON_INTERCEPTOR_READ_WRITE_RANGE(ctx, ptr, size)                    \\\n  do {                                                                         \\\n    ((std::vector<unsigned> *)ctx)->push_back(size);                           \\\n    ptr = ptr;                                                                 \\\n  } while (0)\n\n#define COMMON_INTERCEPTOR_READ_RANGE(ctx, ptr, size)                          \\\n  COMMON_INTERCEPTOR_READ_WRITE_RANGE(ctx, ptr, size)\n\n#define COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ptr, size)                         \\\n  COMMON_INTERCEPTOR_READ_WRITE_RANGE(ctx, ptr, size)\n\n#define SANITIZER_INTERCEPT_PRINTF 1\n#include \"sanitizer_common/sanitizer_common_interceptors_format.inc\"\n\nstatic const unsigned I = sizeof(int);\nstatic const unsigned L = sizeof(long);\nstatic const unsigned LL = sizeof(long long);\nstatic const unsigned S = sizeof(short);\nstatic const unsigned C = sizeof(char);\nstatic const unsigned LC = sizeof(wchar_t);\nstatic const unsigned D = sizeof(double);\nstatic const unsigned LD = sizeof(long double);\nstatic const unsigned F = sizeof(float);\nstatic const unsigned P = 
sizeof(char *);\n\nstatic void verifyFormatResults(const char *format, unsigned n,\n                                const std::vector<unsigned> &computed_sizes,\n                                const std::vector<unsigned> &expected_sizes) {\n  // \"+ 1\" because of the format string\n  ASSERT_EQ(n + 1,\n            computed_sizes.size()) << \"Unexpected number of format arguments: '\"\n                                   << format << \"'\";\n  for (unsigned i = 0; i < n; ++i)\n    EXPECT_EQ(expected_sizes[i], computed_sizes[i + 1])\n        << \"Unexpect write size for argument \" << i << \", format string '\"\n        << format << \"'\";\n}\n\nstatic const char test_buf[] = \"Test string.\";\nstatic const size_t test_buf_size = sizeof(test_buf);\n\nstatic const unsigned SCANF_ARGS_MAX = 16;\n\nstatic void testScanf3(void *ctx, int result, bool allowGnuMalloc,\n                       const char *format, ...) {\n  va_list ap;\n  va_start(ap, format);\n  scanf_common(ctx, result, allowGnuMalloc, format, ap);\n  va_end(ap);\n}\n\nstatic void testScanf2(const char *format, int scanf_result,\n                       bool allowGnuMalloc, unsigned n,\n                       va_list expected_sizes_va) {\n  std::vector<unsigned> scanf_sizes, expected_sizes;\n  for (unsigned i = 0; i < n; ++i)\n    expected_sizes.push_back(va_arg(expected_sizes_va, unsigned));\n\n  // 16 args should be enough.\n  testScanf3((void *)&scanf_sizes, scanf_result, allowGnuMalloc, format,\n             test_buf, test_buf, test_buf, test_buf, test_buf, test_buf,\n             test_buf, test_buf, test_buf, test_buf, test_buf, test_buf,\n             test_buf, test_buf, test_buf, test_buf);\n  verifyFormatResults(format, n, scanf_sizes, expected_sizes);\n}\n\nstatic void testScanf(const char *format, unsigned n, ...) 
{\n  va_list ap;\n  va_start(ap, n);\n  testScanf2(format, SCANF_ARGS_MAX, /* allowGnuMalloc */ true, n, ap);\n  va_end(ap);\n}\n\nstatic void testScanfPartial(const char *format, int scanf_result, unsigned n,\n                             ...) {\n  va_list ap;\n  va_start(ap, n);\n  testScanf2(format, scanf_result, /* allowGnuMalloc */ true,  n, ap);\n  va_end(ap);\n}\n\nstatic void testScanfNoGnuMalloc(const char *format, unsigned n, ...) {\n  va_list ap;\n  va_start(ap, n);\n  testScanf2(format, SCANF_ARGS_MAX, /* allowGnuMalloc */ false, n, ap);\n  va_end(ap);\n}\n\nTEST(SanitizerCommonInterceptors, Scanf) {\n  testScanf(\"%d\", 1, I);\n  testScanf(\"%d%d%d\", 3, I, I, I);\n  testScanf(\"ab%u%dc\", 2, I, I);\n  testScanf(\"%ld\", 1, L);\n  testScanf(\"%llu\", 1, LL);\n  testScanf(\"%qd\", 1, LL);\n  testScanf(\"a %hd%hhx\", 2, S, C);\n  testScanf(\"%c\", 1, C);\n  testScanf(\"%lc\", 1, LC);\n\n  testScanf(\"%%\", 0);\n  testScanf(\"a%%\", 0);\n  testScanf(\"a%%b\", 0);\n  testScanf(\"a%%%%b\", 0);\n  testScanf(\"a%%b%%\", 0);\n  testScanf(\"a%%%%%%b\", 0);\n  testScanf(\"a%%%%%b\", 0);\n  testScanf(\"a%%%%%f\", 1, F);\n  testScanf(\"a%%%lxb\", 1, L);\n  testScanf(\"a%lf%%%lxb\", 2, D, L);\n  testScanf(\"%nf\", 1, I);\n\n  testScanf(\"%10s\", 1, 11);\n  testScanf(\"%10c\", 1, 10);\n  testScanf(\"%10ls\", 1, 11 * LC);\n  testScanf(\"%10lc\", 1, 10 * LC);\n  testScanf(\"%%10s\", 0);\n  testScanf(\"%*10s\", 0);\n  testScanf(\"%*d\", 0);\n\n  testScanf(\"%4d%8f%c\", 3, I, F, C);\n  testScanf(\"%s%d\", 2, test_buf_size, I);\n  testScanf(\"%[abc]\", 1, test_buf_size);\n  testScanf(\"%4[bcdef]\", 1, 5);\n  testScanf(\"%[]]\", 1, test_buf_size);\n  testScanf(\"%8[^]%d0-9-]%c\", 2, 9, C);\n\n  testScanf(\"%*[^:]%n:%d:%1[ ]%n\", 4, I, I, 2, I);\n\n  testScanf(\"%*d%u\", 1, I);\n\n  testScanf(\"%c%d\", 2, C, I);\n  testScanf(\"%A%lf\", 2, F, D);\n\n  testScanf(\"s%Las\", 1, LD);\n  testScanf(\"%ar\", 1, F);\n\n  // In the cases with std::min below the format spec can be 
interpreted as\n  // either floating-something, or (GNU extension) callee-allocated string.\n  // Our conservative implementation reports one of the two possibilities with\n  // the least store range.\n  testScanf(\"%a[\", 0);\n  testScanf(\"%a[]\", 0);\n  testScanf(\"%a[]]\", 1, std::min(F, P));\n  testScanf(\"%a[abc]\", 1, std::min(F, P));\n  testScanf(\"%a[^abc]\", 1, std::min(F, P));\n  testScanf(\"%a[ab%c] %d\", 0);\n  testScanf(\"%a[^ab%c] %d\", 0);\n  testScanf(\"%as\", 1, std::min(F, P));\n  testScanf(\"%aS\", 1, std::min(F, P));\n  testScanf(\"%a13S\", 1, std::min(F, P));\n  testScanf(\"%alS\", 1, std::min(F, P));\n\n  testScanfNoGnuMalloc(\"s%Las\", 1, LD);\n  testScanfNoGnuMalloc(\"%ar\", 1, F);\n  testScanfNoGnuMalloc(\"%a[\", 1, F);\n  testScanfNoGnuMalloc(\"%a[]\", 1, F);\n  testScanfNoGnuMalloc(\"%a[]]\", 1, F);\n  testScanfNoGnuMalloc(\"%a[abc]\", 1, F);\n  testScanfNoGnuMalloc(\"%a[^abc]\", 1, F);\n  testScanfNoGnuMalloc(\"%a[ab%c] %d\", 3, F, C, I);\n  testScanfNoGnuMalloc(\"%a[^ab%c] %d\", 3, F, C, I);\n  testScanfNoGnuMalloc(\"%as\", 1, F);\n  testScanfNoGnuMalloc(\"%aS\", 1, F);\n  testScanfNoGnuMalloc(\"%a13S\", 1, F);\n  testScanfNoGnuMalloc(\"%alS\", 1, F);\n\n  testScanf(\"%5$d\", 0);\n  testScanf(\"%md\", 0);\n  testScanf(\"%m10s\", 0);\n\n  testScanfPartial(\"%d%d%d%d //1\\n\", 1, 1, I);\n  testScanfPartial(\"%d%d%d%d //2\\n\", 2, 2, I, I);\n  testScanfPartial(\"%d%d%d%d //3\\n\", 3, 3, I, I, I);\n  testScanfPartial(\"%d%d%d%d //4\\n\", 4, 4, I, I, I, I);\n\n  testScanfPartial(\"%d%n%n%d //1\\n\", 1, 3, I, I, I);\n  testScanfPartial(\"%d%n%n%d //2\\n\", 2, 4, I, I, I, I);\n\n  testScanfPartial(\"%d%n%n%d %s %s\", 3, 5, I, I, I, I, test_buf_size);\n  testScanfPartial(\"%d%n%n%d %s %s\", 4, 6, I, I, I, I, test_buf_size,\n                   test_buf_size);\n}\n\nTEST(SanitizerCommonInterceptors, ScanfAllocate) {\n  const char *buf = \"123456\";\n\n  // Can not use testScanf() because this case needs a valid pointer to a string\n  // in the 
scanf argument.\n  {\n    std::vector<unsigned> scanf_sizes;\n    testScanf3((void *)&scanf_sizes, 2, /*allowGnuMalloc=*/false, \"%ms\", &buf);\n    verifyFormatResults(\"%ms\", 2, scanf_sizes,\n                        {P, (unsigned)(strlen(buf) + 1)});\n  }\n\n  {\n    std::vector<unsigned> scanf_sizes;\n    testScanf3((void *)&scanf_sizes, 2, /*allowGnuMalloc=*/false, \"%mc\", &buf);\n    verifyFormatResults(\"%mc\", 2, scanf_sizes,\n                        {P, (unsigned)(strlen(buf) + 1)});\n  }\n}\n\nstatic void testPrintf3(void *ctx, const char *format, ...) {\n  va_list ap;\n  va_start(ap, format);\n  printf_common(ctx, format, ap);\n  va_end(ap);\n}\n\nstatic void testPrintf2(const char *format, unsigned n,\n                        va_list expected_sizes_va) {\n  std::vector<unsigned> printf_sizes, expected_sizes;\n  for (unsigned i = 0; i < n; ++i)\n    expected_sizes.push_back(va_arg(expected_sizes_va, unsigned));\n\n  // 16 args should be enough.\n  testPrintf3((void *)&printf_sizes, format,\n             test_buf, test_buf, test_buf, test_buf, test_buf, test_buf,\n             test_buf, test_buf, test_buf, test_buf, test_buf, test_buf,\n             test_buf, test_buf, test_buf, test_buf);\n  verifyFormatResults(format, n, printf_sizes, expected_sizes);\n}\n\nstatic void testPrintf(const char *format, unsigned n, ...) 
{\n  va_list ap;\n  va_start(ap, n);\n  testPrintf2(format, n, ap);\n  va_end(ap);\n}\n\nTEST(SanitizerCommonInterceptors, Printf) {\n  // Only test functionality which differs from scanf\n\n  // Indexed arguments\n  testPrintf(\"%5$d\", 0);\n  testPrintf(\"%.*5$d\", 0);\n\n  // errno\n  testPrintf(\"%0-m\", 0);\n\n  // Dynamic width\n  testPrintf(\"%*n\", 1, I);\n  testPrintf(\"%*.10n\", 1, I);\n\n  // Precision\n  testPrintf(\"%10.10n\", 1, I);\n  testPrintf(\"%.3s\", 1, 3);\n  testPrintf(\"%.20s\", 1, test_buf_size);\n\n  // Dynamic precision\n  testPrintf(\"%.*n\", 1, I);\n  testPrintf(\"%10.*n\", 1, I);\n\n  // Dynamic precision for strings is not implemented yet.\n  testPrintf(\"%.*s\", 1, 0);\n\n  // Checks for wide-character strings are not implemented yet.\n  testPrintf(\"%ls\", 1, 0);\n\n  testPrintf(\"%m\", 0);\n  testPrintf(\"%m%s\", 1, test_buf_size);\n  testPrintf(\"%s%m%s\", 2, test_buf_size, test_buf_size);\n}\n"
  },
  {
    "path": "runtime/sanitizer_common/tests/sanitizer_hash_test.cpp",
    "content": "//===-- sanitizer_hash_test.cpp -------------------------------------------===//\n//\n// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n// See https://llvm.org/LICENSE.txt for license information.\n// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n//\n//===----------------------------------------------------------------------===//\n//\n// This file is a part of Sanitizers.\n//\n//===----------------------------------------------------------------------===//\n#include \"sanitizer_common/sanitizer_hash.h\"\n\n#include \"gtest/gtest.h\"\n\nnamespace __sanitizer {\n\n// Tests matche a few hashes generated by https://github.com/aappleby/smhasher.\n\nTEST(SanitizerCommon, Hash32Seed) {\n  EXPECT_EQ(MurMur2HashBuilder(0).get(), 275646681u);\n  EXPECT_EQ(MurMur2HashBuilder(1).get(), 3030210376u);\n  EXPECT_EQ(MurMur2HashBuilder(3).get(), 1816185114u);\n}\n\nTEST(SanitizerCommon, Hash32Add) {\n  MurMur2HashBuilder h(123 * sizeof(u32));\n  for (u32 i = 0; i < 123; ++i) h.add(i);\n  EXPECT_EQ(h.get(), 351963665u);\n  for (u32 i = 0; i < 123; ++i) h.add(-i);\n  EXPECT_EQ(h.get(), 2640061027u);\n}\n\nTEST(SanitizerCommon, Hash64Seed) {\n  EXPECT_EQ(MurMur2Hash64Builder(0).get(), 4469829599815726255ull);\n  EXPECT_EQ(MurMur2Hash64Builder(1).get(), 14121968454562043709ull);\n  EXPECT_EQ(MurMur2Hash64Builder(3).get(), 8040757559320203998ull);\n}\n\nTEST(SanitizerCommon, Hash64Add) {\n  MurMur2Hash64Builder h(123 * sizeof(u64));\n  for (u32 i = 0; i < 123; ++i) h.add(i);\n  EXPECT_EQ(h.get(), 11366430808886012537ull);\n  for (u32 i = 0; i < 123; ++i) h.add(-i);\n  EXPECT_EQ(h.get(), 10843188204560467446ull);\n}\n\n}  // namespace __sanitizer\n"
  },
  {
    "path": "runtime/sanitizer_common/tests/sanitizer_ioctl_test.cpp",
    "content": "//===-- sanitizer_ioctl_test.cpp ------------------------------------------===//\n//\n// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n// See https://llvm.org/LICENSE.txt for license information.\n// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n//\n//===----------------------------------------------------------------------===//\n//\n// Tests for ioctl interceptor implementation in sanitizer_common.\n//\n//===----------------------------------------------------------------------===//\n\n#include \"sanitizer_common/sanitizer_platform.h\"\n#if SANITIZER_LINUX\n\n#include <linux/input.h>\n#include <vector>\n\n#include \"interception/interception.h\"\n#include \"sanitizer_test_utils.h\"\n#include \"sanitizer_common/sanitizer_platform_limits_posix.h\"\n#include \"sanitizer_common/sanitizer_common.h\"\n#include \"gtest/gtest.h\"\n\n\nusing namespace __sanitizer;\n\n#define COMMON_INTERCEPTOR_READ_RANGE(ctx, ptr, sz) \\\n  do {                                              \\\n    (void) ctx;                                     \\\n    (void) ptr;                                     \\\n    (void) sz;                                      \\\n  } while (0)\n#define COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ptr, sz) \\\n  do {                                               \\\n    (void) ctx;                                      \\\n    (void) ptr;                                      \\\n    (void) sz;                                       \\\n  } while (0)\n\n#include \"sanitizer_common/sanitizer_common_interceptors_ioctl.inc\"\n\nstatic struct IoctlInit {\n  IoctlInit() {\n    ioctl_init();\n    // Avoid unused function warnings.\n    (void)&ioctl_common_pre;\n    (void)&ioctl_common_post;\n    (void)&ioctl_decode;\n  }\n} ioctl_static_initializer;\n\nTEST(SanitizerIoctl, Fixup) {\n  EXPECT_EQ((unsigned)FIONBIO, ioctl_request_fixup(FIONBIO));\n\n  EXPECT_EQ(EVIOCGBIT(0, 0), ioctl_request_fixup(EVIOCGBIT(0, 16)));\n  
EXPECT_EQ(EVIOCGBIT(0, 0), ioctl_request_fixup(EVIOCGBIT(1, 16)));\n  EXPECT_EQ(EVIOCGBIT(0, 0), ioctl_request_fixup(EVIOCGBIT(1, 17)));\n  EXPECT_EQ(EVIOCGBIT(0, 0), ioctl_request_fixup(EVIOCGBIT(31, 16)));\n  EXPECT_NE(EVIOCGBIT(0, 0), ioctl_request_fixup(EVIOCGBIT(32, 16)));\n\n  EXPECT_EQ(EVIOCGABS(0), ioctl_request_fixup(EVIOCGABS(0)));\n  EXPECT_EQ(EVIOCGABS(0), ioctl_request_fixup(EVIOCGABS(5)));\n  EXPECT_EQ(EVIOCGABS(0), ioctl_request_fixup(EVIOCGABS(63)));\n  EXPECT_NE(EVIOCGABS(0), ioctl_request_fixup(EVIOCGABS(64)));\n\n  EXPECT_EQ(EVIOCSABS(0), ioctl_request_fixup(EVIOCSABS(0)));\n  EXPECT_EQ(EVIOCSABS(0), ioctl_request_fixup(EVIOCSABS(5)));\n  EXPECT_EQ(EVIOCSABS(0), ioctl_request_fixup(EVIOCSABS(63)));\n  EXPECT_NE(EVIOCSABS(0), ioctl_request_fixup(EVIOCSABS(64)));\n\n  const ioctl_desc *desc = ioctl_lookup(EVIOCGKEY(16));\n  EXPECT_NE((void *)0, desc);\n  EXPECT_EQ(EVIOCGKEY(0), desc->req);\n}\n\n// Test decoding KVM ioctl numbers.\nTEST(SanitizerIoctl, KVM_GET_MP_STATE) {\n  ioctl_desc desc;\n  unsigned int desc_value = SANITIZER_MIPS ? 0x4004ae98U : 0x8004ae98U;\n  bool res = ioctl_decode(desc_value, &desc);\n  EXPECT_TRUE(res);\n  EXPECT_EQ(ioctl_desc::WRITE, desc.type);\n  EXPECT_EQ(4U, desc.size);\n}\n\nTEST(SanitizerIoctl, KVM_GET_LAPIC) {\n  ioctl_desc desc;\n  unsigned int desc_value = SANITIZER_MIPS ? 0x4400ae8eU : 0x8400ae8eU;\n  bool res = ioctl_decode(desc_value, &desc);\n  EXPECT_TRUE(res);\n  EXPECT_EQ(ioctl_desc::WRITE, desc.type);\n  EXPECT_EQ(1024U, desc.size);\n}\n\nTEST(SanitizerIoctl, KVM_GET_MSR_INDEX_LIST) {\n  ioctl_desc desc;\n  bool res = ioctl_decode(0xc004ae02U, &desc);\n  EXPECT_TRUE(res);\n  EXPECT_EQ(ioctl_desc::READWRITE, desc.type);\n  EXPECT_EQ(4U, desc.size);\n}\n\n#endif // SANITIZER_LINUX\n"
  },
  {
    "path": "runtime/sanitizer_common/tests/sanitizer_leb128_test.cpp",
    "content": "//===-- sanitizer_leb128.cpp ------------------------------------*- C++ -*-===//\n//\n// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n// See https://llvm.org/LICENSE.txt for license information.\n// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n//\n//===----------------------------------------------------------------------===//\n#include \"sanitizer_common/sanitizer_leb128.h\"\n\n#include <type_traits>\n\n#include \"gtest/gtest.h\"\n#include \"sanitizer_common/sanitizer_common.h\"\n#include \"sanitizer_internal_defs.h\"\n\nnamespace __sanitizer {\n\ntemplate <typename T>\nclass Leb128Test : public ::testing::Test {};\n\nusing Leb128TestTypes = ::testing::Types<u8, u16, u32, u64>;\nTYPED_TEST_SUITE(Leb128Test, Leb128TestTypes, );\n\nstatic uptr BitsNeeded(u64 v) {\n  if (!v)\n    return 1;\n  uptr r = 0;\n  uptr uptr_bits = 8 * sizeof(uptr);\n  while (v >> uptr_bits) {\n    r += uptr_bits;\n    v >>= uptr_bits;\n  }\n  return r + MostSignificantSetBitIndex(v) + 1;\n}\n\nTYPED_TEST(Leb128Test, SignedOverflow) {\n  using T = typename std::make_signed<TypeParam>::type;\n  u8 buffer[16] = {255};\n  T v = -128;\n  EXPECT_EQ(buffer + 1, EncodeSLEB128(v, buffer, buffer + 1));\n  EXPECT_EQ(buffer + 1, DecodeSLEB128(buffer, buffer + 1, &v));\n}\n\nTYPED_TEST(Leb128Test, Signed) {\n  using T = typename std::make_signed<TypeParam>::type;\n  T v = 0;\n  for (int i = 0; i < 100; ++i) {\n    u8 buffer[16] = {};\n    u8* p = EncodeSLEB128(v, std::begin(buffer), std::end(buffer));\n    EXPECT_EQ(int(BitsNeeded(v < 0 ? 
(-v - 1) : v) + 6 + 1) / 7, p - buffer)\n        << (int)v;\n    T v2;\n    u8* p2 = DecodeSLEB128(std::begin(buffer), std::end(buffer), &v2);\n    EXPECT_EQ(v, v2);\n    EXPECT_EQ(p, p2);\n    v = -TypeParam(v) * 3u + 1u;\n  }\n}\n\nTYPED_TEST(Leb128Test, UnsignedOverflow) {\n  using T = TypeParam;\n  u8 buffer[16] = {255};\n  T v = 255;\n  EXPECT_EQ(buffer + 1, EncodeULEB128(v, buffer, buffer + 1));\n  EXPECT_EQ(buffer + 1, DecodeULEB128(buffer, buffer + 1, &v));\n}\n\nTYPED_TEST(Leb128Test, Unsigned) {\n  using T = TypeParam;\n  T v = 0;\n  for (int i = 0; i < 100; ++i) {\n    u8 buffer[16] = {};\n    u8* p = EncodeULEB128(v, std::begin(buffer), std::end(buffer));\n    EXPECT_EQ(int(BitsNeeded(v) + 6) / 7, p - buffer);\n    T v2;\n    u8* p2 = DecodeULEB128(std::begin(buffer), std::end(buffer), &v2);\n    EXPECT_EQ(v, v2);\n    EXPECT_EQ(p, p2);\n    v = v * 3 + 1;\n  }\n}\n\n}  // namespace __sanitizer\n"
  },
  {
    "path": "runtime/sanitizer_common/tests/sanitizer_libc_test.cpp",
    "content": "//===-- sanitizer_libc_test.cpp -------------------------------------------===//\n//\n// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n// See https://llvm.org/LICENSE.txt for license information.\n// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n//\n//===----------------------------------------------------------------------===//\n// Tests for sanitizer_libc.h.\n//===----------------------------------------------------------------------===//\n#include <algorithm>\n#include <vector>\n#include <stdio.h>\n\n#include \"sanitizer_common/sanitizer_common.h\"\n#include \"sanitizer_common/sanitizer_file.h\"\n#include \"sanitizer_common/sanitizer_libc.h\"\n#include \"sanitizer_common/sanitizer_platform.h\"\n#include \"gtest/gtest.h\"\n\n#if SANITIZER_WINDOWS\n#define NOMINMAX\n#include <windows.h>\n#undef NOMINMAX\n#endif\n#if SANITIZER_POSIX\n# include <sys/stat.h>\n# include \"sanitizer_common/sanitizer_posix.h\"\n#endif\n\nusing namespace __sanitizer;\n\n// A regression test for internal_memmove() implementation.\nTEST(SanitizerCommon, InternalMemmoveRegression) {\n  char src[] = \"Hello World\";\n  char *dest = src + 6;\n  __sanitizer::internal_memmove(dest, src, 5);\n  EXPECT_EQ(dest[0], src[0]);\n  EXPECT_EQ(dest[4], src[4]);\n}\n\nTEST(SanitizerCommon, mem_is_zero) {\n  size_t size = 128;\n  char *x = new char[size];\n  memset(x, 0, size);\n  for (size_t pos = 0; pos < size; pos++) {\n    x[pos] = 1;\n    for (size_t beg = 0; beg < size; beg++) {\n      for (size_t end = beg; end < size; end++) {\n        // fprintf(stderr, \"pos %zd beg %zd end %zd \\n\", pos, beg, end);\n        if (beg <= pos && pos < end)\n          EXPECT_FALSE(__sanitizer::mem_is_zero(x + beg, end - beg));\n        else\n          EXPECT_TRUE(__sanitizer::mem_is_zero(x + beg, end - beg));\n      }\n    }\n    x[pos] = 0;\n  }\n  delete [] x;\n}\n\nstruct stat_and_more {\n  struct stat st;\n  unsigned char z;\n};\n\nstatic void 
temp_file_name(char *buf, size_t bufsize, const char *prefix) {\n#if SANITIZER_WINDOWS\n  buf[0] = '\\0';\n  char tmp_dir[MAX_PATH];\n  if (!::GetTempPathA(MAX_PATH, tmp_dir))\n    return;\n  // GetTempFileNameA needs a MAX_PATH buffer.\n  char tmp_path[MAX_PATH];\n  if (!::GetTempFileNameA(tmp_dir, prefix, 0, tmp_path))\n    return;\n  internal_strncpy(buf, tmp_path, bufsize);\n#else\n  const char *tmpdir = \"/tmp\";\n#if SANITIZER_ANDROID\n  tmpdir = GetEnv(\"TMPDIR\");\n#endif\n  internal_snprintf(buf, bufsize, \"%s/%sXXXXXX\", tmpdir, prefix);\n  ASSERT_TRUE(mkstemp(buf));\n#endif\n}\n\nstatic void Unlink(const char *path) {\n#if SANITIZER_WINDOWS\n  // No sanitizer needs to delete a file on Windows yet. If we ever do, we can\n  // add a portable wrapper and test it from here.\n  ::DeleteFileA(&path[0]);\n#else\n  internal_unlink(path);\n#endif\n}\n\nTEST(SanitizerCommon, FileOps) {\n  const char *str1 = \"qwerty\";\n  uptr len1 = internal_strlen(str1);\n  const char *str2 = \"zxcv\";\n  uptr len2 = internal_strlen(str2);\n\n  char tmpfile[128];\n  temp_file_name(tmpfile, sizeof(tmpfile), \"sanitizer_common.fileops.tmp.\");\n  fd_t fd = OpenFile(tmpfile, WrOnly);\n  ASSERT_NE(fd, kInvalidFd);\n  ASSERT_TRUE(WriteToFile(fd, \"A\", 1));\n  CloseFile(fd);\n\n  fd = OpenFile(tmpfile, WrOnly);\n  ASSERT_NE(fd, kInvalidFd);\n#if SANITIZER_POSIX && !SANITIZER_MAC\n  EXPECT_EQ(internal_lseek(fd, 0, SEEK_END), 0u);\n#endif\n  uptr bytes_written = 0;\n  EXPECT_TRUE(WriteToFile(fd, str1, len1, &bytes_written));\n  EXPECT_EQ(len1, bytes_written);\n  EXPECT_TRUE(WriteToFile(fd, str2, len2, &bytes_written));\n  EXPECT_EQ(len2, bytes_written);\n  CloseFile(fd);\n\n  EXPECT_TRUE(FileExists(tmpfile));\n\n  fd = OpenFile(tmpfile, RdOnly);\n  ASSERT_NE(fd, kInvalidFd);\n\n#if SANITIZER_POSIX\n  // The stat wrappers are posix-only.\n  uptr fsize = internal_filesize(fd);\n  EXPECT_EQ(len1 + len2, fsize);\n\n  struct stat st1, st2, st3;\n  EXPECT_EQ(0u, internal_stat(tmpfile, 
&st1));\n  EXPECT_EQ(0u, internal_lstat(tmpfile, &st2));\n  EXPECT_EQ(0u, internal_fstat(fd, &st3));\n  EXPECT_EQ(fsize, (uptr)st3.st_size);\n\n  // Verify that internal_fstat does not write beyond the end of the supplied\n  // buffer.\n  struct stat_and_more sam;\n  memset(&sam, 0xAB, sizeof(sam));\n  EXPECT_EQ(0u, internal_fstat(fd, &sam.st));\n  EXPECT_EQ(0xAB, sam.z);\n  EXPECT_NE(0xAB, sam.st.st_size);\n  EXPECT_NE(0, sam.st.st_size);\n#endif\n\n  char buf[64] = {};\n  uptr bytes_read = 0;\n  EXPECT_TRUE(ReadFromFile(fd, buf, len1, &bytes_read));\n  EXPECT_EQ(len1, bytes_read);\n  EXPECT_EQ(0, internal_memcmp(buf, str1, len1));\n  EXPECT_EQ((char)0, buf[len1 + 1]);\n  internal_memset(buf, 0, len1);\n  EXPECT_TRUE(ReadFromFile(fd, buf, len2, &bytes_read));\n  EXPECT_EQ(len2, bytes_read);\n  EXPECT_EQ(0, internal_memcmp(buf, str2, len2));\n  CloseFile(fd);\n\n  Unlink(tmpfile);\n}\n\nclass SanitizerCommonFileTest : public ::testing::TestWithParam<uptr> {\n  void SetUp() override {\n    data_.resize(GetParam());\n    std::generate(data_.begin(), data_.end(), [] { return rand() % 256; });\n\n    temp_file_name(file_name_, sizeof(file_name_),\n                   \"sanitizer_common.ReadFile.tmp.\");\n\n    if (FILE *f = fopen(file_name_, \"wb\")) {\n      if (!data_.empty())\n        fwrite(data_.data(), data_.size(), 1, f);\n      fclose(f);\n    }\n  }\n\n  void TearDown() override { Unlink(file_name_); }\n\n protected:\n  char file_name_[256];\n  std::vector<char> data_;\n};\n\nTEST_P(SanitizerCommonFileTest, ReadFileToBuffer) {\n  char *buff;\n  uptr size;\n  uptr len;\n  EXPECT_TRUE(ReadFileToBuffer(file_name_, &buff, &len, &size));\n  EXPECT_EQ(data_, std::vector<char>(buff, buff + size));\n  UnmapOrDie(buff, len);\n}\n\nTEST_P(SanitizerCommonFileTest, ReadFileToBufferHalf) {\n  char *buff;\n  uptr size;\n  uptr len;\n  data_.resize(data_.size() / 2);\n  EXPECT_TRUE(ReadFileToBuffer(file_name_, &buff, &len, &size, data_.size()));\n  EXPECT_EQ(data_, 
std::vector<char>(buff, buff + size));\n  UnmapOrDie(buff, len);\n}\n\nTEST_P(SanitizerCommonFileTest, ReadFileToVector) {\n  InternalMmapVector<char> buff;\n  EXPECT_TRUE(ReadFileToVector(file_name_, &buff));\n  EXPECT_EQ(data_, std::vector<char>(buff.begin(), buff.end()));\n}\n\nTEST_P(SanitizerCommonFileTest, ReadFileToVectorHalf) {\n  InternalMmapVector<char> buff;\n  data_.resize(data_.size() / 2);\n  EXPECT_TRUE(ReadFileToVector(file_name_, &buff, data_.size()));\n  EXPECT_EQ(data_, std::vector<char>(buff.begin(), buff.end()));\n}\n\nINSTANTIATE_TEST_SUITE_P(FileSizes, SanitizerCommonFileTest,\n                         ::testing::Values(0, 1, 7, 13, 32, 4096, 4097, 1048575,\n                                           1048576, 1048577));\n\nstatic const size_t kStrlcpyBufSize = 8;\nvoid test_internal_strlcpy(char *dbuf, const char *sbuf) {\n  uptr retval = 0;\n  retval = internal_strlcpy(dbuf, sbuf, kStrlcpyBufSize);\n  EXPECT_EQ(internal_strncmp(dbuf, sbuf, kStrlcpyBufSize - 1), 0);\n  EXPECT_EQ(internal_strlen(dbuf),\n            std::min(internal_strlen(sbuf), (uptr)(kStrlcpyBufSize - 1)));\n  EXPECT_EQ(retval, internal_strlen(sbuf));\n\n  // Test with shorter maxlen.\n  uptr maxlen = 2;\n  if (internal_strlen(sbuf) > maxlen) {\n    retval = internal_strlcpy(dbuf, sbuf, maxlen);\n    EXPECT_EQ(internal_strncmp(dbuf, sbuf, maxlen - 1), 0);\n    EXPECT_EQ(internal_strlen(dbuf), maxlen - 1);\n  }\n}\n\nTEST(SanitizerCommon, InternalStrFunctions) {\n  const char *haystack = \"haystack\";\n  EXPECT_EQ(haystack + 2, internal_strchr(haystack, 'y'));\n  EXPECT_EQ(haystack + 2, internal_strchrnul(haystack, 'y'));\n  EXPECT_EQ(0, internal_strchr(haystack, 'z'));\n  EXPECT_EQ(haystack + 8, internal_strchrnul(haystack, 'z'));\n\n  char dbuf[kStrlcpyBufSize] = {};\n  const char *samesizestr = \"1234567\";\n  const char *shortstr = \"123\";\n  const char *longerstr = \"123456789\";\n\n  // Test internal_strlcpy.\n  internal_strlcpy(dbuf, shortstr, 0);\n  
EXPECT_EQ(dbuf[0], 0);\n  EXPECT_EQ(dbuf[0], 0);\n  test_internal_strlcpy(dbuf, samesizestr);\n  test_internal_strlcpy(dbuf, shortstr);\n  test_internal_strlcpy(dbuf, longerstr);\n\n  // Test internal_strlcat.\n  char dcatbuf[kStrlcpyBufSize] = {};\n  uptr retval = 0;\n  retval = internal_strlcat(dcatbuf, \"aaa\", 0);\n  EXPECT_EQ(internal_strlen(dcatbuf), (uptr)0);\n  EXPECT_EQ(retval, (uptr)3);\n\n  retval = internal_strlcat(dcatbuf, \"123\", kStrlcpyBufSize);\n  EXPECT_EQ(internal_strcmp(dcatbuf, \"123\"), 0);\n  EXPECT_EQ(internal_strlen(dcatbuf), (uptr)3);\n  EXPECT_EQ(retval, (uptr)3);\n\n  retval = internal_strlcat(dcatbuf, \"123\", kStrlcpyBufSize);\n  EXPECT_EQ(internal_strcmp(dcatbuf, \"123123\"), 0);\n  EXPECT_EQ(internal_strlen(dcatbuf), (uptr)6);\n  EXPECT_EQ(retval, (uptr)6);\n\n  retval = internal_strlcat(dcatbuf, \"123\", kStrlcpyBufSize);\n  EXPECT_EQ(internal_strcmp(dcatbuf, \"1231231\"), 0);\n  EXPECT_EQ(internal_strlen(dcatbuf), (uptr)7);\n  EXPECT_EQ(retval, (uptr)9);\n}\n\nTEST(SanitizerCommon, InternalWideStringFunctions) {\n  const wchar_t *emptystr = L\"\";\n  const wchar_t *samesizestr = L\"1234567\";\n  const wchar_t *shortstr = L\"123\";\n  const wchar_t *longerstr = L\"123456789\";\n\n  ASSERT_EQ(internal_wcslen(emptystr), 0ul);\n  ASSERT_EQ(internal_wcslen(samesizestr), 7ul);\n  ASSERT_EQ(internal_wcslen(shortstr), 3ul);\n  ASSERT_EQ(internal_wcslen(longerstr), 9ul);\n\n  ASSERT_EQ(internal_wcsnlen(emptystr, 7), 0ul);\n  ASSERT_EQ(internal_wcsnlen(samesizestr, 7), 7ul);\n  ASSERT_EQ(internal_wcsnlen(shortstr, 7), 3ul);\n  ASSERT_EQ(internal_wcsnlen(longerstr, 7), 7ul);\n}\n\n// FIXME: File manipulations are not yet supported on Windows\n#if SANITIZER_POSIX && !SANITIZER_MAC\nTEST(SanitizerCommon, InternalMmapWithOffset) {\n  char tmpfile[128];\n  temp_file_name(tmpfile, sizeof(tmpfile),\n                 \"sanitizer_common.internalmmapwithoffset.tmp.\");\n  fd_t fd = OpenFile(tmpfile, RdWr);\n  ASSERT_NE(fd, kInvalidFd);\n\n  uptr 
page_size = GetPageSizeCached();\n  uptr res = internal_ftruncate(fd, page_size * 2);\n  ASSERT_FALSE(internal_iserror(res));\n\n  res = internal_lseek(fd, page_size, SEEK_SET);\n  ASSERT_FALSE(internal_iserror(res));\n\n  res = internal_write(fd, \"AB\", 2);\n  ASSERT_FALSE(internal_iserror(res));\n\n  char *p = (char *)MapWritableFileToMemory(nullptr, page_size, fd, page_size);\n  ASSERT_NE(nullptr, p);\n\n  ASSERT_EQ('A', p[0]);\n  ASSERT_EQ('B', p[1]);\n\n  CloseFile(fd);\n  UnmapOrDie(p, page_size);\n  internal_unlink(tmpfile);\n}\n#endif\n\nTEST(SanitizerCommon, ReportFile) {\n  SpinMutex report_file_mu;\n  ReportFile report_file = {&report_file_mu, kStderrFd, \"\", \"\", 0};\n  char tmpfile[128];\n  temp_file_name(tmpfile, sizeof(tmpfile),\n                 \"dir/sanitizer_common.reportfile.tmp.\");\n  report_file.SetReportPath(tmpfile);\n  const char *path = report_file.GetReportPath();\n  EXPECT_EQ(internal_strncmp(tmpfile, path, strlen(tmpfile)), 0);\n  // This will close tmpfile.\n  report_file.SetReportPath(\"stderr\");\n  Unlink(tmpfile);\n}\n"
  },
  {
    "path": "runtime/sanitizer_common/tests/sanitizer_linux_test.cpp",
    "content": "//===-- sanitizer_linux_test.cpp ------------------------------------------===//\n//\n// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n// See https://llvm.org/LICENSE.txt for license information.\n// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n//\n//===----------------------------------------------------------------------===//\n//\n// Tests for sanitizer_linux.h\n//\n//===----------------------------------------------------------------------===//\n\n#include \"sanitizer_common/sanitizer_platform.h\"\n#if SANITIZER_LINUX\n\n#include \"sanitizer_common/sanitizer_linux.h\"\n\n#include \"sanitizer_common/sanitizer_common.h\"\n#include \"sanitizer_common/sanitizer_file.h\"\n#include \"gtest/gtest.h\"\n\n#include <pthread.h>\n#include <sched.h>\n#include <stdlib.h>\n\n#include <algorithm>\n#include <vector>\n\nnamespace __sanitizer {\n\nstruct TidReporterArgument {\n  TidReporterArgument() {\n    pthread_mutex_init(&terminate_thread_mutex, NULL);\n    pthread_mutex_init(&tid_reported_mutex, NULL);\n    pthread_cond_init(&terminate_thread_cond, NULL);\n    pthread_cond_init(&tid_reported_cond, NULL);\n    terminate_thread = false;\n  }\n\n  ~TidReporterArgument() {\n    pthread_mutex_destroy(&terminate_thread_mutex);\n    pthread_mutex_destroy(&tid_reported_mutex);\n    pthread_cond_destroy(&terminate_thread_cond);\n    pthread_cond_destroy(&tid_reported_cond);\n  }\n\n  tid_t reported_tid;\n  // For signaling to spawned threads that they should terminate.\n  pthread_cond_t terminate_thread_cond;\n  pthread_mutex_t terminate_thread_mutex;\n  bool terminate_thread;\n  // For signaling to main thread that a child thread has reported its tid.\n  pthread_cond_t tid_reported_cond;\n  pthread_mutex_t tid_reported_mutex;\n\n private:\n  // Disallow evil constructors\n  TidReporterArgument(const TidReporterArgument &);\n  void operator=(const TidReporterArgument &);\n};\n\nclass ThreadListerTest : public 
::testing::Test {\n protected:\n  virtual void SetUp() {\n    pthread_t pthread_id;\n    tid_t tid;\n    for (uptr i = 0; i < kThreadCount; i++) {\n      SpawnTidReporter(&pthread_id, &tid);\n      pthread_ids_.push_back(pthread_id);\n      tids_.push_back(tid);\n    }\n  }\n\n  virtual void TearDown() {\n    pthread_mutex_lock(&thread_arg.terminate_thread_mutex);\n    thread_arg.terminate_thread = true;\n    pthread_cond_broadcast(&thread_arg.terminate_thread_cond);\n    pthread_mutex_unlock(&thread_arg.terminate_thread_mutex);\n    for (uptr i = 0; i < pthread_ids_.size(); i++)\n      pthread_join(pthread_ids_[i], NULL);\n  }\n\n  void SpawnTidReporter(pthread_t *pthread_id, tid_t *tid);\n\n  static const uptr kThreadCount = 20;\n\n  std::vector<pthread_t> pthread_ids_;\n  std::vector<tid_t> tids_;\n\n  TidReporterArgument thread_arg;\n};\n\n// Writes its TID once to reported_tid and waits until signaled to terminate.\nvoid *TidReporterThread(void *argument) {\n  TidReporterArgument *arg = reinterpret_cast<TidReporterArgument *>(argument);\n  pthread_mutex_lock(&arg->tid_reported_mutex);\n  arg->reported_tid = GetTid();\n  pthread_cond_broadcast(&arg->tid_reported_cond);\n  pthread_mutex_unlock(&arg->tid_reported_mutex);\n\n  pthread_mutex_lock(&arg->terminate_thread_mutex);\n  while (!arg->terminate_thread)\n    pthread_cond_wait(&arg->terminate_thread_cond,\n                      &arg->terminate_thread_mutex);\n  pthread_mutex_unlock(&arg->terminate_thread_mutex);\n  return NULL;\n}\n\nvoid ThreadListerTest::SpawnTidReporter(pthread_t *pthread_id, tid_t *tid) {\n  pthread_mutex_lock(&thread_arg.tid_reported_mutex);\n  thread_arg.reported_tid = -1;\n  ASSERT_EQ(0, pthread_create(pthread_id, NULL,\n                              TidReporterThread,\n                              &thread_arg));\n  while (thread_arg.reported_tid == (tid_t)(-1))\n    pthread_cond_wait(&thread_arg.tid_reported_cond,\n                      &thread_arg.tid_reported_mutex);\n  
pthread_mutex_unlock(&thread_arg.tid_reported_mutex);\n  *tid = thread_arg.reported_tid;\n}\n\nstatic std::vector<tid_t> ReadTidsToVector(ThreadLister *thread_lister) {\n  std::vector<tid_t> listed_tids;\n  InternalMmapVector<tid_t> threads(128);\n  EXPECT_TRUE(thread_lister->ListThreads(&threads));\n  return std::vector<tid_t>(threads.begin(), threads.end());\n}\n\nstatic bool Includes(std::vector<tid_t> first, std::vector<tid_t> second) {\n  std::sort(first.begin(), first.end());\n  std::sort(second.begin(), second.end());\n  return std::includes(first.begin(), first.end(),\n                       second.begin(), second.end());\n}\n\nstatic bool HasElement(const std::vector<tid_t> &vector, tid_t element) {\n  return std::find(vector.begin(), vector.end(), element) != vector.end();\n}\n\n// ThreadLister's output should include the current thread's TID and the TID of\n// every thread we spawned.\nTEST_F(ThreadListerTest, ThreadListerSeesAllSpawnedThreads) {\n  tid_t self_tid = GetTid();\n  ThreadLister thread_lister(getpid());\n  std::vector<tid_t> listed_tids = ReadTidsToVector(&thread_lister);\n  ASSERT_TRUE(HasElement(listed_tids, self_tid));\n  ASSERT_TRUE(Includes(listed_tids, tids_));\n}\n\nTEST_F(ThreadListerTest, DoNotForgetThreads) {\n  ThreadLister thread_lister(getpid());\n\n  // Run the loop body twice, because ThreadLister might behave differently if\n  // called on a freshly created object.\n  for (uptr i = 0; i < 2; i++) {\n    std::vector<tid_t> listed_tids = ReadTidsToVector(&thread_lister);\n    ASSERT_TRUE(Includes(listed_tids, tids_));\n  }\n}\n\n// If new threads have spawned during ThreadLister object's lifetime, calling\n// relisting should cause ThreadLister to recognize their existence.\nTEST_F(ThreadListerTest, NewThreads) {\n  ThreadLister thread_lister(getpid());\n  std::vector<tid_t> threads_before_extra = ReadTidsToVector(&thread_lister);\n\n  pthread_t extra_pthread_id;\n  tid_t extra_tid;\n  SpawnTidReporter(&extra_pthread_id, 
&extra_tid);\n  // Register the new thread so it gets terminated in TearDown().\n  pthread_ids_.push_back(extra_pthread_id);\n\n  // It would be very bizarre if the new TID had been listed before we even\n  // spawned that thread, but it would also cause a false success in this test,\n  // so better check for that.\n  ASSERT_FALSE(HasElement(threads_before_extra, extra_tid));\n\n  std::vector<tid_t> threads_after_extra = ReadTidsToVector(&thread_lister);\n  ASSERT_TRUE(HasElement(threads_after_extra, extra_tid));\n}\n\nTEST(SanitizerCommon, SetEnvTest) {\n  const char kEnvName[] = \"ENV_FOO\";\n  SetEnv(kEnvName, \"value\");\n  EXPECT_STREQ(\"value\", getenv(kEnvName));\n  unsetenv(kEnvName);\n  EXPECT_EQ(0, getenv(kEnvName));\n}\n\n#if (defined(__x86_64__) || defined(__i386__)) && !SANITIZER_ANDROID\n// libpthread puts the thread descriptor at the end of stack space.\nvoid *thread_descriptor_size_test_func(void *arg) {\n  uptr descr_addr = (uptr)pthread_self();\n  pthread_attr_t attr;\n  pthread_getattr_np(pthread_self(), &attr);\n  void *stackaddr;\n  size_t stacksize;\n  pthread_attr_getstack(&attr, &stackaddr, &stacksize);\n  return (void *)((uptr)stackaddr + stacksize - descr_addr);\n}\n\nTEST(SanitizerLinux, ThreadDescriptorSize) {\n  pthread_t tid;\n  void *result;\n  ASSERT_EQ(0, pthread_create(&tid, 0, thread_descriptor_size_test_func, 0));\n  ASSERT_EQ(0, pthread_join(tid, &result));\n  EXPECT_EQ((uptr)result, ThreadDescriptorSize());\n}\n#endif\n\nTEST(SanitizerCommon, LibraryNameIs) {\n  EXPECT_FALSE(LibraryNameIs(\"\", \"\"));\n\n  char full_name[256];\n  const char *paths[] = { \"\", \"/\", \"/path/to/\" };\n  const char *suffixes[] = { \"\", \"-linux\", \".1.2\", \"-linux.1.2\" };\n  const char *base_names[] = { \"lib\", \"lib.0\", \"lib-i386\" };\n  const char *wrong_names[] = { \"\", \"lib.9\", \"lib-x86_64\" };\n  for (uptr i = 0; i < ARRAY_SIZE(paths); i++)\n    for (uptr j = 0; j < ARRAY_SIZE(suffixes); j++) {\n      for (uptr k = 0; k < 
ARRAY_SIZE(base_names); k++) {\n        internal_snprintf(full_name, ARRAY_SIZE(full_name), \"%s%s%s.so\",\n                          paths[i], base_names[k], suffixes[j]);\n        EXPECT_TRUE(LibraryNameIs(full_name, base_names[k]))\n            << \"Full name \" << full_name\n            << \" doesn't match base name \" << base_names[k];\n        for (uptr m = 0; m < ARRAY_SIZE(wrong_names); m++)\n          EXPECT_FALSE(LibraryNameIs(full_name, wrong_names[m]))\n            << \"Full name \" << full_name\n            << \" matches base name \" << wrong_names[m];\n      }\n    }\n}\n\n#if defined(__mips64)\n// Effectively, this is a test for ThreadDescriptorSize() which is used to\n// compute ThreadSelf().\nTEST(SanitizerLinux, ThreadSelfTest) {\n  ASSERT_EQ(pthread_self(), ThreadSelf());\n}\n#endif\n\nTEST(SanitizerCommon, StartSubprocessTest) {\n  int pipe_fds[2];\n  ASSERT_EQ(0, pipe(pipe_fds));\n#if SANITIZER_ANDROID\n  const char *shell = \"/system/bin/sh\";\n#else\n  const char *shell = \"/bin/sh\";\n#endif\n  const char *argv[] = {shell, \"-c\", \"echo -n 'hello'\", (char *)NULL};\n  int pid = StartSubprocess(shell, argv, GetEnviron(),\n                            /* stdin */ kInvalidFd, /* stdout */ pipe_fds[1]);\n  ASSERT_GT(pid, 0);\n\n  // wait for process to finish.\n  while (IsProcessRunning(pid)) {\n  }\n  ASSERT_FALSE(IsProcessRunning(pid));\n\n  char buffer[256];\n  {\n    char *ptr = buffer;\n    uptr bytes_read;\n    while (ReadFromFile(pipe_fds[0], ptr, 256, &bytes_read)) {\n      if (!bytes_read) {\n        break;\n      }\n      ptr += bytes_read;\n    }\n    ASSERT_EQ(5, ptr - buffer);\n    *ptr = 0;\n  }\n  ASSERT_EQ(0, strcmp(buffer, \"hello\")) << \"Buffer: \" << buffer;\n  internal_close(pipe_fds[0]);\n}\n\n}  // namespace __sanitizer\n\n#endif  // SANITIZER_LINUX\n"
  },
  {
    "path": "runtime/sanitizer_common/tests/sanitizer_list_test.cpp",
    "content": "//===-- sanitizer_list_test.cpp -------------------------------------------===//\n//\n// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n// See https://llvm.org/LICENSE.txt for license information.\n// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n//\n//===----------------------------------------------------------------------===//\n//\n// This file is a part of ThreadSanitizer/AddressSanitizer runtime.\n//\n//===----------------------------------------------------------------------===//\n#include \"sanitizer_common/sanitizer_list.h\"\n#include \"gtest/gtest.h\"\n\nnamespace __sanitizer {\n\nstruct ListItem {\n  ListItem *next;\n};\n\ntypedef IntrusiveList<ListItem> List;\n\nstatic List static_list;\n\nstatic void SetList(List *l, ListItem *x = 0,\n                    ListItem *y = 0, ListItem *z = 0) {\n  l->clear();\n  if (x) l->push_back(x);\n  if (y) l->push_back(y);\n  if (z) l->push_back(z);\n}\n\nstatic void CheckList(List *l, ListItem *i1, ListItem *i2 = 0, ListItem *i3 = 0,\n                      ListItem *i4 = 0, ListItem *i5 = 0, ListItem *i6 = 0) {\n  if (i1) {\n    CHECK_EQ(l->front(), i1);\n    l->pop_front();\n  }\n  if (i2) {\n    CHECK_EQ(l->front(), i2);\n    l->pop_front();\n  }\n  if (i3) {\n    CHECK_EQ(l->front(), i3);\n    l->pop_front();\n  }\n  if (i4) {\n    CHECK_EQ(l->front(), i4);\n    l->pop_front();\n  }\n  if (i5) {\n    CHECK_EQ(l->front(), i5);\n    l->pop_front();\n  }\n  if (i6) {\n    CHECK_EQ(l->front(), i6);\n    l->pop_front();\n  }\n  CHECK(l->empty());\n}\n\nTEST(SanitizerCommon, IntrusiveList) {\n  ListItem items[6];\n  CHECK_EQ(static_list.size(), 0);\n\n  List l;\n  l.clear();\n\n  ListItem *x = &items[0];\n  ListItem *y = &items[1];\n  ListItem *z = &items[2];\n  ListItem *a = &items[3];\n  ListItem *b = &items[4];\n  ListItem *c = &items[5];\n\n  CHECK_EQ(l.size(), 0);\n  l.push_back(x);\n  CHECK_EQ(l.size(), 1);\n  CHECK_EQ(l.back(), x);\n  CHECK_EQ(l.front(), 
x);\n  l.pop_front();\n  CHECK(l.empty());\n  l.CheckConsistency();\n\n  l.push_front(x);\n  CHECK_EQ(l.size(), 1);\n  CHECK_EQ(l.back(), x);\n  CHECK_EQ(l.front(), x);\n  l.pop_front();\n  CHECK(l.empty());\n  l.CheckConsistency();\n\n  l.push_front(x);\n  l.push_front(y);\n  l.push_front(z);\n  CHECK_EQ(l.size(), 3);\n  CHECK_EQ(l.front(), z);\n  CHECK_EQ(l.back(), x);\n  l.CheckConsistency();\n\n  l.pop_front();\n  CHECK_EQ(l.size(), 2);\n  CHECK_EQ(l.front(), y);\n  CHECK_EQ(l.back(), x);\n  l.pop_front();\n  l.pop_front();\n  CHECK(l.empty());\n  l.CheckConsistency();\n\n  l.push_back(x);\n  l.push_back(y);\n  l.push_back(z);\n  CHECK_EQ(l.size(), 3);\n  CHECK_EQ(l.front(), x);\n  CHECK_EQ(l.back(), z);\n  l.CheckConsistency();\n\n  l.pop_front();\n  CHECK_EQ(l.size(), 2);\n  CHECK_EQ(l.front(), y);\n  CHECK_EQ(l.back(), z);\n  l.pop_front();\n  l.pop_front();\n  CHECK(l.empty());\n  l.CheckConsistency();\n\n  l.push_back(x);\n  l.push_back(y);\n  l.push_back(z);\n  l.extract(x, y);\n  CHECK_EQ(l.size(), 2);\n  CHECK_EQ(l.front(), x);\n  CHECK_EQ(l.back(), z);\n  l.CheckConsistency();\n  l.extract(x, z);\n  CHECK_EQ(l.size(), 1);\n  CHECK_EQ(l.front(), x);\n  CHECK_EQ(l.back(), x);\n  l.CheckConsistency();\n  l.pop_front();\n  CHECK(l.empty());\n\n  List l1, l2;\n  l1.clear();\n  l2.clear();\n\n  l1.append_front(&l2);\n  CHECK(l1.empty());\n  CHECK(l2.empty());\n\n  l1.append_back(&l2);\n  CHECK(l1.empty());\n  CHECK(l2.empty());\n\n  SetList(&l1, x);\n  CheckList(&l1, x);\n\n  SetList(&l1, x, y, z);\n  SetList(&l2, a, b, c);\n  l1.append_back(&l2);\n  CheckList(&l1, x, y, z, a, b, c);\n  CHECK(l2.empty());\n\n  SetList(&l1, x, y);\n  SetList(&l2);\n  l1.append_front(&l2);\n  CheckList(&l1, x, y);\n  CHECK(l2.empty());\n}\n\nTEST(SanitizerCommon, IntrusiveListAppendEmpty) {\n  ListItem i;\n  List l;\n  l.clear();\n  l.push_back(&i);\n  List l2;\n  l2.clear();\n  l.append_back(&l2);\n  CHECK_EQ(l.back(), &i);\n  CHECK_EQ(l.front(), &i);\n  CHECK_EQ(l.size(), 
1);\n  l.append_front(&l2);\n  CHECK_EQ(l.back(), &i);\n  CHECK_EQ(l.front(), &i);\n  CHECK_EQ(l.size(), 1);\n}\n\n}  // namespace __sanitizer\n"
  },
  {
    "path": "runtime/sanitizer_common/tests/sanitizer_lzw_test.cpp",
    "content": "//===-- sanitizer_lzw_test.cpp ----------------------------------*- C++ -*-===//\n//\n// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n// See https://llvm.org/LICENSE.txt for license information.\n// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n//\n//===----------------------------------------------------------------------===//\n#include \"sanitizer_common/sanitizer_lzw.h\"\n\n#include <iterator>\n\n#include \"gtest/gtest.h\"\n#include \"sanitizer_hash.h\"\n\nnamespace __sanitizer {\n\ntemplate <typename T>\nstruct LzwTest : public ::testing::Test {\n  template <typename Generator>\n  void Run(size_t n, Generator gen) {\n    std::vector<T> data(n);\n    std::generate(data.begin(), data.end(), gen);\n\n    std::vector<u64> lzw;\n    LzwEncode<T>(data.begin(), data.end(), std::back_inserter(lzw));\n\n    std::vector<T> unlzw(data.size() * 2);\n    auto unlzw_end = LzwDecode<T>(lzw.begin(), lzw.end(), unlzw.data());\n    unlzw.resize(unlzw_end - unlzw.data());\n\n    EXPECT_EQ(data, unlzw);\n  }\n};\n\nstatic constexpr size_t kSizes[] = {0, 1, 2, 7, 13, 32, 129, 10000};\n\nusing LzwTestTypes = ::testing::Types<u8, u16, u32, u64>;\nTYPED_TEST_SUITE(LzwTest, LzwTestTypes, );\n\nTYPED_TEST(LzwTest, Same) {\n  MurMur2Hash64Builder h(0);\n  for (size_t sz : kSizes) {\n    u64 v = 0;\n    for (size_t i = 0; i < 100 && !this->HasFailure(); ++i) {\n      this->Run(sz, [&] { return v; });\n      h.add(i);\n      v = h.get();\n    }\n  }\n}\n\nTYPED_TEST(LzwTest, Increment) {\n  MurMur2Hash64Builder h(0);\n  for (size_t sz : kSizes) {\n    u64 v = 0;\n    for (size_t i = 0; i < 100 && !this->HasFailure(); ++i) {\n      this->Run(sz, [&v] { return v++; });\n      h.add(i);\n      v = h.get();\n    }\n  }\n}\n\nTYPED_TEST(LzwTest, IncrementMod) {\n  MurMur2Hash64Builder h(0);\n  for (size_t sz : kSizes) {\n    u64 v = 0;\n    for (size_t i = 1; i < 16 && !this->HasFailure(); ++i) {\n      this->Run(sz, [&] { return v++ 
% i; });\n      h.add(i);\n      v = h.get();\n    }\n  }\n}\n\nTYPED_TEST(LzwTest, RandomLimited) {\n  for (size_t sz : kSizes) {\n    for (size_t i = 1; i < 1000 && !this->HasFailure(); i *= 2) {\n      u64 v = 0;\n      this->Run(sz, [&] {\n        MurMur2Hash64Builder h(v % i /* Keep unique set limited */);\n        v = h.get();\n        return v;\n      });\n    }\n  }\n}\n\n}  // namespace __sanitizer\n"
  },
  {
    "path": "runtime/sanitizer_common/tests/sanitizer_mac_test.cpp",
    "content": "//===-- sanitizer_mac_test.cpp --------------------------------------------===//\n//\n// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n// See https://llvm.org/LICENSE.txt for license information.\n// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n//\n//===----------------------------------------------------------------------===//\n//\n// Tests for sanitizer_mac.{h,cpp}\n//\n//===----------------------------------------------------------------------===//\n\n#include \"sanitizer_common/sanitizer_platform.h\"\n#if SANITIZER_MAC\n\n#include \"sanitizer_common/sanitizer_mac.h\"\n\n#include \"gtest/gtest.h\"\n\n#include <sys/sysctl.h>  // sysctlbyname\n#include <mach/kern_return.h>  // KERN_SUCCESS\n\nnamespace __sanitizer {\n\nvoid ParseVersion(const char *vers, u16 *major, u16 *minor);\n\nTEST(SanitizerMac, ParseVersion) {\n  u16 major, minor;\n\n  ParseVersion(\"11.22.33\", &major, &minor);\n  EXPECT_EQ(major, 11);\n  EXPECT_EQ(minor, 22);\n\n  ParseVersion(\"1.2\", &major, &minor);\n  EXPECT_EQ(major, 1);\n  EXPECT_EQ(minor, 2);\n}\n\n// TODO(yln): Run sanitizer unit tests for the simulators (rdar://65680742)\n#if SANITIZER_IOSSIM\nTEST(SanitizerMac, GetMacosAlignedVersion) {\n  const char *vers_str;\n  if (SANITIZER_IOS || SANITIZER_TVOS) {\n    vers_str = \"13.0\";\n  } else if (SANITIZER_WATCHOS) {\n    vers_str = \"6.5\";\n  } else {\n    FAIL() << \"unsupported simulator runtime\";\n  }\n  setenv(\"SIMULATOR_RUNTIME_VERSION\", vers_str, /*overwrite=*/1);\n\n  MacosVersion vers = GetMacosAlignedVersion();\n  EXPECT_EQ(vers.major, 10);\n  EXPECT_EQ(vers.minor, 15);\n}\n#else\nTEST(SanitizerMac, GetMacosAlignedVersion) {\n  MacosVersion vers = GetMacosAlignedVersion();\n  std::ostringstream oss;\n  oss << vers.major << '.' 
<< vers.minor;\n  std::string actual = oss.str();\n\n  char buf[100];\n  size_t len = sizeof(buf);\n  int res = sysctlbyname(\"kern.osproductversion\", buf, &len, nullptr, 0);\n  ASSERT_EQ(res, KERN_SUCCESS);\n  std::string expected(buf);\n\n  // Prefix match\n  ASSERT_EQ(expected.compare(0, actual.size(), actual), 0);\n}\n#endif\n\nTEST(SanitizerMac, GetDarwinKernelVersion) {\n  DarwinKernelVersion vers = GetDarwinKernelVersion();\n  std::ostringstream oss;\n  oss << vers.major << '.' << vers.minor;\n  std::string actual = oss.str();\n\n  char buf[100];\n  size_t len = sizeof(buf);\n  int res = sysctlbyname(\"kern.osrelease\", buf, &len, nullptr, 0);\n  ASSERT_EQ(res, KERN_SUCCESS);\n  std::string expected(buf);\n\n  // Prefix match\n  ASSERT_EQ(expected.compare(0, actual.size(), actual), 0);\n}\n\n}  // namespace __sanitizer\n\n#endif  // SANITIZER_MAC\n"
  },
  {
    "path": "runtime/sanitizer_common/tests/sanitizer_mutex_test.cpp",
    "content": "//===-- sanitizer_mutex_test.cpp ------------------------------------------===//\n//\n// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n// See https://llvm.org/LICENSE.txt for license information.\n// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n//\n//===----------------------------------------------------------------------===//\n//\n// This file is a part of ThreadSanitizer/AddressSanitizer runtime.\n//\n//===----------------------------------------------------------------------===//\n#include \"sanitizer_common/sanitizer_mutex.h\"\n#include \"sanitizer_common/sanitizer_common.h\"\n\n#include \"sanitizer_pthread_wrappers.h\"\n\n#include \"gtest/gtest.h\"\n\n#include <string.h>\n\nnamespace __sanitizer {\n\ntemplate<typename MutexType>\nclass TestData {\n public:\n  explicit TestData(MutexType *mtx)\n      : mtx_(mtx) {\n    for (int i = 0; i < kSize; i++)\n      data_[i] = 0;\n  }\n\n  void Write() {\n    Lock l(mtx_);\n    T v0 = data_[0];\n    for (int i = 0; i < kSize; i++) {\n      mtx_->CheckLocked();\n      CHECK_EQ(data_[i], v0);\n      data_[i]++;\n    }\n  }\n\n  void TryWrite() {\n    if (!mtx_->TryLock())\n      return;\n    T v0 = data_[0];\n    for (int i = 0; i < kSize; i++) {\n      mtx_->CheckLocked();\n      CHECK_EQ(data_[i], v0);\n      data_[i]++;\n    }\n    mtx_->Unlock();\n  }\n\n  void Read() {\n    ReadLock l(mtx_);\n    T v0 = data_[0];\n    for (int i = 0; i < kSize; i++) {\n      mtx_->CheckReadLocked();\n      CHECK_EQ(data_[i], v0);\n    }\n  }\n\n  void Backoff() {\n    volatile T data[kSize] = {};\n    for (int i = 0; i < kSize; i++) {\n      data[i]++;\n      CHECK_EQ(data[i], 1);\n    }\n  }\n\n private:\n  typedef GenericScopedLock<MutexType> Lock;\n  typedef GenericScopedReadLock<MutexType> ReadLock;\n  static const int kSize = 64;\n  typedef u64 T;\n  MutexType *mtx_;\n  char pad_[kCacheLineSize];\n  T data_[kSize];\n};\n\nconst int kThreads = 8;\n#if 
SANITIZER_DEBUG\nconst int kIters = 16*1024;\n#else\nconst int kIters = 64*1024;\n#endif\n\ntemplate<typename MutexType>\nstatic void *lock_thread(void *param) {\n  TestData<MutexType> *data = (TestData<MutexType>*)param;\n  for (int i = 0; i < kIters; i++) {\n    data->Write();\n    data->Backoff();\n  }\n  return 0;\n}\n\ntemplate<typename MutexType>\nstatic void *try_thread(void *param) {\n  TestData<MutexType> *data = (TestData<MutexType>*)param;\n  for (int i = 0; i < kIters; i++) {\n    data->TryWrite();\n    data->Backoff();\n  }\n  return 0;\n}\n\ntemplate <typename MutexType>\nstatic void *read_write_thread(void *param) {\n  TestData<MutexType> *data = (TestData<MutexType> *)param;\n  for (int i = 0; i < kIters; i++) {\n    if ((i % 10) == 0)\n      data->Write();\n    else\n      data->Read();\n    data->Backoff();\n  }\n  return 0;\n}\n\ntemplate<typename MutexType>\nstatic void check_locked(MutexType *mtx) {\n  GenericScopedLock<MutexType> l(mtx);\n  mtx->CheckLocked();\n}\n\nTEST(SanitizerCommon, SpinMutex) {\n  SpinMutex mtx;\n  mtx.Init();\n  TestData<SpinMutex> data(&mtx);\n  pthread_t threads[kThreads];\n  for (int i = 0; i < kThreads; i++)\n    PTHREAD_CREATE(&threads[i], 0, lock_thread<SpinMutex>, &data);\n  for (int i = 0; i < kThreads; i++)\n    PTHREAD_JOIN(threads[i], 0);\n}\n\nTEST(SanitizerCommon, SpinMutexTry) {\n  SpinMutex mtx;\n  mtx.Init();\n  TestData<SpinMutex> data(&mtx);\n  pthread_t threads[kThreads];\n  for (int i = 0; i < kThreads; i++)\n    PTHREAD_CREATE(&threads[i], 0, try_thread<SpinMutex>, &data);\n  for (int i = 0; i < kThreads; i++)\n    PTHREAD_JOIN(threads[i], 0);\n}\n\nTEST(SanitizerCommon, Mutex) {\n  Mutex mtx;\n  TestData<Mutex> data(&mtx);\n  pthread_t threads[kThreads];\n  for (int i = 0; i < kThreads; i++)\n    PTHREAD_CREATE(&threads[i], 0, read_write_thread<Mutex>, &data);\n  for (int i = 0; i < kThreads; i++) PTHREAD_JOIN(threads[i], 0);\n}\n\nstruct SemaphoreData {\n  Semaphore *sem;\n  bool done;\n};\n\nvoid 
*SemaphoreThread(void *arg) {\n  auto data = static_cast<SemaphoreData *>(arg);\n  data->sem->Wait();\n  data->done = true;\n  return nullptr;\n}\n\nTEST(SanitizerCommon, Semaphore) {\n  Semaphore sem;\n  sem.Post(1);\n  sem.Wait();\n  sem.Post(3);\n  sem.Wait();\n  sem.Wait();\n  sem.Wait();\n\n  SemaphoreData data = {&sem, false};\n  pthread_t thread;\n  PTHREAD_CREATE(&thread, nullptr, SemaphoreThread, &data);\n  internal_sleep(1);\n  CHECK(!data.done);\n  sem.Post(1);\n  PTHREAD_JOIN(thread, nullptr);\n}\n\n}  // namespace __sanitizer\n"
  },
  {
    "path": "runtime/sanitizer_common/tests/sanitizer_nolibc_test.cpp",
    "content": "//===-- sanitizer_nolibc_test.cpp -----------------------------------------===//\n//\n// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n// See https://llvm.org/LICENSE.txt for license information.\n// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n//\n//===----------------------------------------------------------------------===//\n//\n// This file is a part of ThreadSanitizer/AddressSanitizer runtime.\n// Tests for libc independence of sanitizer_common.\n//\n//===----------------------------------------------------------------------===//\n\n#include \"sanitizer_common/sanitizer_platform.h\"\n\n#include \"gtest/gtest.h\"\n\n#include <stdlib.h>\n\nextern const char *argv0;\n\n#if SANITIZER_LINUX && defined(__x86_64__)\nTEST(SanitizerCommon, NolibcMain) {\n  std::string NolibcTestPath = argv0;\n  NolibcTestPath += \"-Nolibc\";\n  int status = system(NolibcTestPath.c_str());\n  EXPECT_EQ(true, WIFEXITED(status));\n  EXPECT_EQ(0, WEXITSTATUS(status));\n}\n#endif\n"
  },
  {
    "path": "runtime/sanitizer_common/tests/sanitizer_nolibc_test_main.cpp",
    "content": "//===-- sanitizer_nolibc_test_main.cpp ------------------------------------===//\n//\n// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n// See https://llvm.org/LICENSE.txt for license information.\n// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n//\n//===----------------------------------------------------------------------===//\n//\n// This file is a part of ThreadSanitizer/AddressSanitizer runtime.\n// Tests for libc independence of sanitizer_common.\n//\n//===----------------------------------------------------------------------===//\n\n#include \"sanitizer_common/sanitizer_libc.h\"\n\nextern \"C\" void _start() {\n  __sanitizer::internal__exit(0);\n}\n"
  },
  {
    "path": "runtime/sanitizer_common/tests/sanitizer_posix_test.cpp",
    "content": "//===-- sanitizer_posix_test.cpp ------------------------------------------===//\n//\n// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n// See https://llvm.org/LICENSE.txt for license information.\n// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n//\n//===----------------------------------------------------------------------===//\n//\n// Tests for POSIX-specific code.\n//\n//===----------------------------------------------------------------------===//\n\n#include \"sanitizer_common/sanitizer_platform.h\"\n#if SANITIZER_POSIX\n\n#include \"sanitizer_common/sanitizer_common.h\"\n#include \"gtest/gtest.h\"\n\n#include <pthread.h>\n#include <sys/mman.h>\n\nnamespace __sanitizer {\n\nstatic pthread_key_t key;\nstatic bool destructor_executed;\n\nextern \"C\"\nvoid destructor(void *arg) {\n  uptr iter = reinterpret_cast<uptr>(arg);\n  if (iter > 1) {\n    ASSERT_EQ(0, pthread_setspecific(key, reinterpret_cast<void *>(iter - 1)));\n    return;\n  }\n  destructor_executed = true;\n}\n\nextern \"C\"\nvoid *thread_func(void *arg) {\n  return reinterpret_cast<void*>(pthread_setspecific(key, arg));\n}\n\nstatic void SpawnThread(uptr iteration) {\n  destructor_executed = false;\n  pthread_t tid;\n  ASSERT_EQ(0, pthread_create(&tid, 0, &thread_func,\n                              reinterpret_cast<void *>(iteration)));\n  void *retval;\n  ASSERT_EQ(0, pthread_join(tid, &retval));\n  ASSERT_EQ(0, retval);\n}\n\nTEST(SanitizerCommon, PthreadDestructorIterations) {\n  ASSERT_EQ(0, pthread_key_create(&key, &destructor));\n  SpawnThread(GetPthreadDestructorIterations());\n  EXPECT_TRUE(destructor_executed);\n  SpawnThread(GetPthreadDestructorIterations() + 1);\n#if SANITIZER_SOLARIS\n  // Solaris continues calling destructors beyond PTHREAD_DESTRUCTOR_ITERATIONS.\n  EXPECT_TRUE(destructor_executed);\n#else\n  EXPECT_FALSE(destructor_executed);\n#endif\n  ASSERT_EQ(0, pthread_key_delete(key));\n}\n\nTEST(SanitizerCommon, 
IsAccessibleMemoryRange) {\n  const int page_size = GetPageSize();\n  uptr mem = (uptr)mmap(0, 3 * page_size, PROT_READ | PROT_WRITE,\n                        MAP_PRIVATE | MAP_ANON, -1, 0);\n  // Protect the middle page.\n  mprotect((void *)(mem + page_size), page_size, PROT_NONE);\n  EXPECT_TRUE(IsAccessibleMemoryRange(mem, page_size - 1));\n  EXPECT_TRUE(IsAccessibleMemoryRange(mem, page_size));\n  EXPECT_FALSE(IsAccessibleMemoryRange(mem, page_size + 1));\n  EXPECT_TRUE(IsAccessibleMemoryRange(mem + page_size - 1, 1));\n  EXPECT_FALSE(IsAccessibleMemoryRange(mem + page_size - 1, 2));\n  EXPECT_FALSE(IsAccessibleMemoryRange(mem + 2 * page_size - 1, 1));\n  EXPECT_TRUE(IsAccessibleMemoryRange(mem + 2 * page_size, page_size));\n  EXPECT_FALSE(IsAccessibleMemoryRange(mem, 3 * page_size));\n  EXPECT_FALSE(IsAccessibleMemoryRange(0x0, 2));\n}\n\n}  // namespace __sanitizer\n\n#endif  // SANITIZER_POSIX\n"
  },
  {
    "path": "runtime/sanitizer_common/tests/sanitizer_printf_test.cpp",
    "content": "//===-- sanitizer_printf_test.cpp -----------------------------------------===//\n//\n// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n// See https://llvm.org/LICENSE.txt for license information.\n// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n//\n//===----------------------------------------------------------------------===//\n//\n// Tests for sanitizer_printf.cpp\n//\n//===----------------------------------------------------------------------===//\n#include \"sanitizer_common/sanitizer_common.h\"\n#include \"sanitizer_common/sanitizer_libc.h\"\n#include \"gtest/gtest.h\"\n\n#include <string.h>\n#include <limits.h>\n\nnamespace __sanitizer {\n\nTEST(Printf, Basic) {\n  char buf[1024];\n  uptr len = internal_snprintf(\n      buf, sizeof(buf), \"a%db%zdc%ue%zuf%xh%zxq%pe%sr\", (int)-1, (uptr)-2,\n      (unsigned)-4, (uptr)5, (unsigned)10, (uptr)11, (void *)0x123, \"_string_\");\n  EXPECT_EQ(len, strlen(buf));\n\n  std::string expectedString = \"a-1b-2c4294967292e5fahbq0x\";\n  expectedString += std::string(SANITIZER_POINTER_FORMAT_LENGTH - 3, '0');\n  expectedString += \"123e_string_r\";\n  EXPECT_STREQ(expectedString.c_str(), buf);\n}\n\nTEST(Printf, OverflowStr) {\n  char buf[] = \"123456789\";\n  uptr len = internal_snprintf(buf, 4, \"%s\", \"abcdef\");\n  EXPECT_EQ(len, (uptr)6);\n  EXPECT_STREQ(\"abc\", buf);\n  EXPECT_EQ(buf[3], 0);\n  EXPECT_EQ(buf[4], '5');\n  EXPECT_EQ(buf[5], '6');\n  EXPECT_EQ(buf[6], '7');\n  EXPECT_EQ(buf[7], '8');\n  EXPECT_EQ(buf[8], '9');\n  EXPECT_EQ(buf[9], 0);\n}\n\nTEST(Printf, OverflowInt) {\n  char buf[] = \"123456789\";\n  internal_snprintf(buf, 4, \"%d\", -123456789);\n  EXPECT_STREQ(\"-12\", buf);\n  EXPECT_EQ(buf[3], 0);\n  EXPECT_EQ(buf[4], '5');\n  EXPECT_EQ(buf[5], '6');\n  EXPECT_EQ(buf[6], '7');\n  EXPECT_EQ(buf[7], '8');\n  EXPECT_EQ(buf[8], '9');\n  EXPECT_EQ(buf[9], 0);\n}\n\nTEST(Printf, OverflowUint) {\n  char buf[] = \"123456789\";\n  uptr val;\n  if 
(sizeof(val) == 4) {\n    val = (uptr)0x12345678;\n  } else {\n    val = (uptr)0x123456789ULL;\n  }\n  internal_snprintf(buf, 4, \"a%zx\", val);\n  EXPECT_STREQ(\"a12\", buf);\n  EXPECT_EQ(buf[3], 0);\n  EXPECT_EQ(buf[4], '5');\n  EXPECT_EQ(buf[5], '6');\n  EXPECT_EQ(buf[6], '7');\n  EXPECT_EQ(buf[7], '8');\n  EXPECT_EQ(buf[8], '9');\n  EXPECT_EQ(buf[9], 0);\n}\n\nTEST(Printf, OverflowPtr) {\n  char buf[] = \"123456789\";\n  void *p;\n  if (sizeof(p) == 4) {\n    p = (void*)0x1234567;\n  } else {\n    p = (void*)0x123456789ULL;\n  }\n  internal_snprintf(buf, 4, \"%p\", p);\n  EXPECT_STREQ(\"0x0\", buf);\n  EXPECT_EQ(buf[3], 0);\n  EXPECT_EQ(buf[4], '5');\n  EXPECT_EQ(buf[5], '6');\n  EXPECT_EQ(buf[6], '7');\n  EXPECT_EQ(buf[7], '8');\n  EXPECT_EQ(buf[8], '9');\n  EXPECT_EQ(buf[9], 0);\n}\n\n#if defined(_WIN32)\n// Oh well, MSVS headers don't define snprintf.\n# define snprintf _snprintf\n#endif\n\ntemplate<typename T>\nstatic void TestAgainstLibc(const char *fmt, T arg1, T arg2) {\n  char buf[1024];\n  uptr len = internal_snprintf(buf, sizeof(buf), fmt, arg1, arg2);\n  char buf2[1024];\n  snprintf(buf2, sizeof(buf2), fmt, arg1, arg2);\n  EXPECT_EQ(len, strlen(buf));\n  EXPECT_STREQ(buf2, buf);\n}\n\nTEST(Printf, MinMax) {\n  TestAgainstLibc<int>(\"%d-%d\", INT_MIN, INT_MAX);\n  TestAgainstLibc<unsigned>(\"%u-%u\", 0, UINT_MAX);\n  TestAgainstLibc<unsigned>(\"%x-%x\", 0, UINT_MAX);\n  TestAgainstLibc<long>(\"%ld-%ld\", LONG_MIN, LONG_MAX);\n  TestAgainstLibc<unsigned long>(\"%lu-%lu\", 0, LONG_MAX);\n  TestAgainstLibc<unsigned long>(\"%lx-%lx\", 0, LONG_MAX);\n#if !defined(_WIN32)\n  // %z* format doesn't seem to be supported by MSVS.\n  TestAgainstLibc<long>(\"%zd-%zd\", LONG_MIN, LONG_MAX);\n  TestAgainstLibc<unsigned long>(\"%zu-%zu\", 0, ULONG_MAX);\n  TestAgainstLibc<unsigned long>(\"%zx-%zx\", 0, ULONG_MAX);\n#endif\n}\n\nTEST(Printf, Padding) {\n  TestAgainstLibc<int>(\"%3d - %3d\", 1, 0);\n  TestAgainstLibc<int>(\"%3d - %3d\", -1, 123);\n  
TestAgainstLibc<int>(\"%3d - %3d\", -1, -123);\n  TestAgainstLibc<int>(\"%3d - %3d\", 12, 1234);\n  TestAgainstLibc<int>(\"%3d - %3d\", -12, -1234);\n  TestAgainstLibc<int>(\"%03d - %03d\", 1, 0);\n  TestAgainstLibc<int>(\"%03d - %03d\", -1, 123);\n  TestAgainstLibc<int>(\"%03d - %03d\", -1, -123);\n  TestAgainstLibc<int>(\"%03d - %03d\", 12, 1234);\n  TestAgainstLibc<int>(\"%03d - %03d\", -12, -1234);\n}\n\nTEST(Printf, Precision) {\n  char buf[1024];\n  uptr len = internal_snprintf(buf, sizeof(buf), \"%.*s\", 3, \"12345\");\n  EXPECT_EQ(3U, len);\n  EXPECT_STREQ(\"123\", buf);\n  len = internal_snprintf(buf, sizeof(buf), \"%.*s\", 6, \"12345\");\n  EXPECT_EQ(5U, len);\n  EXPECT_STREQ(\"12345\", buf);\n  len = internal_snprintf(buf, sizeof(buf), \"%-6s\", \"12345\");\n  EXPECT_EQ(6U, len);\n  EXPECT_STREQ(\"12345 \", buf);\n  // Check that width does not overflow the smaller buffer, although\n  // 10 chars is requested, it stops at the buffer size, 8.\n  len = internal_snprintf(buf, 8, \"%-10s\", \"12345\");\n  EXPECT_EQ(10U, len);  // The required size reported.\n  EXPECT_STREQ(\"12345  \", buf);\n}\n\n}  // namespace __sanitizer\n"
  },
  {
    "path": "runtime/sanitizer_common/tests/sanitizer_procmaps_test.cpp",
    "content": "//===-- sanitizer_procmaps_test.cpp ---------------------------------------===//\n//\n// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n// See https://llvm.org/LICENSE.txt for license information.\n// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n//\n//===----------------------------------------------------------------------===//\n//\n// This file is a part of ThreadSanitizer/AddressSanitizer runtime.\n//\n//===----------------------------------------------------------------------===//\n#if !defined(_WIN32)  // There are no /proc/maps on Windows.\n\n#  include \"sanitizer_common/sanitizer_procmaps.h\"\n\n#  include <stdlib.h>\n#  include <string.h>\n\n#  include <vector>\n\n#  include \"gtest/gtest.h\"\n\nstatic void noop() {}\nextern const char *argv0;\n\nnamespace __sanitizer {\n\n#if SANITIZER_LINUX && !SANITIZER_ANDROID\nTEST(MemoryMappingLayout, CodeRange) {\n  uptr start, end;\n  bool res = GetCodeRangeForFile(\"[vdso]\", &start, &end);\n  EXPECT_EQ(res, true);\n  EXPECT_GT(start, 0U);\n  EXPECT_LT(start, end);\n}\n#endif\n\nTEST(MemoryMappingLayout, DumpListOfModules) {\n  const char *last_slash = strrchr(argv0, '/');\n  const char *binary_name = last_slash ? 
last_slash + 1 : argv0;\n  MemoryMappingLayout memory_mapping(false);\n  const uptr kMaxModules = 100;\n  InternalMmapVector<LoadedModule> modules;\n  modules.reserve(kMaxModules);\n  memory_mapping.DumpListOfModules(&modules);\n  EXPECT_GT(modules.size(), 0U);\n  bool found = false;\n  for (uptr i = 0; i < modules.size(); ++i) {\n    if (modules[i].containsAddress((uptr)&noop)) {\n      // Verify that the module name is sane.\n      if (strstr(modules[i].full_name(), binary_name) != 0)\n        found = true;\n    }\n    modules[i].clear();\n  }\n  EXPECT_TRUE(found);\n}\n\nTEST(MemoryMapping, LoadedModuleArchAndUUID) {\n  if (SANITIZER_MAC) {\n    MemoryMappingLayout memory_mapping(false);\n    const uptr kMaxModules = 100;\n    InternalMmapVector<LoadedModule> modules;\n    modules.reserve(kMaxModules);\n    memory_mapping.DumpListOfModules(&modules);\n    for (uptr i = 0; i < modules.size(); ++i) {\n      ModuleArch arch = modules[i].arch();\n      // Darwin unit tests are only run on i386/x86_64/x86_64h.\n      if (SANITIZER_WORDSIZE == 32) {\n        EXPECT_EQ(arch, kModuleArchI386);\n      } else if (SANITIZER_WORDSIZE == 64) {\n        EXPECT_TRUE(arch == kModuleArchX86_64 || arch == kModuleArchX86_64H);\n      }\n      const u8 *uuid = modules[i].uuid();\n      u8 null_uuid[kModuleUUIDSize] = {0};\n      EXPECT_NE(memcmp(null_uuid, uuid, kModuleUUIDSize), 0);\n    }\n  }\n}\n\n#  if (SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_NETBSD || \\\n       SANITIZER_SOLARIS) &&                                       \\\n      defined(_LP64)\nconst char *const parse_unix_input = R\"(\n7fb9862f1000-7fb9862f3000 rw-p 00000000 00:00 0 \nSize:                  8 kB\nRss:                   4 kB\n7fb9864ae000-7fb9864b1000 r--p 001ba000 fd:01 22413919                   /lib/x86_64-linux-gnu/libc-2.32.so\nSize:                 12 kB\nRss:                  12 kB\n)\";\n\nTEST(MemoryMapping, ParseUnixMemoryProfile) {\n  struct entry {\n    uptr p;\n    uptr rss;\n    bool 
file;\n  };\n  typedef std::vector<entry> entries_t;\n  entries_t entries;\n  std::vector<char> input(parse_unix_input,\n                          parse_unix_input + strlen(parse_unix_input));\n  ParseUnixMemoryProfile(\n      [](uptr p, uptr rss, bool file, uptr *mem) {\n        reinterpret_cast<entries_t *>(mem)->push_back({p, rss, file});\n      },\n      reinterpret_cast<uptr *>(&entries), &input[0], input.size());\n  EXPECT_EQ(entries.size(), 2ul);\n  EXPECT_EQ(entries[0].p, 0x7fb9862f1000ul);\n  EXPECT_EQ(entries[0].rss, 4ul << 10);\n  EXPECT_EQ(entries[0].file, false);\n  EXPECT_EQ(entries[1].p, 0x7fb9864ae000ul);\n  EXPECT_EQ(entries[1].rss, 12ul << 10);\n  EXPECT_EQ(entries[1].file, true);\n}\n\nTEST(MemoryMapping, ParseUnixMemoryProfileTruncated) {\n  // ParseUnixMemoryProfile used to crash on truncated inputs.\n  // This test allocates 2 pages, protects the second one\n  // and places the input at the very end of the first page\n  // to test for over-reads.\n  uptr page = GetPageSizeCached();\n  char *mem = static_cast<char *>(\n      MmapOrDie(2 * page, \"ParseUnixMemoryProfileTruncated\"));\n  EXPECT_TRUE(MprotectNoAccess(reinterpret_cast<uptr>(mem + page), page));\n  const uptr len = strlen(parse_unix_input);\n  for (uptr i = 0; i < len; i++) {\n    char *smaps = mem + page - len + i;\n    memcpy(smaps, parse_unix_input, len - i);\n    ParseUnixMemoryProfile([](uptr p, uptr rss, bool file, uptr *mem) {},\n                           nullptr, smaps, len - i);\n  }\n  UnmapOrDie(mem, 2 * page);\n}\n#  endif\n\n}  // namespace __sanitizer\n#endif  // !defined(_WIN32)\n"
  },
  {
    "path": "runtime/sanitizer_common/tests/sanitizer_pthread_wrappers.h",
    "content": "//===-- sanitizer_pthread_wrappers.h ----------------------------*- C++ -*-===//\n//\n// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n// See https://llvm.org/LICENSE.txt for license information.\n// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n//\n//===----------------------------------------------------------------------===//\n//\n// This file is a part of *Sanitizer runtime.\n// It provides handy wrappers for thread manipulation, that:\n//  a) assert on any failure rather than returning an error code\n//  b) defines pthread-like interface on platforms where where <pthread.h>\n//     is not supplied by default.\n//\n//===----------------------------------------------------------------------===//\n\n#ifndef SANITIZER_PTHREAD_WRAPPERS_H\n#define SANITIZER_PTHREAD_WRAPPERS_H\n\n#include \"sanitizer_test_utils.h\"\n\n#if !defined(_WIN32)\n# include <pthread.h>\n// Simply forward the arguments and check that the pthread functions succeed.\n# define PTHREAD_CREATE(a, b, c, d) ASSERT_EQ(0, pthread_create(a, b, c, d))\n# define PTHREAD_JOIN(a, b) ASSERT_EQ(0, pthread_join(a, b))\n#else\ntypedef HANDLE pthread_t;\n\nstruct PthreadHelperCreateThreadInfo {\n  void *(*start_routine)(void *);\n  void *arg;\n};\n\ninline DWORD WINAPI PthreadHelperThreadProc(void *arg) {\n  PthreadHelperCreateThreadInfo *start_data =\n      reinterpret_cast<PthreadHelperCreateThreadInfo*>(arg);\n  (start_data->start_routine)(start_data->arg);\n  delete start_data;\n  return 0;\n}\n\ninline void PTHREAD_CREATE(pthread_t *thread, void *attr,\n                           void *(*start_routine)(void *), void *arg) {\n  ASSERT_EQ(0, attr) << \"Thread attributes are not supported yet.\";\n  PthreadHelperCreateThreadInfo *data = new PthreadHelperCreateThreadInfo;\n  data->start_routine = start_routine;\n  data->arg = arg;\n  *thread = CreateThread(0, 0, PthreadHelperThreadProc, data, 0, 0);\n  DWORD err = GetLastError();\n  ASSERT_NE(nullptr, 
*thread) << \"Failed to create a thread, got error 0x\"\n                              << std::hex << err;\n}\n\ninline void PTHREAD_JOIN(pthread_t thread, void **value_ptr) {\n  ASSERT_EQ(0, value_ptr) << \"Nonzero value_ptr is not supported yet.\";\n  ASSERT_EQ(WAIT_OBJECT_0, WaitForSingleObject(thread, INFINITE));\n  ASSERT_NE(0, CloseHandle(thread));\n}\n\ninline void pthread_exit(void *retval) {\n  ASSERT_EQ(0, retval) << \"Nonzero retval is not supported yet.\";\n  ExitThread(0);\n}\n#endif  // _WIN32\n\n#endif  // SANITIZER_PTHREAD_WRAPPERS_H\n"
  },
  {
    "path": "runtime/sanitizer_common/tests/sanitizer_quarantine_test.cpp",
    "content": "//===-- sanitizer_quarantine_test.cpp -------------------------------------===//\n//\n// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n// See https://llvm.org/LICENSE.txt for license information.\n// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n//\n//===----------------------------------------------------------------------===//\n//\n// This file is a part of ThreadSanitizer/AddressSanitizer runtime.\n//\n//===----------------------------------------------------------------------===//\n#include \"sanitizer_common/sanitizer_common.h\"\n#include \"sanitizer_common/sanitizer_quarantine.h\"\n#include \"gtest/gtest.h\"\n\n#include <stdlib.h>\n\nnamespace __sanitizer {\n\nstruct QuarantineCallback {\n  void Recycle(void *m) {}\n  void *Allocate(uptr size) {\n    return malloc(size);\n  }\n  void Deallocate(void *p) {\n    free(p);\n  }\n};\n\ntypedef QuarantineCache<QuarantineCallback> Cache;\n\nstatic void* kFakePtr = reinterpret_cast<void*>(0xFA83FA83);\nstatic const size_t kBlockSize = 8;\n\nstatic QuarantineCallback cb;\n\nstatic void DeallocateCache(Cache *cache) {\n  while (QuarantineBatch *batch = cache->DequeueBatch())\n    cb.Deallocate(batch);\n}\n\nTEST(SanitizerCommon, QuarantineBatchMerge) {\n  // Verify the trivial case.\n  QuarantineBatch into;\n  into.init(kFakePtr, 4UL);\n  QuarantineBatch from;\n  from.init(kFakePtr, 8UL);\n\n  into.merge(&from);\n\n  ASSERT_EQ(into.count, 2UL);\n  ASSERT_EQ(into.batch[0], kFakePtr);\n  ASSERT_EQ(into.batch[1], kFakePtr);\n  ASSERT_EQ(into.size, 12UL + sizeof(QuarantineBatch));\n  ASSERT_EQ(into.quarantined_size(), 12UL);\n\n  ASSERT_EQ(from.count, 0UL);\n  ASSERT_EQ(from.size, sizeof(QuarantineBatch));\n  ASSERT_EQ(from.quarantined_size(), 0UL);\n\n  // Merge the batch to the limit.\n  for (uptr i = 2; i < QuarantineBatch::kSize; ++i)\n    from.push_back(kFakePtr, 8UL);\n  ASSERT_TRUE(into.count + from.count == QuarantineBatch::kSize);\n  
ASSERT_TRUE(into.can_merge(&from));\n\n  into.merge(&from);\n  ASSERT_TRUE(into.count == QuarantineBatch::kSize);\n\n  // No more space, not even for one element.\n  from.init(kFakePtr, 8UL);\n\n  ASSERT_FALSE(into.can_merge(&from));\n}\n\nTEST(SanitizerCommon, QuarantineCacheMergeBatchesEmpty) {\n  Cache cache;\n  Cache to_deallocate;\n  cache.MergeBatches(&to_deallocate);\n\n  ASSERT_EQ(to_deallocate.Size(), 0UL);\n  ASSERT_EQ(to_deallocate.DequeueBatch(), nullptr);\n}\n\nTEST(SanitizerCommon, QuarantineCacheMergeBatchesOneBatch) {\n  Cache cache;\n  cache.Enqueue(cb, kFakePtr, kBlockSize);\n  ASSERT_EQ(kBlockSize + sizeof(QuarantineBatch), cache.Size());\n\n  Cache to_deallocate;\n  cache.MergeBatches(&to_deallocate);\n\n  // Nothing to merge, nothing to deallocate.\n  ASSERT_EQ(kBlockSize + sizeof(QuarantineBatch), cache.Size());\n\n  ASSERT_EQ(to_deallocate.Size(), 0UL);\n  ASSERT_EQ(to_deallocate.DequeueBatch(), nullptr);\n\n  DeallocateCache(&cache);\n}\n\nTEST(SanitizerCommon, QuarantineCacheMergeBatchesSmallBatches) {\n  // Make a cache with two batches small enough to merge.\n  Cache from;\n  from.Enqueue(cb, kFakePtr, kBlockSize);\n  Cache cache;\n  cache.Enqueue(cb, kFakePtr, kBlockSize);\n\n  cache.Transfer(&from);\n  ASSERT_EQ(kBlockSize * 2 + sizeof(QuarantineBatch) * 2, cache.Size());\n\n  Cache to_deallocate;\n  cache.MergeBatches(&to_deallocate);\n\n  // Batches merged, one batch to deallocate.\n  ASSERT_EQ(kBlockSize * 2 + sizeof(QuarantineBatch), cache.Size());\n  ASSERT_EQ(to_deallocate.Size(), sizeof(QuarantineBatch));\n\n  DeallocateCache(&cache);\n  DeallocateCache(&to_deallocate);\n}\n\nTEST(SanitizerCommon, QuarantineCacheMergeBatchesTooBigToMerge) {\n  const uptr kNumBlocks = QuarantineBatch::kSize - 1;\n\n  // Make a cache with two batches small enough to merge.\n  Cache from;\n  Cache cache;\n  for (uptr i = 0; i < kNumBlocks; ++i) {\n    from.Enqueue(cb, kFakePtr, kBlockSize);\n    cache.Enqueue(cb, kFakePtr, kBlockSize);\n  }\n  
cache.Transfer(&from);\n  ASSERT_EQ(kBlockSize * kNumBlocks * 2 +\n            sizeof(QuarantineBatch) * 2, cache.Size());\n\n  Cache to_deallocate;\n  cache.MergeBatches(&to_deallocate);\n\n  // Batches cannot be merged.\n  ASSERT_EQ(kBlockSize * kNumBlocks * 2 +\n            sizeof(QuarantineBatch) * 2, cache.Size());\n  ASSERT_EQ(to_deallocate.Size(), 0UL);\n\n  DeallocateCache(&cache);\n}\n\nTEST(SanitizerCommon, QuarantineCacheMergeBatchesALotOfBatches) {\n  const uptr kNumBatchesAfterMerge = 3;\n  const uptr kNumBlocks = QuarantineBatch::kSize * kNumBatchesAfterMerge;\n  const uptr kNumBatchesBeforeMerge = kNumBlocks;\n\n  // Make a cache with many small batches.\n  Cache cache;\n  for (uptr i = 0; i < kNumBlocks; ++i) {\n    Cache from;\n    from.Enqueue(cb, kFakePtr, kBlockSize);\n    cache.Transfer(&from);\n  }\n\n  ASSERT_EQ(kBlockSize * kNumBlocks +\n            sizeof(QuarantineBatch) * kNumBatchesBeforeMerge, cache.Size());\n\n  Cache to_deallocate;\n  cache.MergeBatches(&to_deallocate);\n\n  // All blocks should fit into 3 batches.\n  ASSERT_EQ(kBlockSize * kNumBlocks +\n            sizeof(QuarantineBatch) * kNumBatchesAfterMerge, cache.Size());\n\n  ASSERT_EQ(to_deallocate.Size(),\n            sizeof(QuarantineBatch) *\n                (kNumBatchesBeforeMerge - kNumBatchesAfterMerge));\n\n  DeallocateCache(&cache);\n  DeallocateCache(&to_deallocate);\n}\n\n}  // namespace __sanitizer\n"
  },
  {
    "path": "runtime/sanitizer_common/tests/sanitizer_ring_buffer_test.cpp",
    "content": "//===-- sanitizer_vector_test.cpp -----------------------------------------===//\n//\n// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n// See https://llvm.org/LICENSE.txt for license information.\n// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n//\n//===----------------------------------------------------------------------===//\n//\n// This file is a part of *Sanitizer runtime.\n//\n//===----------------------------------------------------------------------===//\n#include \"sanitizer_common/sanitizer_ring_buffer.h\"\n#include \"gtest/gtest.h\"\n\nnamespace __sanitizer {\n\nstruct LargeStruct {\n  int64_t v;\n  int64_t extra[3];\n\n  explicit LargeStruct(int64_t v) : v(v) {}\n  operator int64_t() { return v; }\n};\n\nstruct Struct10Bytes {\n  short t[3];\n};\n\nTEST(RingBuffer, Construct) {\n  RingBuffer<int64_t> *RBlong = RingBuffer<int64_t>::New(20);\n  EXPECT_EQ(RBlong->size(), 20U);\n  RBlong->Delete();\n}\n\ntemplate <class T> void TestRB() {\n  RingBuffer<T> *RB;\n  const size_t Sizes[] = {1, 2, 3, 5, 8, 16, 20, 40, 10000};\n  for (size_t Size : Sizes) {\n    RB = RingBuffer<T>::New(Size);\n    EXPECT_EQ(RB->size(), Size);\n    RB->Delete();\n  }\n\n  RB = RingBuffer<T>::New(4);\n  EXPECT_EQ(RB->size(), 4U);\n#define EXPECT_RING_BUFFER(a0, a1, a2, a3) \\\n  EXPECT_EQ((int64_t)(*RB)[0], (int64_t)a0);                 \\\n  EXPECT_EQ((int64_t)(*RB)[1], (int64_t)a1);                 \\\n  EXPECT_EQ((int64_t)(*RB)[2], (int64_t)a2);                 \\\n  EXPECT_EQ((int64_t)(*RB)[3], (int64_t)a3);\n\n  RB->push(T(1)); EXPECT_RING_BUFFER(1, 0, 0, 0);\n  RB->push(T(2)); EXPECT_RING_BUFFER(2, 1, 0, 0);\n  RB->push(T(3)); EXPECT_RING_BUFFER(3, 2, 1, 0);\n  RB->push(T(4)); EXPECT_RING_BUFFER(4, 3, 2, 1);\n  RB->push(T(5)); EXPECT_RING_BUFFER(5, 4, 3, 2);\n  RB->push(T(6)); EXPECT_RING_BUFFER(6, 5, 4, 3);\n  RB->push(T(7)); EXPECT_RING_BUFFER(7, 6, 5, 4);\n  RB->push(T(8)); EXPECT_RING_BUFFER(8, 7, 6, 5);\n  
RB->push(T(9)); EXPECT_RING_BUFFER(9, 8, 7, 6);\n  RB->push(T(10)); EXPECT_RING_BUFFER(10, 9, 8, 7);\n  RB->push(T(11)); EXPECT_RING_BUFFER(11, 10, 9, 8);\n  RB->push(T(12)); EXPECT_RING_BUFFER(12, 11, 10, 9);\n\n#undef EXPECT_RING_BUFFER\n}\n\n#if SANITIZER_WORDSIZE == 64\nTEST(RingBuffer, int64) {\n  TestRB<int64_t>();\n}\n\nTEST(RingBuffer, LargeStruct) {\n  TestRB<LargeStruct>();\n}\n\ntemplate<typename T>\nCompactRingBuffer<T> *AllocCompactRingBuffer(size_t count) {\n  size_t sz = sizeof(T) * count;\n  EXPECT_EQ(0ULL, sz % 4096);\n  void *p = MmapAlignedOrDieOnFatalError(sz, sz * 2, \"CompactRingBuffer\");\n  return new CompactRingBuffer<T>(p, sz);\n}\n\nTEST(CompactRingBuffer, int64) {\n  const size_t page_sizes[] = {1, 2, 4, 128};\n\n  for (size_t pages : page_sizes) {\n    size_t count = 4096 * pages / sizeof(int64_t);\n    auto R = AllocCompactRingBuffer<int64_t>(count);\n    int64_t top = count * 3 + 13;\n    for (int64_t i = 0; i < top; ++i) R->push(i);\n    for (int64_t i = 0; i < (int64_t)count; ++i)\n      EXPECT_EQ(top - i - 1, (*R)[i]);\n  }\n}\n#endif\n}  // namespace __sanitizer\n"
  },
  {
    "path": "runtime/sanitizer_common/tests/sanitizer_stack_store_test.cpp",
    "content": "//===-- sanitizer_stack_store_test.cpp --------------------------*- C++ -*-===//\n//\n// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n// See https://llvm.org/LICENSE.txt for license information.\n// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n//\n//===----------------------------------------------------------------------===//\n#include \"sanitizer_common/sanitizer_stack_store.h\"\n\n#include <algorithm>\n#include <numeric>\n#include <vector>\n\n#include \"gtest/gtest.h\"\n#include \"sanitizer_atomic.h\"\n#include \"sanitizer_hash.h\"\n#include \"sanitizer_stacktrace.h\"\n\nnamespace __sanitizer {\n\nclass StackStoreTest : public testing::Test {\n protected:\n  void SetUp() override {}\n  void TearDown() override { store_.TestOnlyUnmap(); }\n\n  template <typename Fn>\n  void ForEachTrace(Fn fn, uptr n = 1000000) {\n    std::vector<uptr> frames(kStackTraceMax);\n    std::iota(frames.begin(), frames.end(), 0x100000);\n    MurMur2HashBuilder h(0);\n    for (uptr i = 0; i < n; ++i) {\n      h.add(i);\n      u32 size = h.get() % kStackTraceMax;\n      h.add(i);\n      uptr tag = h.get() % 256;\n      StackTrace s(frames.data(), size, tag);\n      if (!s.size && !s.tag)\n        continue;\n      fn(s);\n      if (HasFailure())\n        return;\n      std::next_permutation(frames.begin(), frames.end());\n    };\n  }\n\n  using BlockInfo = StackStore::BlockInfo;\n\n  uptr GetTotalFramesCount() const {\n    return atomic_load_relaxed(&store_.total_frames_);\n  }\n\n  uptr CountReadyToPackBlocks() {\n    uptr res = 0;\n    for (BlockInfo& b : store_.blocks_) res += b.Stored(0);\n    return res;\n  }\n\n  uptr CountPackedBlocks() const {\n    uptr res = 0;\n    for (const BlockInfo& b : store_.blocks_) res += b.IsPacked();\n    return res;\n  }\n\n  uptr IdToOffset(StackStore::Id id) const { return store_.IdToOffset(id); }\n\n  static constexpr uptr kBlockSizeFrames = StackStore::kBlockSizeFrames;\n  static 
constexpr uptr kBlockSizeBytes = StackStore::kBlockSizeBytes;\n\n  StackStore store_ = {};\n};\n\nTEST_F(StackStoreTest, Empty) {\n  uptr before = store_.Allocated();\n  uptr pack = 0;\n  EXPECT_EQ(0u, store_.Store({}, &pack));\n  uptr after = store_.Allocated();\n  EXPECT_EQ(before, after);\n}\n\nTEST_F(StackStoreTest, Basic) {\n  std::vector<StackStore::Id> ids;\n  ForEachTrace([&](const StackTrace& s) {\n    uptr pack = 0;\n    ids.push_back(store_.Store(s, &pack));\n  });\n\n  auto id = ids.begin();\n  ForEachTrace([&](const StackTrace& s) {\n    StackTrace trace = store_.Load(*(id++));\n    EXPECT_EQ(s.size, trace.size);\n    EXPECT_EQ(s.tag, trace.tag);\n    EXPECT_EQ(std::vector<uptr>(s.trace, s.trace + s.size),\n              std::vector<uptr>(trace.trace, trace.trace + trace.size));\n  });\n}\n\nTEST_F(StackStoreTest, Allocated) {\n  EXPECT_LE(store_.Allocated(), 0x100000u);\n  std::vector<StackStore::Id> ids;\n  ForEachTrace([&](const StackTrace& s) {\n    uptr pack = 0;\n    ids.push_back(store_.Store(s, &pack));\n  });\n  EXPECT_NEAR(store_.Allocated(), FIRST_32_SECOND_64(500000000u, 1000000000u),\n              FIRST_32_SECOND_64(50000000u, 100000000u));\n  store_.TestOnlyUnmap();\n  EXPECT_LE(store_.Allocated(), 0x100000u);\n}\n\nTEST_F(StackStoreTest, ReadyToPack) {\n  uptr next_pack = kBlockSizeFrames;\n  uptr total_ready = 0;\n  ForEachTrace(\n      [&](const StackTrace& s) {\n        uptr pack = 0;\n        StackStore::Id id = store_.Store(s, &pack);\n        uptr end_idx = IdToOffset(id) + 1 + s.size;\n        if (end_idx >= next_pack) {\n          EXPECT_EQ(1u, pack);\n          next_pack += kBlockSizeFrames;\n        } else {\n          EXPECT_EQ(0u, pack);\n        }\n        total_ready += pack;\n        EXPECT_EQ(CountReadyToPackBlocks(), total_ready);\n      },\n      100000);\n  EXPECT_EQ(GetTotalFramesCount() / kBlockSizeFrames, total_ready);\n}\n\nstruct StackStorePackTest : public StackStoreTest,\n                            public 
::testing::WithParamInterface<\n                                std::pair<StackStore::Compression, uptr>> {};\n\nINSTANTIATE_TEST_SUITE_P(\n    PackUnpacks, StackStorePackTest,\n    ::testing::ValuesIn({\n        StackStorePackTest::ParamType(StackStore::Compression::Delta,\n                                      FIRST_32_SECOND_64(2, 6)),\n        StackStorePackTest::ParamType(StackStore::Compression::LZW,\n                                      FIRST_32_SECOND_64(60, 130)),\n    }));\n\nTEST_P(StackStorePackTest, PackUnpack) {\n  std::vector<StackStore::Id> ids;\n  StackStore::Compression type = GetParam().first;\n  uptr expected_ratio = GetParam().second;\n  ForEachTrace([&](const StackTrace& s) {\n    uptr pack = 0;\n    ids.push_back(store_.Store(s, &pack));\n    if (pack) {\n      uptr before = store_.Allocated();\n      uptr diff = store_.Pack(type);\n      uptr after = store_.Allocated();\n      EXPECT_EQ(before - after, diff);\n      EXPECT_LT(after, before);\n      EXPECT_GE(kBlockSizeBytes / (kBlockSizeBytes - (before - after)),\n                expected_ratio);\n    }\n  });\n  uptr packed_blocks = CountPackedBlocks();\n  // Unpack random block.\n  store_.Load(kBlockSizeFrames * 7 + 123);\n  EXPECT_EQ(packed_blocks - 1, CountPackedBlocks());\n\n  // Unpack all blocks.\n  auto id = ids.begin();\n  ForEachTrace([&](const StackTrace& s) {\n    StackTrace trace = store_.Load(*(id++));\n    EXPECT_EQ(s.size, trace.size);\n    EXPECT_EQ(s.tag, trace.tag);\n    EXPECT_EQ(std::vector<uptr>(s.trace, s.trace + s.size),\n              std::vector<uptr>(trace.trace, trace.trace + trace.size));\n  });\n  EXPECT_EQ(0u, CountPackedBlocks());\n\n  EXPECT_EQ(0u, store_.Pack(type));\n  EXPECT_EQ(0u, CountPackedBlocks());\n}\n\nTEST_P(StackStorePackTest, Failed) {\n  MurMur2Hash64Builder h(0);\n  StackStore::Compression type = GetParam().first;\n  std::vector<uptr> frames(200);\n  for (uptr i = 0; i < kBlockSizeFrames * 4 / frames.size(); ++i) {\n    for (uptr& f : frames) 
{\n      h.add(1);\n      // Make it difficult to pack.\n      f = h.get();\n    }\n    uptr pack = 0;\n    store_.Store(StackTrace(frames.data(), frames.size()), &pack);\n    if (pack)\n      EXPECT_EQ(0u, store_.Pack(type));\n  }\n\n  EXPECT_EQ(0u, CountPackedBlocks());\n}\n\n}  // namespace __sanitizer\n"
  },
  {
    "path": "runtime/sanitizer_common/tests/sanitizer_stackdepot_test.cpp",
    "content": "//===-- sanitizer_stackdepot_test.cpp -------------------------------------===//\n//\n// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n// See https://llvm.org/LICENSE.txt for license information.\n// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n//\n//===----------------------------------------------------------------------===//\n//\n// This file is a part of ThreadSanitizer/AddressSanitizer runtime.\n//\n//===----------------------------------------------------------------------===//\n#include \"sanitizer_common/sanitizer_stackdepot.h\"\n\n#include <atomic>\n#include <numeric>\n#include <regex>\n#include <sstream>\n#include <string>\n#include <thread>\n\n#include \"gtest/gtest.h\"\n#include \"sanitizer_common/sanitizer_internal_defs.h\"\n#include \"sanitizer_common/sanitizer_libc.h\"\n\nnamespace __sanitizer {\n\nclass StackDepotTest : public testing::Test {\n protected:\n  void SetUp() override { StackDepotTestOnlyUnmap(); }\n  void TearDown() override {\n    StackDepotStats stack_depot_stats = StackDepotGetStats();\n    Printf(\"StackDepot: %zd ids; %zdM allocated\\n\",\n           stack_depot_stats.n_uniq_ids, stack_depot_stats.allocated >> 20);\n    StackDepotTestOnlyUnmap();\n  }\n};\n\nTEST_F(StackDepotTest, Basic) {\n  uptr array[] = {1, 2, 3, 4, 5};\n  StackTrace s1(array, ARRAY_SIZE(array));\n  u32 i1 = StackDepotPut(s1);\n  StackTrace stack = StackDepotGet(i1);\n  EXPECT_NE(stack.trace, (uptr*)0);\n  EXPECT_EQ(ARRAY_SIZE(array), stack.size);\n  EXPECT_EQ(0, internal_memcmp(stack.trace, array, sizeof(array)));\n}\n\nTEST_F(StackDepotTest, Absent) {\n  StackTrace stack = StackDepotGet((1 << 30) - 1);\n  EXPECT_EQ((uptr*)0, stack.trace);\n}\n\nTEST_F(StackDepotTest, EmptyStack) {\n  u32 i1 = StackDepotPut(StackTrace());\n  StackTrace stack = StackDepotGet(i1);\n  EXPECT_EQ((uptr*)0, stack.trace);\n}\n\nTEST_F(StackDepotTest, ZeroId) {\n  StackTrace stack = StackDepotGet(0);\n  EXPECT_EQ((uptr*)0, 
stack.trace);\n}\n\nTEST_F(StackDepotTest, Same) {\n  uptr array[] = {1, 2, 3, 4, 6};\n  StackTrace s1(array, ARRAY_SIZE(array));\n  u32 i1 = StackDepotPut(s1);\n  u32 i2 = StackDepotPut(s1);\n  EXPECT_EQ(i1, i2);\n  StackTrace stack = StackDepotGet(i1);\n  EXPECT_NE(stack.trace, (uptr*)0);\n  EXPECT_EQ(ARRAY_SIZE(array), stack.size);\n  EXPECT_EQ(0, internal_memcmp(stack.trace, array, sizeof(array)));\n}\n\nTEST_F(StackDepotTest, Several) {\n  uptr array1[] = {1, 2, 3, 4, 7};\n  StackTrace s1(array1, ARRAY_SIZE(array1));\n  u32 i1 = StackDepotPut(s1);\n  uptr array2[] = {1, 2, 3, 4, 8, 9};\n  StackTrace s2(array2, ARRAY_SIZE(array2));\n  u32 i2 = StackDepotPut(s2);\n  EXPECT_NE(i1, i2);\n}\n\nTEST_F(StackDepotTest, Print) {\n  uptr array1[] = {0x111, 0x222, 0x333, 0x444, 0x777};\n  StackTrace s1(array1, ARRAY_SIZE(array1));\n  u32 i1 = StackDepotPut(s1);\n  uptr array2[] = {0x1111, 0x2222, 0x3333, 0x4444, 0x8888, 0x9999};\n  StackTrace s2(array2, ARRAY_SIZE(array2));\n  u32 i2 = StackDepotPut(s2);\n  EXPECT_NE(i1, i2);\n\n  auto fix_regex = [](const std::string& s) -> std::string {\n    if (!SANITIZER_WINDOWS)\n      return s;\n    return std::regex_replace(s, std::regex(\"\\\\.\\\\*\"), \".*\\\\n.*\");\n  };\n  EXPECT_EXIT(\n      (StackDepotPrintAll(), exit(0)), ::testing::ExitedWithCode(0),\n      fix_regex(\"Stack for id .*#0 0x1.*#1 0x2.*#2 0x3.*#3 0x4.*#4 0x7.*\"));\n  EXPECT_EXIT(\n      (StackDepotPrintAll(), exit(0)), ::testing::ExitedWithCode(0),\n      fix_regex(\n          \"Stack for id .*#0 0x1.*#1 0x2.*#2 0x3.*#3 0x4.*#4 0x8.*#5 0x9.*\"));\n}\n\nTEST_F(StackDepotTest, PrintNoLock) {\n  u32 n = 2000;\n  std::vector<u32> idx2id(n);\n  for (u32 i = 0; i < n; ++i) {\n    uptr array[] = {0x111, 0x222, i, 0x444, 0x777};\n    StackTrace s(array, ARRAY_SIZE(array));\n    idx2id[i] = StackDepotPut(s);\n  }\n  StackDepotPrintAll();\n  for (u32 i = 0; i < n; ++i) {\n    uptr array[] = {0x111, 0x222, i, 0x444, 0x777};\n    StackTrace s(array, 
ARRAY_SIZE(array));\n    CHECK_EQ(idx2id[i], StackDepotPut(s));\n  }\n}\n\nstatic struct StackDepotBenchmarkParams {\n  int UniqueStacksPerThread;\n  int RepeatPerThread;\n  int Threads;\n  bool UniqueThreads;\n  bool UseCount;\n} params[] = {\n    // All traces are unique, very unusual.\n    {10000000, 1, 1, false, false},\n    {8000000, 1, 4, false, false},\n    {8000000, 1, 16, false, false},\n    // Probably most realistic sets.\n    {3000000, 10, 1, false, false},\n    {3000000, 10, 4, false, false},\n    {3000000, 10, 16, false, false},\n    // Update use count as msan/dfsan.\n    {3000000, 10, 1, false, true},\n    {3000000, 10, 4, false, true},\n    {3000000, 10, 16, false, true},\n    // Unrealistic, as above, but traces are unique inside of thread.\n    {4000000, 1, 4, true, false},\n    {2000000, 1, 16, true, false},\n    {2000000, 10, 4, true, false},\n    {500000, 10, 16, true, false},\n    {1500000, 10, 4, true, true},\n    {800000, 10, 16, true, true},\n};\n\nstatic std::string PrintStackDepotBenchmarkParams(\n    const testing::TestParamInfo<StackDepotBenchmarkParams>& info) {\n  std::stringstream name;\n  name << info.param.UniqueStacksPerThread << \"_\" << info.param.RepeatPerThread\n       << \"_\" << info.param.Threads << (info.param.UseCount ? \"_UseCount\" : \"\")\n       << (info.param.UniqueThreads ? \"_UniqueThreads\" : \"\");\n  return name.str();\n}\n\nclass StackDepotBenchmark\n    : public StackDepotTest,\n      public testing::WithParamInterface<StackDepotBenchmarkParams> {};\n\n// Test which can be used as a simple benchmark. 
It's disabled to avoid slowing\n// down check-sanitizer.\n// Usage: Sanitizer-<ARCH>-Test --gtest_also_run_disabled_tests \\\n//   '--gtest_filter=*Benchmark*'\nTEST_P(StackDepotBenchmark, DISABLED_Benchmark) {\n  auto Param = GetParam();\n  std::atomic<unsigned int> here = {};\n\n  auto thread = [&](int idx) {\n    here++;\n    while (here < Param.UniqueThreads) std::this_thread::yield();\n\n    std::vector<uptr> frames(64);\n    for (int r = 0; r < Param.RepeatPerThread; ++r) {\n      std::iota(frames.begin(), frames.end(), idx + 1);\n      for (int i = 0; i < Param.UniqueStacksPerThread; ++i) {\n        StackTrace s(frames.data(), frames.size());\n        auto h = StackDepotPut_WithHandle(s);\n        if (Param.UseCount)\n          h.inc_use_count_unsafe();\n        std::next_permutation(frames.begin(), frames.end());\n      };\n    }\n  };\n\n  std::vector<std::thread> threads;\n  for (int i = 0; i < Param.Threads; ++i)\n    threads.emplace_back(thread, Param.UniqueThreads * i);\n  for (auto& t : threads) t.join();\n}\n\nINSTANTIATE_TEST_SUITE_P(StackDepotBenchmarkSuite, StackDepotBenchmark,\n                         testing::ValuesIn(params),\n                         PrintStackDepotBenchmarkParams);\n\n}  // namespace __sanitizer\n"
  },
  {
    "path": "runtime/sanitizer_common/tests/sanitizer_stacktrace_printer_test.cpp",
    "content": "//===-- sanitizer_common_printer_test.cpp ---------------------------------===//\n//\n// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n// See https://llvm.org/LICENSE.txt for license information.\n// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n//\n//===----------------------------------------------------------------------===//\n//\n// This file is a part of sanitizer_common test suite.\n//\n//===----------------------------------------------------------------------===//\n#include \"sanitizer_common/sanitizer_stacktrace_printer.h\"\n\n#include \"gtest/gtest.h\"\n\nnamespace __sanitizer {\n\nTEST(SanitizerStacktracePrinter, RenderSourceLocation) {\n  InternalScopedString str;\n  RenderSourceLocation(&str, \"/dir/file.cc\", 10, 5, false, \"\");\n  EXPECT_STREQ(\"/dir/file.cc:10:5\", str.data());\n\n  str.clear();\n  RenderSourceLocation(&str, \"/dir/file.cc\", 11, 0, false, \"\");\n  EXPECT_STREQ(\"/dir/file.cc:11\", str.data());\n\n  str.clear();\n  RenderSourceLocation(&str, \"/dir/file.cc\", 0, 0, false, \"\");\n  EXPECT_STREQ(\"/dir/file.cc\", str.data());\n\n  str.clear();\n  RenderSourceLocation(&str, \"/dir/file.cc\", 10, 5, false, \"/dir/\");\n  EXPECT_STREQ(\"file.cc:10:5\", str.data());\n\n  str.clear();\n  RenderSourceLocation(&str, \"/dir/file.cc\", 10, 5, true, \"\");\n  EXPECT_STREQ(\"/dir/file.cc(10,5)\", str.data());\n\n  str.clear();\n  RenderSourceLocation(&str, \"/dir/file.cc\", 11, 0, true, \"\");\n  EXPECT_STREQ(\"/dir/file.cc(11)\", str.data());\n\n  str.clear();\n  RenderSourceLocation(&str, \"/dir/file.cc\", 0, 0, true, \"\");\n  EXPECT_STREQ(\"/dir/file.cc\", str.data());\n\n  str.clear();\n  RenderSourceLocation(&str, \"/dir/file.cc\", 10, 5, true, \"/dir/\");\n  EXPECT_STREQ(\"file.cc(10,5)\", str.data());\n}\n\nTEST(SanitizerStacktracePrinter, RenderModuleLocation) {\n  InternalScopedString str;\n  RenderModuleLocation(&str, \"/dir/exe\", 0x123, kModuleArchUnknown, \"\");\n  
EXPECT_STREQ(\"(/dir/exe+0x123)\", str.data());\n\n  // Check that we strip file prefix if necessary.\n  str.clear();\n  RenderModuleLocation(&str, \"/dir/exe\", 0x123, kModuleArchUnknown, \"/dir/\");\n  EXPECT_STREQ(\"(exe+0x123)\", str.data());\n\n  // Check that we render the arch.\n  str.clear();\n  RenderModuleLocation(&str, \"/dir/exe\", 0x123, kModuleArchX86_64H, \"/dir/\");\n  EXPECT_STREQ(\"(exe:x86_64h+0x123)\", str.data());\n}\n\nTEST(SanitizerStacktracePrinter, RenderFrame) {\n  int frame_no = 42;\n  AddressInfo info;\n  info.address = 0x400000;\n  info.module = internal_strdup(\"/path/to/my/module\");\n  info.module_offset = 0x200;\n  info.function = internal_strdup(\"function_foo\");\n  info.function_offset = 0x100;\n  info.file = internal_strdup(\"/path/to/my/source\");\n  info.line = 10;\n  info.column = 5;\n  InternalScopedString str;\n\n  // Dump all the AddressInfo fields.\n  RenderFrame(&str,\n              \"%% Frame:%n PC:%p Module:%m ModuleOffset:%o \"\n              \"Function:%f FunctionOffset:%q Source:%s Line:%l \"\n              \"Column:%c\",\n              frame_no, info.address, &info, false, \"/path/to/\", \"function_\");\n  EXPECT_STREQ(\"% Frame:42 PC:0x400000 Module:my/module ModuleOffset:0x200 \"\n               \"Function:foo FunctionOffset:0x100 Source:my/source Line:10 \"\n               \"Column:5\",\n               str.data());\n  info.Clear();\n  str.clear();\n\n  // Test special format specifiers.\n  info.address = 0x400000;\n  RenderFrame(&str, \"%M\", frame_no, info.address, &info, false);\n  EXPECT_NE(nullptr, internal_strstr(str.data(), \"400000\"));\n  str.clear();\n\n  RenderFrame(&str, \"%L\", frame_no, info.address, &info, false);\n  EXPECT_STREQ(\"(<unknown module>)\", str.data());\n  str.clear();\n\n  info.module = internal_strdup(\"/path/to/module\");\n  info.module_offset = 0x200;\n  RenderFrame(&str, \"%M\", frame_no, info.address, &info, false);\n  EXPECT_NE(nullptr, internal_strstr(str.data(), 
\"(module+0x\"));\n  EXPECT_NE(nullptr, internal_strstr(str.data(), \"200\"));\n  str.clear();\n\n  RenderFrame(&str, \"%L\", frame_no, info.address, &info, false);\n  EXPECT_STREQ(\"(/path/to/module+0x200)\", str.data());\n  str.clear();\n\n  RenderFrame(&str, \"%b\", frame_no, info.address, &info, false);\n  EXPECT_STREQ(\"\", str.data());\n  str.clear();\n\n  info.uuid_size = 2;\n  info.uuid[0] = 0x55;\n  info.uuid[1] = 0x66;\n\n  RenderFrame(&str, \"%M\", frame_no, info.address, &info, false);\n  EXPECT_NE(nullptr, internal_strstr(str.data(), \"(module+0x\"));\n  EXPECT_NE(nullptr, internal_strstr(str.data(), \"200\"));\n  EXPECT_NE(nullptr, internal_strstr(str.data(), \"BuildId: 5566\"));\n  str.clear();\n\n  RenderFrame(&str, \"%L\", frame_no, info.address, &info, false);\n  EXPECT_STREQ(\"(/path/to/module+0x200) (BuildId: 5566)\", str.data());\n  str.clear();\n\n  RenderFrame(&str, \"%b\", frame_no, info.address, &info, false);\n  EXPECT_STREQ(\"(BuildId: 5566)\", str.data());\n  str.clear();\n\n  info.function = internal_strdup(\"my_function\");\n  RenderFrame(&str, \"%F\", frame_no, info.address, &info, false);\n  EXPECT_STREQ(\"in my_function\", str.data());\n  str.clear();\n\n  info.function_offset = 0x100;\n  RenderFrame(&str, \"%F %S\", frame_no, info.address, &info, false);\n  EXPECT_STREQ(\"in my_function+0x100 <null>\", str.data());\n  str.clear();\n\n  info.file = internal_strdup(\"my_file\");\n  RenderFrame(&str, \"%F %S\", frame_no, info.address, &info, false);\n  EXPECT_STREQ(\"in my_function my_file\", str.data());\n  str.clear();\n\n  info.line = 10;\n  RenderFrame(&str, \"%F %S\", frame_no, info.address, &info, false);\n  EXPECT_STREQ(\"in my_function my_file:10\", str.data());\n  str.clear();\n\n  info.column = 5;\n  RenderFrame(&str, \"%S %L\", frame_no, info.address, &info, false);\n  EXPECT_STREQ(\"my_file:10:5 my_file:10:5\", str.data());\n  str.clear();\n\n  RenderFrame(&str, \"%S %L\", frame_no, info.address, &info, true);\n  
EXPECT_STREQ(\"my_file(10,5) my_file(10,5)\", str.data());\n  str.clear();\n\n  info.column = 0;\n  RenderFrame(&str, \"%F %S\", frame_no, info.address, &info, true);\n  EXPECT_STREQ(\"in my_function my_file(10)\", str.data());\n  str.clear();\n\n  info.line = 0;\n  RenderFrame(&str, \"%F %S\", frame_no, info.address, &info, true);\n  EXPECT_STREQ(\"in my_function my_file\", str.data());\n  str.clear();\n\n  info.Clear();\n}\n\n}  // namespace __sanitizer\n"
  },
  {
    "path": "runtime/sanitizer_common/tests/sanitizer_stacktrace_test.cpp",
    "content": "//===-- sanitizer_stacktrace_test.cpp -------------------------------------===//\n//\n// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n// See https://llvm.org/LICENSE.txt for license information.\n// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n//\n//===----------------------------------------------------------------------===//\n//\n// This file is a part of ThreadSanitizer/AddressSanitizer runtime.\n//\n//===----------------------------------------------------------------------===//\n\n#include \"sanitizer_common/sanitizer_stacktrace.h\"\n\n#include <string.h>\n\n#include <algorithm>\n#include <string>\n\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n#include \"sanitizer_common/sanitizer_common.h\"\n#include \"sanitizer_internal_defs.h\"\n\nusing testing::ContainsRegex;\nusing testing::MatchesRegex;\n\nnamespace __sanitizer {\n\nclass FastUnwindTest : public ::testing::Test {\n protected:\n  virtual void SetUp();\n  virtual void TearDown();\n\n  void UnwindFast();\n\n  void *mapping;\n  uhwptr *fake_stack;\n  const uptr fake_stack_size = 10;\n  uhwptr start_pc;\n\n  uhwptr fake_bp;\n  uhwptr fake_top;\n  uhwptr fake_bottom;\n  BufferedStackTrace trace;\n\n#if defined(__riscv)\n  const uptr kFpOffset = 4;\n  const uptr kBpOffset = 2;\n#else\n  const uptr kFpOffset = 2;\n  const uptr kBpOffset = 0;\n#endif\n\n private:\n  CommonFlags tmp_flags_;\n};\n\nstatic uptr PC(uptr idx) {\n  return (1<<20) + idx;\n}\n\nvoid FastUnwindTest::SetUp() {\n  size_t ps = GetPageSize();\n  mapping = MmapOrDie(2 * ps, \"FastUnwindTest\");\n  MprotectNoAccess((uptr)mapping, ps);\n\n  // Unwinder may peek 1 word down from the starting FP.\n  fake_stack = (uhwptr *)((uptr)mapping + ps + sizeof(uhwptr));\n\n  // Fill an array of pointers with fake fp+retaddr pairs.  
Frame pointers have\n  // even indices.\n  for (uptr i = 0; i + 1 < fake_stack_size; i += 2) {\n    fake_stack[i] = (uptr)&fake_stack[i + kFpOffset];  // fp\n    fake_stack[i+1] = PC(i + 1); // retaddr\n  }\n  // Mark the last fp point back up to terminate the stack trace.\n  fake_stack[RoundDownTo(fake_stack_size - 1, 2)] = (uhwptr)&fake_stack[0];\n\n  // Top is two slots past the end because UnwindFast subtracts two.\n  fake_top = (uhwptr)&fake_stack[fake_stack_size + kFpOffset];\n  // Bottom is one slot before the start because UnwindFast uses >.\n  fake_bottom = (uhwptr)mapping;\n  fake_bp = (uptr)&fake_stack[kBpOffset];\n  start_pc = PC(0);\n\n  tmp_flags_.CopyFrom(*common_flags());\n}\n\nvoid FastUnwindTest::TearDown() {\n  size_t ps = GetPageSize();\n  UnmapOrDie(mapping, 2 * ps);\n\n  // Restore default flags.\n  OverrideCommonFlags(tmp_flags_);\n}\n\n#if SANITIZER_CAN_FAST_UNWIND\n\n#ifdef __sparc__\n// Fake stacks don't meet SPARC UnwindFast requirements.\n#define SKIP_ON_SPARC(x) DISABLED_##x\n#else\n#define SKIP_ON_SPARC(x) x\n#endif\n\nvoid FastUnwindTest::UnwindFast() {\n  trace.UnwindFast(start_pc, fake_bp, fake_top, fake_bottom, kStackTraceMax);\n}\n\nTEST_F(FastUnwindTest, SKIP_ON_SPARC(Basic)) {\n  UnwindFast();\n  // Should get all on-stack retaddrs and start_pc.\n  EXPECT_EQ(6U, trace.size);\n  EXPECT_EQ(start_pc, trace.trace[0]);\n  for (uptr i = 1; i <= 5; i++) {\n    EXPECT_EQ(PC(i*2 - 1), trace.trace[i]);\n  }\n}\n\n// From: https://github.com/google/sanitizers/issues/162\nTEST_F(FastUnwindTest, SKIP_ON_SPARC(FramePointerLoop)) {\n  // Make one fp point to itself.\n  fake_stack[4] = (uhwptr)&fake_stack[4];\n  UnwindFast();\n  // Should get all on-stack retaddrs up to the 4th slot and start_pc.\n  EXPECT_EQ(4U, trace.size);\n  EXPECT_EQ(start_pc, trace.trace[0]);\n  for (uptr i = 1; i <= 3; i++) {\n    EXPECT_EQ(PC(i*2 - 1), trace.trace[i]);\n  }\n}\n\nTEST_F(FastUnwindTest, SKIP_ON_SPARC(MisalignedFramePointer)) {\n  // Make one fp 
misaligned.\n  fake_stack[4] += 3;\n  UnwindFast();\n  // Should get all on-stack retaddrs up to the 4th slot and start_pc.\n  EXPECT_EQ(4U, trace.size);\n  EXPECT_EQ(start_pc, trace.trace[0]);\n  for (uptr i = 1; i < 4U; i++) {\n    EXPECT_EQ(PC(i*2 - 1), trace.trace[i]);\n  }\n}\n\nTEST_F(FastUnwindTest, OneFrameStackTrace) {\n  trace.Unwind(start_pc, fake_bp, nullptr, true, 1);\n  EXPECT_EQ(1U, trace.size);\n  EXPECT_EQ(start_pc, trace.trace[0]);\n  EXPECT_EQ((uhwptr)&fake_stack[kBpOffset], trace.top_frame_bp);\n}\n\nTEST_F(FastUnwindTest, ZeroFramesStackTrace) {\n  trace.Unwind(start_pc, fake_bp, nullptr, true, 0);\n  EXPECT_EQ(0U, trace.size);\n  EXPECT_EQ(0U, trace.top_frame_bp);\n}\n\nTEST_F(FastUnwindTest, SKIP_ON_SPARC(FPBelowPrevFP)) {\n  // The next FP points to unreadable memory inside the stack limits, but below\n  // current FP.\n  fake_stack[0] = (uhwptr)&fake_stack[-50];\n  fake_stack[1] = PC(1);\n  UnwindFast();\n  EXPECT_EQ(2U, trace.size);\n  EXPECT_EQ(PC(0), trace.trace[0]);\n  EXPECT_EQ(PC(1), trace.trace[1]);\n}\n\nTEST_F(FastUnwindTest, SKIP_ON_SPARC(CloseToZeroFrame)) {\n  // Make one pc a NULL pointer.\n  fake_stack[5] = 0x0;\n  UnwindFast();\n  // The stack should be truncated at the NULL pointer (and not include it).\n  EXPECT_EQ(3U, trace.size);\n  EXPECT_EQ(start_pc, trace.trace[0]);\n  for (uptr i = 1; i < 3U; i++) {\n    EXPECT_EQ(PC(i*2 - 1), trace.trace[i]);\n  }\n}\n\nusing StackPrintTest = FastUnwindTest;\n\nTEST_F(StackPrintTest, SKIP_ON_SPARC(ContainsFullTrace)) {\n  // Override stack trace format to make testing code independent of default\n  // flag values.\n  CommonFlags flags;\n  flags.CopyFrom(*common_flags());\n  flags.stack_trace_format = \"#%n %p\";\n  OverrideCommonFlags(flags);\n\n  UnwindFast();\n\n  char buf[3000];\n  trace.PrintTo(buf, sizeof(buf));\n  EXPECT_THAT(std::string(buf),\n              MatchesRegex(\"(#[0-9]+ 0x[0-9a-f]+\\n){\" +\n                           std::to_string(trace.size) + 
\"}\\n\"));\n}\n\nTEST_F(StackPrintTest, SKIP_ON_SPARC(TruncatesContents)) {\n  UnwindFast();\n\n  char buf[3000];\n  uptr actual_len = trace.PrintTo(buf, sizeof(buf));\n  ASSERT_LT(actual_len, sizeof(buf));\n\n  char tinybuf[10];\n  trace.PrintTo(tinybuf, sizeof(tinybuf));\n\n  // This is the truncation case.\n  ASSERT_GT(actual_len, sizeof(tinybuf));\n\n  // The truncated contents should be a prefix of the full contents.\n  size_t lastpos = sizeof(tinybuf) - 1;\n  EXPECT_EQ(strncmp(buf, tinybuf, lastpos), 0);\n  EXPECT_EQ(tinybuf[lastpos], '\\0');\n\n  // Full buffer has more contents...\n  EXPECT_NE(buf[lastpos], '\\0');\n}\n\nTEST_F(StackPrintTest, SKIP_ON_SPARC(WorksWithEmptyStack)) {\n  char buf[3000];\n  trace.PrintTo(buf, sizeof(buf));\n  EXPECT_NE(strstr(buf, \"<empty stack>\"), nullptr);\n}\n\nTEST_F(StackPrintTest, SKIP_ON_SPARC(ReturnsCorrectLength)) {\n  UnwindFast();\n\n  char buf[3000];\n  uptr len = trace.PrintTo(buf, sizeof(buf));\n  size_t actual_len = strlen(buf);\n  ASSERT_LT(len, sizeof(buf));\n  EXPECT_EQ(len, actual_len);\n\n  char tinybuf[5];\n  len = trace.PrintTo(tinybuf, sizeof(tinybuf));\n  size_t truncated_len = strlen(tinybuf);\n  ASSERT_GE(len, sizeof(tinybuf));\n  EXPECT_EQ(len, actual_len);\n  EXPECT_EQ(truncated_len, sizeof(tinybuf) - 1);\n}\n\nTEST_F(StackPrintTest, SKIP_ON_SPARC(AcceptsZeroSize)) {\n  UnwindFast();\n  char buf[1];\n  EXPECT_GT(trace.PrintTo(buf, 0), 0u);\n}\n\nusing StackPrintDeathTest = StackPrintTest;\n\nTEST_F(StackPrintDeathTest, SKIP_ON_SPARC(RequiresNonNullBuffer)) {\n  UnwindFast();\n  EXPECT_DEATH(trace.PrintTo(NULL, 100), \"\");\n}\n\n#endif // SANITIZER_CAN_FAST_UNWIND\n\nTEST(SlowUnwindTest, ShortStackTrace) {\n  BufferedStackTrace stack;\n  uptr pc = StackTrace::GetCurrentPc();\n  uptr bp = GET_CURRENT_FRAME();\n  stack.Unwind(pc, bp, nullptr, false, /*max_depth=*/0);\n  EXPECT_EQ(0U, stack.size);\n  EXPECT_EQ(0U, stack.top_frame_bp);\n  stack.Unwind(pc, bp, nullptr, false, /*max_depth=*/1);\n  
EXPECT_EQ(1U, stack.size);\n  EXPECT_EQ(pc, stack.trace[0]);\n  EXPECT_EQ(bp, stack.top_frame_bp);\n}\n\nTEST(GetCurrentPc, Basic) {\n  // Test that PCs obtained via GET_CURRENT_PC()\n  // and StackTrace::GetCurrentPc() are all different\n  // and are close to the function start.\n  struct Local {\n    static NOINLINE void Test() {\n      const uptr pcs[] = {\n          (uptr)&Local::Test,\n          GET_CURRENT_PC(),\n          StackTrace::GetCurrentPc(),\n          StackTrace::GetCurrentPc(),\n      };\n      for (uptr i = 0; i < ARRAY_SIZE(pcs); i++)\n        Printf(\"pc%zu: 0x%zx\\n\", i, pcs[i]);\n      for (uptr i = 1; i < ARRAY_SIZE(pcs); i++) {\n        EXPECT_GT(pcs[i], pcs[0]);\n        EXPECT_LT(pcs[i], pcs[0] + 1000);\n        for (uptr j = 0; j < i; j++) EXPECT_NE(pcs[i], pcs[j]);\n      }\n    }\n  };\n  Local::Test();\n}\n\n// Dummy implementation. This should never be called, but is required to link\n// non-optimized builds of this test.\nvoid BufferedStackTrace::UnwindImpl(uptr pc, uptr bp, void *context,\n                                    bool request_fast, u32 max_depth) {\n  UNIMPLEMENTED();\n}\n\n}  // namespace __sanitizer\n"
  },
  {
    "path": "runtime/sanitizer_common/tests/sanitizer_stoptheworld_test.cpp",
    "content": "//===-- sanitizer_stoptheworld_test.cpp -----------------------------------===//\n//\n// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n// See https://llvm.org/LICENSE.txt for license information.\n// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n//\n//===----------------------------------------------------------------------===//\n//\n// Tests for sanitizer_stoptheworld.h\n//\n//===----------------------------------------------------------------------===//\n\n#include \"sanitizer_common/sanitizer_stoptheworld.h\"\n\n#include \"sanitizer_common/sanitizer_platform.h\"\n#if (SANITIZER_LINUX || SANITIZER_WINDOWS) && defined(__x86_64__)\n\n#  include <atomic>\n#  include <mutex>\n#  include <thread>\n\n#  include \"gtest/gtest.h\"\n#  include \"sanitizer_common/sanitizer_common.h\"\n#  include \"sanitizer_common/sanitizer_libc.h\"\n\nnamespace __sanitizer {\n\nstatic std::mutex mutex;\n\nstruct CallbackArgument {\n  std::atomic_int counter = {};\n  std::atomic_bool threads_stopped = {};\n  std::atomic_bool callback_executed = {};\n};\n\nvoid IncrementerThread(CallbackArgument &callback_argument) {\n  while (true) {\n    callback_argument.counter++;\n\n    if (mutex.try_lock()) {\n      mutex.unlock();\n      return;\n    }\n\n    std::this_thread::yield();\n  }\n}\n\n// This callback checks that IncrementerThread is suspended at the time of its\n// execution.\nvoid Callback(const SuspendedThreadsList &suspended_threads_list,\n              void *argument) {\n  CallbackArgument *callback_argument = (CallbackArgument *)argument;\n  callback_argument->callback_executed = true;\n  int counter_at_init = callback_argument->counter;\n  for (uptr i = 0; i < 1000; i++) {\n    std::this_thread::yield();\n    if (callback_argument->counter != counter_at_init) {\n      callback_argument->threads_stopped = false;\n      return;\n    }\n  }\n  callback_argument->threads_stopped = true;\n}\n\nTEST(StopTheWorld, 
SuspendThreadsSimple) {\n  CallbackArgument argument;\n  std::thread thread;\n  {\n    std::lock_guard<std::mutex> lock(mutex);\n    thread = std::thread(IncrementerThread, std::ref(argument));\n    StopTheWorld(&Callback, &argument);\n  }\n  EXPECT_TRUE(argument.callback_executed);\n  EXPECT_TRUE(argument.threads_stopped);\n  // argument is on stack, so we have to wait for the incrementer thread to\n  // terminate before we can return from this function.\n  ASSERT_NO_THROW(thread.join());\n}\n\n// A more comprehensive test where we spawn a bunch of threads while executing\n// StopTheWorld in parallel.\nstatic const uptr kThreadCount = 50;\nstatic const uptr kStopWorldAfter = 10;  // let this many threads spawn first\n\nstruct AdvancedCallbackArgument {\n  std::atomic_uintptr_t thread_index = {};\n  std::atomic_int counters[kThreadCount] = {};\n  std::thread threads[kThreadCount];\n  std::atomic_bool threads_stopped = {};\n  std::atomic_bool callback_executed = {};\n};\n\nvoid AdvancedIncrementerThread(AdvancedCallbackArgument &callback_argument) {\n  uptr this_thread_index = callback_argument.thread_index++;\n  // Spawn the next thread.\n  if (this_thread_index + 1 < kThreadCount) {\n    callback_argument.threads[this_thread_index + 1] =\n        std::thread(AdvancedIncrementerThread, std::ref(callback_argument));\n  }\n  // Do the actual work.\n  while (true) {\n    callback_argument.counters[this_thread_index]++;\n    if (mutex.try_lock()) {\n      mutex.unlock();\n      return;\n    }\n\n    std::this_thread::yield();\n  }\n}\n\nvoid AdvancedCallback(const SuspendedThreadsList &suspended_threads_list,\n                      void *argument) {\n  AdvancedCallbackArgument *callback_argument =\n      (AdvancedCallbackArgument *)argument;\n  callback_argument->callback_executed = true;\n\n  int counters_at_init[kThreadCount];\n  for (uptr j = 0; j < kThreadCount; j++)\n    counters_at_init[j] = callback_argument->counters[j];\n  for (uptr i = 0; i < 10; i++) {\n    
std::this_thread::yield();\n    for (uptr j = 0; j < kThreadCount; j++)\n      if (callback_argument->counters[j] != counters_at_init[j]) {\n        callback_argument->threads_stopped = false;\n        return;\n      }\n  }\n  callback_argument->threads_stopped = true;\n}\n\nTEST(StopTheWorld, SuspendThreadsAdvanced) {\n  AdvancedCallbackArgument argument;\n\n  {\n    std::lock_guard<std::mutex> lock(mutex);\n    argument.threads[0] =\n        std::thread(AdvancedIncrementerThread, std::ref(argument));\n    // Wait for several threads to spawn before proceeding.\n    while (argument.thread_index < kStopWorldAfter) std::this_thread::yield();\n    StopTheWorld(&AdvancedCallback, &argument);\n    EXPECT_TRUE(argument.callback_executed);\n    EXPECT_TRUE(argument.threads_stopped);\n\n    // Wait for all threads to spawn before we start terminating them.\n    while (argument.thread_index < kThreadCount) std::this_thread::yield();\n  }\n  // Signal the threads to terminate.\n  for (auto &t : argument.threads) t.join();\n}\n\nstatic void SegvCallback(const SuspendedThreadsList &suspended_threads_list,\n                         void *argument) {\n  *(volatile int *)0x1234 = 0;\n}\n\n#  if SANITIZER_WINDOWS\n#    define MAYBE_SegvInCallback DISABLED_SegvInCallback\n#  else\n#    define MAYBE_SegvInCallback SegvInCallback\n#  endif\n\nTEST(StopTheWorld, MAYBE_SegvInCallback) {\n  // Test that tracer thread catches SIGSEGV.\n  StopTheWorld(&SegvCallback, NULL);\n}\n\n}  // namespace __sanitizer\n\n#endif  // (SANITIZER_LINUX || SANITIZER_WINDOWS) && defined(__x86_64__)\n"
  },
  {
    "path": "runtime/sanitizer_common/tests/sanitizer_stoptheworld_testlib.cpp",
    "content": "//===-- sanitizer_stoptheworld_testlib.cpp --------------------------------===//\n//\n// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n// See https://llvm.org/LICENSE.txt for license information.\n// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n//\n//===----------------------------------------------------------------------===//\n// Dynamic library to test StopTheWorld functionality.\n// When loaded with LD_PRELOAD, it will periodically suspend all threads.\n//===----------------------------------------------------------------------===//\n/* Usage:\nclang++ -fno-exceptions -g -fPIC -I. \\\n sanitizer_common/tests/sanitizer_stoptheworld_testlib.cpp \\\n sanitizer_common/sanitizer_*.cpp -shared -lpthread -o teststoptheworld.so\nLD_PRELOAD=`pwd`/teststoptheworld.so /your/app\n*/\n\n#include \"sanitizer_common/sanitizer_platform.h\"\n#if SANITIZER_LINUX\n\n#include <dlfcn.h>\n#include <stddef.h>\n#include <stdio.h>\n#include <pthread.h>\n#include <unistd.h>\n\n#include \"sanitizer_common/sanitizer_stoptheworld.h\"\n\nnamespace {\nconst uptr kSuspendDuration = 3;\nconst uptr kRunDuration = 3;\n\nvoid Callback(const SuspendedThreadsList &suspended_threads_list,\n              void *argument) {\n  sleep(kSuspendDuration);\n}\n\nvoid *SuspenderThread(void *argument) {\n  while (true) {\n    sleep(kRunDuration);\n    StopTheWorld(Callback, NULL);\n  }\n  return NULL;\n}\n\n__attribute__((constructor)) void StopTheWorldTestLibConstructor(void) {\n  pthread_t thread_id;\n  pthread_create(&thread_id, NULL, SuspenderThread, NULL);\n}\n}  // namespace\n\n#endif  // SANITIZER_LINUX\n"
  },
  {
    "path": "runtime/sanitizer_common/tests/sanitizer_suppressions_test.cpp",
    "content": "//===-- sanitizer_suppressions_test.cpp -----------------------------------===//\n//\n// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n// See https://llvm.org/LICENSE.txt for license information.\n// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n//\n//===----------------------------------------------------------------------===//\n//\n// This file is a part of ThreadSanitizer/AddressSanitizer runtime.\n//\n//===----------------------------------------------------------------------===//\n#include \"sanitizer_common/sanitizer_suppressions.h\"\n#include \"gtest/gtest.h\"\n\n#include <string.h>\n\nnamespace __sanitizer {\n\nstatic bool MyMatch(const char *templ, const char *func) {\n  char tmp[1024];\n  snprintf(tmp, sizeof(tmp), \"%s\", templ);\n  return TemplateMatch(tmp, func);\n}\n\nTEST(Suppressions, Match) {\n  EXPECT_TRUE(MyMatch(\"foobar$\", \"foobar\"));\n\n  EXPECT_TRUE(MyMatch(\"foobar\", \"foobar\"));\n  EXPECT_TRUE(MyMatch(\"*foobar*\", \"foobar\"));\n  EXPECT_TRUE(MyMatch(\"foobar\", \"prefix_foobar_postfix\"));\n  EXPECT_TRUE(MyMatch(\"*foobar*\", \"prefix_foobar_postfix\"));\n  EXPECT_TRUE(MyMatch(\"foo*bar\", \"foo_middle_bar\"));\n  EXPECT_TRUE(MyMatch(\"foo*bar\", \"foobar\"));\n  EXPECT_TRUE(MyMatch(\"foo*bar*baz\", \"foo_middle_bar_another_baz\"));\n  EXPECT_TRUE(MyMatch(\"foo*bar*baz\", \"foo_middle_barbaz\"));\n  EXPECT_TRUE(MyMatch(\"^foobar\", \"foobar\"));\n  EXPECT_TRUE(MyMatch(\"^foobar\", \"foobar_postfix\"));\n  EXPECT_TRUE(MyMatch(\"^*foobar\", \"foobar\"));\n  EXPECT_TRUE(MyMatch(\"^*foobar\", \"prefix_foobar\"));\n  EXPECT_TRUE(MyMatch(\"foobar$\", \"foobar\"));\n  EXPECT_TRUE(MyMatch(\"foobar$\", \"prefix_foobar\"));\n  EXPECT_TRUE(MyMatch(\"*foobar*$\", \"foobar\"));\n  EXPECT_TRUE(MyMatch(\"*foobar*$\", \"foobar_postfix\"));\n  EXPECT_TRUE(MyMatch(\"^foobar$\", \"foobar\"));\n\n  EXPECT_FALSE(MyMatch(\"foo\", \"baz\"));\n  EXPECT_FALSE(MyMatch(\"foobarbaz\", \"foobar\"));\n  
EXPECT_FALSE(MyMatch(\"foobarbaz\", \"barbaz\"));\n  EXPECT_FALSE(MyMatch(\"foo*bar\", \"foobaz\"));\n  EXPECT_FALSE(MyMatch(\"foo*bar\", \"foo_baz\"));\n  EXPECT_FALSE(MyMatch(\"^foobar\", \"prefix_foobar\"));\n  EXPECT_FALSE(MyMatch(\"foobar$\", \"foobar_postfix\"));\n  EXPECT_FALSE(MyMatch(\"^foobar$\", \"prefix_foobar\"));\n  EXPECT_FALSE(MyMatch(\"^foobar$\", \"foobar_postfix\"));\n  EXPECT_FALSE(MyMatch(\"foo^bar\", \"foobar\"));\n  EXPECT_FALSE(MyMatch(\"foo$bar\", \"foobar\"));\n  EXPECT_FALSE(MyMatch(\"foo$^bar\", \"foobar\"));\n}\n\nstatic const char *kTestSuppressionTypes[] = {\"race\", \"thread\", \"mutex\",\n                                              \"signal\"};\n\nclass SuppressionContextTest : public ::testing::Test {\n public:\n  SuppressionContextTest()\n      : ctx_(kTestSuppressionTypes, ARRAY_SIZE(kTestSuppressionTypes)) {}\n\n protected:\n  SuppressionContext ctx_;\n\n  void CheckSuppressions(unsigned count, std::vector<const char *> types,\n                         std::vector<const char *> templs) const {\n    EXPECT_EQ(count, ctx_.SuppressionCount());\n    for (unsigned i = 0; i < count; i++) {\n      const Suppression *s = ctx_.SuppressionAt(i);\n      EXPECT_STREQ(types[i], s->type);\n      EXPECT_STREQ(templs[i], s->templ);\n    }\n  }\n};\n\nTEST_F(SuppressionContextTest, Parse) {\n  ctx_.Parse(\n      \"race:foo\\n\"\n      \" \\trace:bar\\n\"\n      \"race:baz\\t \\n\"\n      \"# a comment\\n\"\n      \"race:quz\\n\");\n  CheckSuppressions(4, {\"race\", \"race\", \"race\", \"race\"},\n                    {\"foo\", \"bar\", \"baz\", \"quz\"});\n}\n\nTEST_F(SuppressionContextTest, Parse2) {\n  ctx_.Parse(\n      \"  \\t# first line comment\\n\"\n      \" \\trace:bar \\t\\n\"\n      \"race:baz* *baz\\n\"\n      \"# a comment\\n\"\n      \"# last line comment\\n\");\n  CheckSuppressions(2, {\"race\", \"race\"}, {\"bar\", \"baz* *baz\"});\n}\n\nTEST_F(SuppressionContextTest, Parse3) {\n  ctx_.Parse(\n      \"# last suppression w/o 
line-feed\\n\"\n      \"race:foo\\n\"\n      \"race:bar\\r\\n\"\n      \"race:baz\");\n  CheckSuppressions(3, {\"race\", \"race\", \"race\"}, {\"foo\", \"bar\", \"baz\"});\n}\n\nTEST_F(SuppressionContextTest, ParseType) {\n  ctx_.Parse(\n      \"race:foo\\n\"\n      \"thread:bar\\n\"\n      \"mutex:baz\\n\"\n      \"signal:quz\\n\");\n  CheckSuppressions(4, {\"race\", \"thread\", \"mutex\", \"signal\"},\n                    {\"foo\", \"bar\", \"baz\", \"quz\"});\n}\n\nTEST_F(SuppressionContextTest, HasSuppressionType) {\n  ctx_.Parse(\n    \"race:foo\\n\"\n    \"thread:bar\\n\");\n  EXPECT_TRUE(ctx_.HasSuppressionType(\"race\"));\n  EXPECT_TRUE(ctx_.HasSuppressionType(\"thread\"));\n  EXPECT_FALSE(ctx_.HasSuppressionType(\"mutex\"));\n  EXPECT_FALSE(ctx_.HasSuppressionType(\"signal\"));\n}\n\nTEST_F(SuppressionContextTest, RegressionTestForBufferOverflowInSuppressions) {\n  EXPECT_DEATH(ctx_.Parse(\"race\"), \"failed to parse suppressions\");\n  EXPECT_DEATH(ctx_.Parse(\"foo\"), \"failed to parse suppressions\");\n}\n\n\n}  // namespace __sanitizer\n"
  },
  {
    "path": "runtime/sanitizer_common/tests/sanitizer_symbolizer_test.cpp",
    "content": "//===-- sanitizer_symbolizer_test.cpp -------------------------------------===//\n//\n// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n// See https://llvm.org/LICENSE.txt for license information.\n// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n//\n//===----------------------------------------------------------------------===//\n//\n// Tests for sanitizer_symbolizer.h and sanitizer_symbolizer_internal.h\n//\n//===----------------------------------------------------------------------===//\n\n#include \"sanitizer_common/sanitizer_allocator_internal.h\"\n#include \"sanitizer_common/sanitizer_symbolizer_internal.h\"\n#include \"gtest/gtest.h\"\n\nnamespace __sanitizer {\n\nTEST(Symbolizer, ExtractToken) {\n  char *token;\n  const char *rest;\n\n  rest = ExtractToken(\"a;b;c\", \";\", &token);\n  EXPECT_STREQ(\"a\", token);\n  EXPECT_STREQ(\"b;c\", rest);\n  InternalFree(token);\n\n  rest = ExtractToken(\"aaa-bbb.ccc\", \";.-*\", &token);\n  EXPECT_STREQ(\"aaa\", token);\n  EXPECT_STREQ(\"bbb.ccc\", rest);\n  InternalFree(token);\n}\n\nTEST(Symbolizer, ExtractInt) {\n  int token;\n  const char *rest = ExtractInt(\"123,456;789\", \";,\", &token);\n  EXPECT_EQ(123, token);\n  EXPECT_STREQ(\"456;789\", rest);\n}\n\nTEST(Symbolizer, ExtractUptr) {\n  uptr token;\n  const char *rest = ExtractUptr(\"123,456;789\", \";,\", &token);\n  EXPECT_EQ(123U, token);\n  EXPECT_STREQ(\"456;789\", rest);\n}\n\nTEST(Symbolizer, ExtractTokenUpToDelimiter) {\n  char *token;\n  const char *rest =\n      ExtractTokenUpToDelimiter(\"aaa-+-bbb-+-ccc\", \"-+-\", &token);\n  EXPECT_STREQ(\"aaa\", token);\n  EXPECT_STREQ(\"bbb-+-ccc\", rest);\n  InternalFree(token);\n}\n\n#if !SANITIZER_WINDOWS\nTEST(Symbolizer, DemangleSwiftAndCXX) {\n  // Swift names are not demangled in default llvm build because Swift\n  // runtime is not linked in.\n  EXPECT_STREQ(\"_TtSd\", DemangleSwiftAndCXX(\"_TtSd\"));\n  // Check that the rest demangles 
properly.\n  EXPECT_STREQ(\"f1(char*, int)\", DemangleSwiftAndCXX(\"_Z2f1Pci\"));\n#if !SANITIZER_FREEBSD // QoI issue with libcxxrt on FreeBSD\n  EXPECT_STREQ(\"foo\", DemangleSwiftAndCXX(\"foo\"));\n#endif\n  EXPECT_STREQ(\"\", DemangleSwiftAndCXX(\"\"));\n}\n#endif\n\n}  // namespace __sanitizer\n"
  },
  {
    "path": "runtime/sanitizer_common/tests/sanitizer_test_config.h",
    "content": "//===-- sanitizer_test_config.h ---------------------------------*- C++ -*-===//\n//\n// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n// See https://llvm.org/LICENSE.txt for license information.\n// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n//\n//===----------------------------------------------------------------------===//\n//\n// This file is a part of *Sanitizer runtime.\n//\n//===----------------------------------------------------------------------===//\n#if !defined(INCLUDED_FROM_SANITIZER_TEST_UTILS_H)\n# error \"This file should be included into sanitizer_test_utils.h only\"\n#endif\n\n#ifndef SANITIZER_TEST_CONFIG_H\n#define SANITIZER_TEST_CONFIG_H\n\n#include <vector>\n#include <string>\n#include <map>\n\n#if SANITIZER_USE_DEJAGNU_GTEST\n# include \"dejagnu-gtest.h\"\n#else\n# include \"gtest/gtest.h\"\n#endif\n\n#endif  // SANITIZER_TEST_CONFIG_H\n"
  },
  {
    "path": "runtime/sanitizer_common/tests/sanitizer_test_main.cpp",
    "content": "//===-- sanitizer_test_main.cpp -------------------------------------------===//\n//\n// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n// See https://llvm.org/LICENSE.txt for license information.\n// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n//\n//===----------------------------------------------------------------------===//\n//\n// This file is a part of ThreadSanitizer/AddressSanitizer runtime.\n//\n//===----------------------------------------------------------------------===//\n#include \"gtest/gtest.h\"\n#include \"sanitizer_common/sanitizer_flags.h\"\n\nconst char *argv0;\n\nint main(int argc, char **argv) {\n  argv0 = argv[0];\n  testing::GTEST_FLAG(death_test_style) = \"threadsafe\";\n  testing::InitGoogleTest(&argc, argv);\n  __sanitizer::SetCommonFlagsDefaults();\n  return RUN_ALL_TESTS();\n}\n"
  },
  {
    "path": "runtime/sanitizer_common/tests/sanitizer_test_utils.h",
    "content": "//===-- sanitizer_test_utils.h ----------------------------------*- C++ -*-===//\n//\n// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n// See https://llvm.org/LICENSE.txt for license information.\n// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n//\n//===----------------------------------------------------------------------===//\n//\n// This file is a part of *Sanitizer runtime.\n// Common unit tests utilities.\n//\n//===----------------------------------------------------------------------===//\n\n#ifndef SANITIZER_TEST_UTILS_H\n#define SANITIZER_TEST_UTILS_H\n\n#if defined(_WIN32)\n// <windows.h> should always be the first include on Windows.\n# include <windows.h>\n// MSVS headers define max/min as macros, so std::max/min gets crazy.\n# undef max\n# undef min\n#endif\n\n#if !defined(SANITIZER_EXTERNAL_TEST_CONFIG)\n# define INCLUDED_FROM_SANITIZER_TEST_UTILS_H\n# include \"sanitizer_test_config.h\"\n# undef INCLUDED_FROM_SANITIZER_TEST_UTILS_H\n#endif\n\n#include <stdint.h>\n\n#if defined(_MSC_VER)\n# define NOINLINE __declspec(noinline)\n#else  // defined(_MSC_VER)\n# define NOINLINE __attribute__((noinline))\n#endif  // defined(_MSC_VER)\n\n#if !defined(_MSC_VER) || defined(__clang__)\n# define UNUSED __attribute__((unused))\n# define USED __attribute__((used))\n#else\n# define UNUSED\n# define USED\n#endif\n\n#if !defined(__has_feature)\n#define __has_feature(x) 0\n#endif\n\n#ifndef ATTRIBUTE_NO_SANITIZE_ADDRESS\n# if __has_feature(address_sanitizer) || defined(__SANITIZE_ADDRESS__)\n#  define ATTRIBUTE_NO_SANITIZE_ADDRESS \\\n    __attribute__((no_sanitize_address))\n# else\n#  define ATTRIBUTE_NO_SANITIZE_ADDRESS\n# endif\n#endif  // ATTRIBUTE_NO_SANITIZE_ADDRESS\n\n#if __LP64__ || defined(_WIN64)\n#  define SANITIZER_WORDSIZE 64\n#else\n#  define SANITIZER_WORDSIZE 32\n#endif\n\n// Make the compiler thinks that something is going on there.\ninline void break_optimization(void *arg) {\n#if 
!defined(_WIN32) || defined(__clang__)\n  __asm__ __volatile__(\"\" : : \"r\" (arg) : \"memory\");\n#endif\n}\n\n// This function returns its parameter but in such a way that compiler\n// can not prove it.\ntemplate<class T>\nNOINLINE\nstatic T Ident(T t) {\n  T ret = t;\n  break_optimization(&ret);\n  return ret;\n}\n\n// Simple stand-alone pseudorandom number generator.\n// Current algorithm is ANSI C linear congruential PRNG.\nstatic inline uint32_t my_rand_r(uint32_t* state) {\n  return (*state = *state * 1103515245 + 12345) >> 16;\n}\n\nstatic uint32_t global_seed = 0;\n\nstatic inline uint32_t my_rand() {\n  return my_rand_r(&global_seed);\n}\n\n// Set availability of platform-specific functions.\n\n#if !defined(__APPLE__) && !defined(__ANDROID__) && !defined(_WIN32)\n# define SANITIZER_TEST_HAS_POSIX_MEMALIGN 1\n#else\n# define SANITIZER_TEST_HAS_POSIX_MEMALIGN 0\n#endif\n\n#if !defined(__APPLE__) && !defined(__FreeBSD__) && !defined(__ANDROID__) && \\\n    !defined(__NetBSD__) && !defined(_WIN32)\n# define SANITIZER_TEST_HAS_MEMALIGN 1\n#else\n# define SANITIZER_TEST_HAS_MEMALIGN 0\n#endif\n\n#if defined(__GLIBC__)\n# define SANITIZER_TEST_HAS_PVALLOC 1\n# define SANITIZER_TEST_HAS_MALLOC_USABLE_SIZE 1\n#else\n# define SANITIZER_TEST_HAS_PVALLOC 0\n# define SANITIZER_TEST_HAS_MALLOC_USABLE_SIZE 0\n#endif\n\n#if !defined(__APPLE__)\n# define SANITIZER_TEST_HAS_STRNLEN 1\n#else\n# define SANITIZER_TEST_HAS_STRNLEN 0\n#endif\n\n#if defined(__FreeBSD__) || defined(__NetBSD__)\n# define SANITIZER_TEST_HAS_PRINTF_L 1\n#else\n# define SANITIZER_TEST_HAS_PRINTF_L 0\n#endif\n\n#if !defined(_MSC_VER)\n# define SANITIZER_TEST_HAS_STRNDUP 1\n#else\n# define SANITIZER_TEST_HAS_STRNDUP 0\n#endif\n\n#endif  // SANITIZER_TEST_UTILS_H\n"
  },
  {
    "path": "runtime/sanitizer_common/tests/sanitizer_thread_registry_test.cpp",
    "content": "//===-- sanitizer_thread_registry_test.cpp --------------------------------===//\n//\n// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n// See https://llvm.org/LICENSE.txt for license information.\n// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n//\n//===----------------------------------------------------------------------===//\n//\n// This file is a part of shared sanitizer runtime.\n//\n//===----------------------------------------------------------------------===//\n#include \"sanitizer_common/sanitizer_thread_registry.h\"\n\n#include \"sanitizer_pthread_wrappers.h\"\n\n#include \"gtest/gtest.h\"\n\n#include <vector>\n\nnamespace __sanitizer {\n\nstatic Mutex tctx_allocator_lock;\nstatic LowLevelAllocator tctx_allocator;\n\ntemplate<typename TCTX>\nstatic ThreadContextBase *GetThreadContext(u32 tid) {\n  Lock l(&tctx_allocator_lock);\n  return new(tctx_allocator) TCTX(tid);\n}\n\nstatic const u32 kMaxRegistryThreads = 1000;\nstatic const u32 kRegistryQuarantine = 2;\n\nstatic void CheckThreadQuantity(ThreadRegistry *registry, uptr exp_total,\n                                uptr exp_running, uptr exp_alive) {\n  uptr total, running, alive;\n  registry->GetNumberOfThreads(&total, &running, &alive);\n  EXPECT_EQ(exp_total, total);\n  EXPECT_EQ(exp_running, running);\n  EXPECT_EQ(exp_alive, alive);\n}\n\nstatic bool is_detached(u32 tid) {\n  return (tid % 2 == 0);\n}\n\nstatic uptr get_uid(u32 tid) {\n  return tid * 2;\n}\n\nstatic bool HasName(ThreadContextBase *tctx, void *arg) {\n  char *name = (char*)arg;\n  return (0 == internal_strcmp(tctx->name, name));\n}\n\nstatic bool HasUid(ThreadContextBase *tctx, void *arg) {\n  uptr uid = (uptr)arg;\n  return (tctx->user_id == uid);\n}\n\nstatic void MarkUidAsPresent(ThreadContextBase *tctx, void *arg) {\n  bool *arr = (bool*)arg;\n  arr[tctx->tid] = true;\n}\n\nstatic void TestRegistry(ThreadRegistry *registry, bool has_quarantine) {\n  // Create and start 
a main thread.\n  EXPECT_EQ(0U, registry->CreateThread(get_uid(0), true, -1, 0));\n  registry->StartThread(0, 0, ThreadType::Regular, 0);\n  // Create a bunch of threads.\n  for (u32 i = 1; i <= 10; i++) {\n    EXPECT_EQ(i, registry->CreateThread(get_uid(i), is_detached(i), 0, 0));\n  }\n  CheckThreadQuantity(registry, 11, 1, 11);\n  // Start some of them.\n  for (u32 i = 1; i <= 5; i++) {\n    registry->StartThread(i, 0, ThreadType::Regular, 0);\n  }\n  CheckThreadQuantity(registry, 11, 6, 11);\n  // Finish, create and start more threads.\n  for (u32 i = 1; i <= 5; i++) {\n    registry->FinishThread(i);\n    if (!is_detached(i))\n      registry->JoinThread(i, 0);\n  }\n  for (u32 i = 6; i <= 10; i++) {\n    registry->StartThread(i, 0, ThreadType::Regular, 0);\n  }\n  std::vector<u32> new_tids;\n  for (u32 i = 11; i <= 15; i++) {\n    new_tids.push_back(\n        registry->CreateThread(get_uid(i), is_detached(i), 0, 0));\n  }\n  ASSERT_LE(kRegistryQuarantine, 5U);\n  u32 exp_total = 16 - (has_quarantine ? 
5 - kRegistryQuarantine  : 0);\n  CheckThreadQuantity(registry, exp_total, 6, 11);\n  // Test SetThreadName and FindThread.\n  registry->SetThreadName(6, \"six\");\n  registry->SetThreadName(7, \"seven\");\n  EXPECT_EQ(7U, registry->FindThread(HasName, (void*)\"seven\"));\n  EXPECT_EQ(kInvalidTid, registry->FindThread(HasName, (void *)\"none\"));\n  EXPECT_EQ(0U, registry->FindThread(HasUid, (void*)get_uid(0)));\n  EXPECT_EQ(10U, registry->FindThread(HasUid, (void*)get_uid(10)));\n  EXPECT_EQ(kInvalidTid, registry->FindThread(HasUid, (void *)0x1234));\n  // Detach and finish and join remaining threads.\n  for (u32 i = 6; i <= 10; i++) {\n    registry->DetachThread(i, 0);\n    registry->FinishThread(i);\n  }\n  for (u32 i = 0; i < new_tids.size(); i++) {\n    u32 tid = new_tids[i];\n    registry->StartThread(tid, 0, ThreadType::Regular, 0);\n    registry->DetachThread(tid, 0);\n    registry->FinishThread(tid);\n  }\n  CheckThreadQuantity(registry, exp_total, 1, 1);\n  // Test methods that require the caller to hold a ThreadRegistryLock.\n  bool has_tid[16];\n  internal_memset(&has_tid[0], 0, sizeof(has_tid));\n  {\n    ThreadRegistryLock l(registry);\n    registry->RunCallbackForEachThreadLocked(MarkUidAsPresent, &has_tid[0]);\n  }\n  for (u32 i = 0; i < exp_total; i++) {\n    EXPECT_TRUE(has_tid[i]);\n  }\n  {\n    ThreadRegistryLock l(registry);\n    registry->CheckLocked();\n    ThreadContextBase *main_thread = registry->GetThreadLocked(0);\n    EXPECT_EQ(main_thread, registry->FindThreadContextLocked(\n        HasUid, (void*)get_uid(0)));\n  }\n  EXPECT_EQ(11U, registry->GetMaxAliveThreads());\n}\n\nTEST(SanitizerCommon, ThreadRegistryTest) {\n  ThreadRegistry quarantine_registry(GetThreadContext<ThreadContextBase>,\n                                     kMaxRegistryThreads, kRegistryQuarantine,\n                                     0);\n  TestRegistry(&quarantine_registry, true);\n\n  ThreadRegistry no_quarantine_registry(GetThreadContext<ThreadContextBase>,\n   
                                     kMaxRegistryThreads,\n                                        kMaxRegistryThreads, 0);\n  TestRegistry(&no_quarantine_registry, false);\n}\n\nstatic const int kThreadsPerShard = 20;\nstatic const int kNumShards = 25;\n\nstatic int num_created[kNumShards + 1];\nstatic int num_started[kNumShards + 1];\nstatic int num_joined[kNumShards + 1];\n\nnamespace {\n\nstruct RunThreadArgs {\n  ThreadRegistry *registry;\n  uptr shard;  // started from 1.\n};\n\nclass TestThreadContext final : public ThreadContextBase {\n public:\n  explicit TestThreadContext(int tid) : ThreadContextBase(tid) {}\n  void OnJoined(void *arg) {\n    uptr shard = (uptr)arg;\n    num_joined[shard]++;\n  }\n  void OnStarted(void *arg) {\n    uptr shard = (uptr)arg;\n    num_started[shard]++;\n  }\n  void OnCreated(void *arg) {\n    uptr shard = (uptr)arg;\n    num_created[shard]++;\n  }\n};\n\n}  // namespace\n\nvoid *RunThread(void *arg) {\n  RunThreadArgs *args = static_cast<RunThreadArgs*>(arg);\n  std::vector<int> tids;\n  for (int i = 0; i < kThreadsPerShard; i++)\n    tids.push_back(\n        args->registry->CreateThread(0, false, 0, (void*)args->shard));\n  for (int i = 0; i < kThreadsPerShard; i++)\n    args->registry->StartThread(tids[i], 0, ThreadType::Regular,\n        (void*)args->shard);\n  for (int i = 0; i < kThreadsPerShard; i++)\n    args->registry->FinishThread(tids[i]);\n  for (int i = 0; i < kThreadsPerShard; i++)\n    args->registry->JoinThread(tids[i], (void*)args->shard);\n  return 0;\n}\n\nstatic void ThreadedTestRegistry(ThreadRegistry *registry) {\n  // Create and start a main thread.\n  EXPECT_EQ(0U, registry->CreateThread(0, true, -1, 0));\n  registry->StartThread(0, 0, ThreadType::Regular, 0);\n  pthread_t threads[kNumShards];\n  RunThreadArgs args[kNumShards];\n  for (int i = 0; i < kNumShards; i++) {\n    args[i].registry = registry;\n    args[i].shard = i + 1;\n    PTHREAD_CREATE(&threads[i], 0, RunThread, &args[i]);\n  }\n  for (int 
i = 0; i < kNumShards; i++) {\n    PTHREAD_JOIN(threads[i], 0);\n  }\n  // Check that each thread created/started/joined correct amount\n  // of \"threads\" in thread_registry.\n  EXPECT_EQ(1, num_created[0]);\n  EXPECT_EQ(1, num_started[0]);\n  EXPECT_EQ(0, num_joined[0]);\n  for (int i = 1; i <= kNumShards; i++) {\n    EXPECT_EQ(kThreadsPerShard, num_created[i]);\n    EXPECT_EQ(kThreadsPerShard, num_started[i]);\n    EXPECT_EQ(kThreadsPerShard, num_joined[i]);\n  }\n}\n\nTEST(SanitizerCommon, ThreadRegistryThreadedTest) {\n  memset(&num_created, 0, sizeof(num_created));\n  memset(&num_started, 0, sizeof(num_created));\n  memset(&num_joined, 0, sizeof(num_created));\n\n  ThreadRegistry registry(GetThreadContext<TestThreadContext>,\n                          kThreadsPerShard * kNumShards + 1, 10, 0);\n  ThreadedTestRegistry(&registry);\n}\n\n}  // namespace __sanitizer\n"
  },
  {
    "path": "runtime/sanitizer_common/tests/sanitizer_type_traits_test.cpp",
    "content": "//===-- sanitizer_type_traits_test.cpp ------------------------------------===//\n//\n// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n// See https://llvm.org/LICENSE.txt for license information.\n// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n//\n//===----------------------------------------------------------------------===//\n//\n// This file is a part of ThreadSanitizer/AddressSanitizer runtime.\n//\n//===----------------------------------------------------------------------===//\n#include \"sanitizer_common/sanitizer_type_traits.h\"\n\n#include <vector>\n\n#include \"gtest/gtest.h\"\n#include \"sanitizer_common/sanitizer_internal_defs.h\"\n\nnamespace __sanitizer {\n\nTEST(SanitizerCommon, IsSame) {\n  ASSERT_TRUE((is_same<unsigned, unsigned>::value));\n  ASSERT_TRUE((is_same<uptr, uptr>::value));\n  ASSERT_TRUE((is_same<sptr, sptr>::value));\n  ASSERT_TRUE((is_same<const uptr, const uptr>::value));\n\n  ASSERT_FALSE((is_same<unsigned, signed>::value));\n  ASSERT_FALSE((is_same<uptr, sptr>::value));\n  ASSERT_FALSE((is_same<uptr, const uptr>::value));\n}\n\nTEST(SanitizerCommon, Conditional) {\n  ASSERT_TRUE((is_same<int, conditional<true, int, double>::type>::value));\n  ASSERT_TRUE((is_same<double, conditional<false, int, double>::type>::value));\n}\n\nTEST(SanitizerCommon, RemoveReference) {\n  ASSERT_TRUE((is_same<int, remove_reference<int>::type>::value));\n  ASSERT_TRUE((is_same<const int, remove_reference<const int>::type>::value));\n  ASSERT_TRUE((is_same<int, remove_reference<int&>::type>::value));\n  ASSERT_TRUE((is_same<const int, remove_reference<const int&>::type>::value));\n  ASSERT_TRUE((is_same<int, remove_reference<int&&>::type>::value));\n}\n\nTEST(SanitizerCommon, Move) {\n  std::vector<int> v = {1, 2, 3};\n  auto v2 = __sanitizer::move(v);\n  EXPECT_EQ(3u, v2.size());\n  EXPECT_TRUE(v.empty());\n}\n\nTEST(SanitizerCommon, Forward) {\n  std::vector<int> v = {1, 2, 3};\n  auto v2 = 
__sanitizer::forward<std::vector<int>>(v);\n  EXPECT_EQ(3u, v2.size());\n  EXPECT_TRUE(v.empty());\n}\n\nTEST(SanitizerCommon, ForwardConst) {\n  const std::vector<int> v = {1, 2, 3};\n  auto v2 = __sanitizer::forward<const std::vector<int>&>(v);\n  EXPECT_EQ(3u, v2.size());\n  EXPECT_EQ(3u, v.size());\n}\n\nstruct TestStruct {\n  int a;\n  float b;\n};\n\nTEST(SanitizerCommon, IsTriviallyDestructible) {\n  ASSERT_TRUE((is_trivially_destructible<int>::value));\n  ASSERT_TRUE((is_trivially_destructible<TestStruct>::value));\n  ASSERT_FALSE((is_trivially_destructible<std::vector<int>>::value));\n}\n\nTEST(SanitizerCommon, IsTriviallyCopyable) {\n  ASSERT_TRUE((is_trivially_copyable<int>::value));\n  ASSERT_TRUE((is_trivially_copyable<TestStruct>::value));\n  ASSERT_FALSE((is_trivially_copyable<std::vector<int>>::value));\n}\n\n}  // namespace __sanitizer"
  },
  {
    "path": "runtime/sanitizer_common/tests/sanitizer_vector_test.cpp",
    "content": "//===-- sanitizer_vector_test.cpp -----------------------------------------===//\n//\n// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n// See https://llvm.org/LICENSE.txt for license information.\n// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n//\n//===----------------------------------------------------------------------===//\n//\n// This file is a part of *Sanitizer runtime.\n//\n//===----------------------------------------------------------------------===//\n#include \"sanitizer_common/sanitizer_vector.h\"\n#include \"gtest/gtest.h\"\n\nnamespace __sanitizer {\n\nTEST(Vector, Basic) {\n  Vector<int> v;\n  EXPECT_EQ(v.Size(), 0u);\n  v.PushBack(42);\n  EXPECT_EQ(v.Size(), 1u);\n  EXPECT_EQ(v[0], 42);\n  v.PushBack(43);\n  EXPECT_EQ(v.Size(), 2u);\n  EXPECT_EQ(v[0], 42);\n  EXPECT_EQ(v[1], 43);\n}\n\nTEST(Vector, Stride) {\n  Vector<int> v;\n  for (int i = 0; i < 1000; i++) {\n    v.PushBack(i);\n    EXPECT_EQ(v.Size(), i + 1u);\n    EXPECT_EQ(v[i], i);\n  }\n  for (int i = 0; i < 1000; i++) {\n    EXPECT_EQ(v[i], i);\n  }\n}\n\nTEST(Vector, ResizeReduction) {\n  Vector<int> v;\n  v.PushBack(0);\n  v.PushBack(0);\n  EXPECT_EQ(v.Size(), 2u);\n  v.Resize(1);\n  EXPECT_EQ(v.Size(), 1u);\n}\n\n}  // namespace __sanitizer\n"
  },
  {
    "path": "runtime/sanitizer_common/tests/standalone_malloc_test.cpp",
    "content": "#include <stdio.h>\n#include <vector>\n#include <pthread.h>\n#include <malloc.h>\n#include <algorithm>\n\nusing namespace std;\n\nconst size_t kNumThreds = 16;\nconst size_t kNumIters = 1 << 23;\n\ninline void break_optimization(void *arg) {\n  __asm__ __volatile__(\"\" : : \"r\" (arg) : \"memory\");\n}\n\n__attribute__((noinline))\nstatic void *MallocThread(void *t) {\n  size_t total_malloced = 0, total_freed = 0;\n  size_t max_in_use = 0;\n  size_t tid = reinterpret_cast<size_t>(t);\n  vector<pair<char *, size_t> > allocated;\n  allocated.reserve(kNumIters);\n  for (size_t i = 1; i < kNumIters; i++) {\n    if ((i % (kNumIters / 4)) == 0 && tid == 0)\n      fprintf(stderr, \"   T[%ld] iter %ld\\n\", tid, i);\n    bool allocate = (i % 5) <= 2;  // 60% malloc, 40% free\n    if (i > kNumIters / 4)\n      allocate = i % 2;  // then switch to 50% malloc, 50% free\n    if (allocate) {\n      size_t size = 1 + (i % 200);\n      if ((i % 10001) == 0)\n        size *= 4096;\n      total_malloced += size;\n      char *x = new char[size];\n      x[0] = x[size - 1] = x[size / 2] = 0;\n      allocated.push_back(make_pair(x, size));\n      max_in_use = max(max_in_use, total_malloced - total_freed);\n    } else {\n      if (allocated.empty()) continue;\n      size_t slot = i % allocated.size();\n      char *p = allocated[slot].first;\n      p[0] = 0;  // emulate last user touch of the block\n      size_t size = allocated[slot].second;\n      total_freed += size;\n      swap(allocated[slot], allocated.back());\n      allocated.pop_back();\n      delete [] p;\n    }\n  }\n  if (tid == 0)\n    fprintf(stderr, \"   T[%ld] total_malloced: %ldM in use %ldM max %ldM\\n\",\n           tid, total_malloced >> 20, (total_malloced - total_freed) >> 20,\n           max_in_use >> 20);\n  for (size_t i = 0; i < allocated.size(); i++)\n    delete [] allocated[i].first;\n  return 0;\n}\n\ntemplate <int depth>\nstruct DeepStack {\n  __attribute__((noinline))\n  static void 
*run(void *t) {\n    break_optimization(0);\n    DeepStack<depth - 1>::run(t);\n    break_optimization(0);\n    return 0;\n  }\n};\n\ntemplate<>\nstruct DeepStack<0> {\n  static void *run(void *t) {\n    MallocThread(t);\n    return 0;\n  }\n};\n\n// Build with -Dstandalone_malloc_test=main to make it a separate program.\nint standalone_malloc_test() {\n  pthread_t t[kNumThreds];\n  for (size_t i = 0; i < kNumThreds; i++)\n    pthread_create(&t[i], 0, DeepStack<200>::run, reinterpret_cast<void *>(i));\n  for (size_t i = 0; i < kNumThreds; i++)\n    pthread_join(t[i], 0);\n  malloc_stats();\n  return 0;\n}\n"
  },
  {
    "path": "runtime/sanitizer_common/weak_symbols.txt",
    "content": "___sanitizer_free_hook\n___sanitizer_malloc_hook\n___sanitizer_report_error_summary\n___sanitizer_sandbox_on_notify\n___sanitizer_symbolize_code\n___sanitizer_symbolize_data\n___sanitizer_symbolize_demangle\n___sanitizer_symbolize_flush\n___sanitizer_symbolize_set_demangle\n___sanitizer_symbolize_set_inline_frames\n"
  },
  {
    "path": "solvers/CMakeLists.txt",
    "content": "set(CMAKE_POSITION_INDEPENDENT_CODE ON)\nset(CMAKE_CXX_STANDARD 17)\n\nset(CMAKE_CXX_FLAGS \"${CMAKE_CXX_FLAGS} -g\")\n\n## solvers\nadd_library(Z3Solver STATIC z3.cpp z3-ts.cpp)\ntarget_compile_options(Z3Solver PRIVATE -stdlib=libc++)\ntarget_include_directories(Z3Solver PUBLIC\n    ${CMAKE_CURRENT_SOURCE_DIR}/../runtime\n)\ninstall (TARGETS Z3Solver DESTINATION ${SYMSAN_LIB_DIR})\n\nadd_library(z3parser STATIC z3-ts.cpp)\ntarget_include_directories(z3parser PRIVATE\n    ${CMAKE_CURRENT_SOURCE_DIR}/../runtime\n)\n\nif (NOT LLVM_FOUND)\n  message(FATAL_ERROR \"You haven't install LLVM !\")\nendif()\n\n## rgd-solver\n\nadd_subdirectory(jigsaw)\n\nadd_library(rgd-solver STATIC\n    z3-solver.cpp\n    jit-solver.cpp\n    i2s-solver.cpp\n)\n\ntarget_compile_options(rgd-solver PRIVATE\n    -O3 -g -mcx16 -march=native -fno-builtin-malloc -fno-builtin-calloc -fno-builtin-realloc -fno-builtin-free\n)\nif (ASAN_BUILD)\n  target_compile_options(rgd-solver PRIVATE -fsanitize=address)\nendif()\n\ntarget_include_directories(rgd-solver PRIVATE\n    ${CMAKE_CURRENT_SOURCE_DIR}\n    ${CMAKE_CURRENT_SOURCE_DIR}/../runtime\n)\n\ntarget_link_libraries(rgd-solver PRIVATE\n    tcmalloc\n    ${Z3_LIBRARY}\n    jigsaw\n    profiler\n)\n"
  },
  {
    "path": "solvers/i2s-solver.cpp",
    "content": "#include \"solver.h\"\n\n#include \"dfsan/dfsan.h\"\n\n#include <math.h>\n#include <string.h>\n\nusing namespace rgd;\n\n#define DEBUG 0\n\n#if !DEBUG\n#undef DEBUGF\n#define DEBUGF(_str...) do { } while (0)\n#elif !defined (DEBUGF)\n#define DEBUGF(_str...) do { fprintf(stderr, _str); } while (0)\n#endif\n\n#ifndef WARNF\n#define WARNF(_str...) do { fprintf(stderr, _str); } while (0)\n#endif\n\n#if defined(__GNUC__)\nstatic inline bool (likely)(bool x) { return __builtin_expect((x), true); }\nstatic inline bool (unlikely)(bool x) { return __builtin_expect((x), false); }\n#else\nstatic inline bool (likely)(bool x) { return x; }\nstatic inline bool (unlikely)(bool x) { return x; }\n#endif\n\n#undef SWAP64\n#define SWAP64(_x)                                                             \\\n  ({                                                                           \\\n                                                                               \\\n    uint64_t _ret = (_x);                                                           \\\n    _ret =                                                                     \\\n        (_ret & 0x00000000FFFFFFFF) << 32 | (_ret & 0xFFFFFFFF00000000) >> 32; \\\n    _ret =                                                                     \\\n        (_ret & 0x0000FFFF0000FFFF) << 16 | (_ret & 0xFFFF0000FFFF0000) >> 16; \\\n    _ret =                                                                     \\\n        (_ret & 0x00FF00FF00FF00FF) << 8 | (_ret & 0xFF00FF00FF00FF00) >> 8;   \\\n    _ret;                                                                      \\\n                                                                               \\\n  })\n\n// It is impossible to define 128 bit constants, so ...\n#ifdef WORD_SIZE_64\n  #define SWAPN(_x, _l)                            \\\n    ({                                             \\\n                                                   \\\n      u128  _res 
= (_x), _ret;                     \\\n      char *d = (char *)&_ret, *s = (char *)&_res; \\\n      int   i;                                     \\\n      for (i = 0; i < 16; i++)                     \\\n        d[15 - i] = s[i];                          \\\n      u32 sr = 128U - ((_l) << 3U);                \\\n      (_ret >>= sr);                               \\\n      (u128) _ret;                                 \\\n                                                   \\\n    })\n#endif\n\n#define SWAPNN(_x, _y, _l)                     \\\n  ({                                           \\\n                                               \\\n    char *d = (char *)(_x), *s = (char *)(_y); \\\n    u32   i, l = (_l)-1;                       \\\n    for (i = 0; i <= l; i++)                   \\\n      d[l - i] = s[i];                         \\\n                                               \\\n  })\n\nstatic uint64_t get_i2s_value(uint32_t comp, uint64_t v, bool rhs) {\n  switch (comp) {\n    case rgd::Equal:\n    case rgd::Ule:\n    case rgd::Uge:\n    case rgd::Sle:\n    case rgd::Sge:\n      return v;\n    case rgd::Distinct:\n    case rgd::Ugt:\n    case rgd::Sgt:\n      if (rhs) return v - 1;\n      else return v + 1;\n    case rgd::Ult:\n    case rgd::Slt:\n      if (rhs) return v + 1;\n      else return v - 1;\n    default:\n      WARNF(\"Non-relational i2s op %u!\\n\", comp);\n  }\n  return v;\n}\n\nstatic inline uint64_t _get_binop_value(uint64_t v1, uint64_t v2, uint16_t kind) {\n  switch (kind) {\n    case rgd::Add: return v1 + v2;\n    case rgd::Sub: return v1 - v2;\n    case rgd::Mul: return v1 * v2;\n    case rgd::UDiv: return v2 ? v1 / v2 : 0;\n    case rgd::SDiv: return v2 ? (int64_t)v1 / (int64_t)v2 : 0;\n    case rgd::URem: return v2 ? v1 % v2 : 0;\n    case rgd::SRem: return v2 ? 
(int64_t)v1 % (int64_t)v2 : 0;\n    case rgd::And: return v1 & v2;\n    case rgd::Or: return v1 | v2;\n    case rgd::Xor: return v1 ^ v2;\n    case rgd::Shl: return v1 << v2;\n    case rgd::LShr: return v1 >> v2;\n    case rgd::AShr: return (int64_t)v1 >> v2;\n    default: WARNF(\"Non-binary i2s op %u!\\n\", kind);\n  }\n  return 0;\n}\n\nstatic inline uint64_t _get_binop_value_r(uint64_t r, uint64_t const_op, uint16_t kind, bool rhs) {\n  // we aim to reverse the binary operation\n  // if rhs:              const_op op v = r\n  // if lhs (i.e., !rhs): v op const_op = r\n  switch (kind) {\n    case rgd::Add: return r - const_op; // v = r - const_op\n    case rgd::Sub: return rhs ? const_op - r : r + const_op; // rhs: v = const_op - r; lhs: v = r + const_op\n    case rgd::Mul: return r / const_op; // v = r / const_op\n    case rgd::UDiv: return rhs ? const_op / r : r * const_op; // rhs: v = const_op / r; lhs: v = r * const_op\n    case rgd::SDiv: return rhs ? (int64_t)const_op / (int64_t)r : (int64_t)r * (int64_t)const_op;\n    case rgd::URem:\n      if (rhs) {\n        if (const_op < r) {\n          WARNF(\"URem rhs const_op < r\\n\");\n          return r;\n        }\n        // const_op % v = r\n        // if const_op > r, const_op % (const_op - r) = r\n        // if const_op == r, const_op % (const_op + 1) = const_op = r\n        // if const_op < r, not possible\n        return const_op > r ? const_op - r : const_op + 1;\n      } else {\n        // XXX: (v % const_op) % const_op == v % const_op = r\n        return r;\n      }\n    case rgd::SRem:\n      if (rhs) {\n        if ((int64_t)const_op < (int64_t)r) {\n          WARNF(\"SRem rhs const_op < r\\n\");\n          return r;\n        }\n        return (int64_t)const_op > (int64_t)r ? (int64_t)const_op - (int64_t)r : (int64_t)const_op + 1;\n      } else {\n        return r;\n      }\n    case rgd::And: return (r & const_op) == r ? 
r : const_op; // XXX: when r = v & const_op, (r) & const_op = (v & const_op) & const_op = v & const_op = r\n    case rgd::Or: return (r | const_op) == r ? r : const_op;  // XXX: (a | b) | b == a | b\n    case rgd::Xor: return r ^ const_op; // v = r ^ const_op\n    case rgd::Shl:\n      if (rhs) {\n        if (const_op == 1) {\n          double log2 = std::log2(r);\n          return static_cast<uint64_t>(log2);\n        } else {\n          WARNF(\"unsupported Shl (rhs) const_op %lu\\n\", const_op);\n          return 0;\n        }\n      } else {\n        return r >> const_op; // v = r >> const_op\n      }\n    case rgd::LShr:\n      if (rhs) {\n        WARNF(\"LShr rhs not supported\\n\");\n        return r; // FIXME: r probably is not correct\n      }\n      return r << const_op; // v = r << diff\n    case rgd::AShr:\n      if (rhs) {\n        WARNF(\"AShr rhs not supported\");\n        return r; // FIXME: r probably is not correct\n      }\n      return (int64_t)r << const_op;\n    default: WARNF(\"Non-binary binop_value op %u!\\n\", kind);\n  }\n  return 0;\n}\n\nstatic uint64_t get_binop_value(std::shared_ptr<const Constraint> constraint,\n    const AstNode &node, uint64_t value, uint64_t &const_op, bool &rhs) {\n  auto &left = node.children(0);\n  auto &right = node.children(1);\n  uint64_t r = 0;\n  if (left.kind() == Constant) {\n    const_op = constraint->input_args[left.index()].second;\n    r = _get_binop_value(const_op, value, node.kind());\n    rhs = true;\n  } else if (right.kind() == Constant) {\n    const_op = constraint->input_args[right.index()].second;\n    r = _get_binop_value(value, const_op, node.kind());\n    rhs = false;\n  }\n  return r;\n}\n\nI2SSolver::I2SSolver(): matches(0), mismatches(0) {\n  binop_mask.set(rgd::Add);\n  binop_mask.set(rgd::Sub);\n  binop_mask.set(rgd::Mul);\n  binop_mask.set(rgd::UDiv);\n  binop_mask.set(rgd::SDiv);\n  binop_mask.set(rgd::URem);\n  binop_mask.set(rgd::SRem);\n  binop_mask.set(rgd::And);\n  // 
binop_mask.set(rgd::Or);\n  binop_mask.set(rgd::Xor);\n  // binop_mask.set(rgd::Shl);\n  binop_mask.set(rgd::LShr);\n  binop_mask.set(rgd::AShr);\n}\n\nsolver_result_t\nI2SSolver::solve_icmp(std::shared_ptr<const Constraint> const& c,\n                      std::unique_ptr<ConsMeta> const& cm,\n                      uint32_t comparison,\n                      const uint8_t *in_buf, size_t in_size,\n                      uint8_t *out_buf, size_t &out_size) {\n\n  uint64_t value = 0, value_r = 0;\n  uint64_t r = 0;\n  for (auto const& candidate : cm->i2s_candidates) {\n    size_t offset = candidate.first;\n    uint32_t bytes = candidate.second;\n    if (bytes > 8) {\n      // FIXME: support larger int size\n      continue;\n    }\n    auto atoi = c->atoi_info.find(offset);\n    if (likely(atoi == c->atoi_info.end())) {\n      // size can be not a power of 2\n      memcpy(&value, &in_buf[offset], bytes);\n      value_r = SWAP64(value) >> (64 - bytes * 8);\n      DEBUGF(\"i2s: try %lu, length %u = 0x%016lx, 0x%016lx, comparison = %d\\n\",\n          offset, bytes, value, value_r, comparison);\n      if (c->op1 == value) {\n        matches++;\n        r = get_i2s_value(comparison, c->op2, false);\n      } else if (c->op2 == value) {\n        matches++;\n        r = get_i2s_value(comparison, c->op1, true);\n      } else if (c->op1 == value_r) {\n        matches++;\n        r = get_i2s_value(comparison, c->op2, false);\n        r = SWAP64(r) >> (64 - bytes * 8);\n      } else if (c->op2 == value_r) {\n        matches++;\n        r = get_i2s_value(comparison, c->op1, true);\n        r = SWAP64(r) >> (64 - bytes * 8);\n      } else if ((binop_mask & c->ops).count() == 1) {\n        // try some simple binary operations\n        auto &left = c->get_root()->children(0);\n        auto &right = c->get_root()->children(1);\n        uint64_t const_op = 0;\n        uint64_t mask = (1ULL << (bytes * 8)) - 1;\n        uint16_t kind = 0;\n        // true if the input is on the right 
hand side of the comparison\n        bool rhs = false;\n        // true if the input is on the right hand side of the binary operation\n        // NOTE, not the right hand side of the comparison\n        bool bop_rhs = false;\n        // check reverse too\n        bool is_reversed = false;\n        // check if lhs of the comparison is a simple binary operation with a constant\n        if (isBinaryOperation(left.kind())) {\n          r = get_binop_value(c, left, value, const_op, bop_rhs);\n          r &= mask; // mask the result to avoid overflow\n          DEBUGF(\"i2s: binop (lhs) %lx (%d) %lx = %lx =? %lx\\n\", value, left.kind(), const_op, r, c->op1);\n          if (r == c->op1) {\n            // binop result matches op1 of the comparison\n            kind = left.kind();\n            rhs = false;\n          } else {\n            // check value_r\n            r = get_binop_value(c, left, value_r, const_op, bop_rhs);\n            r &= mask; // mask the result to avoid overflow\n            DEBUGF(\"i2s: binop (lhs) %lx (%d) %lx = %lx =? %lx\\n\", value_r, left.kind(), const_op, r, c->op1);\n            if (r == c->op1) {\n              kind = left.kind();\n              rhs = false;\n              is_reversed = true;\n            } else {\n              const_op = 0;\n            }\n          }\n        }\n        if (isBinaryOperation(right.kind())) {\n          r = get_binop_value(c, right, value, const_op, bop_rhs);\n          r &= mask; // mask the result to avoid overflow\n          DEBUGF(\"i2s: binop (rhs) %lx (%d) %lx = %lx =? 
%lx\\n\", value, right.kind(), const_op, r, c->op2);\n          if (r == c->op2) {\n            // binop result matches op2 of the comparison\n            kind = right.kind();\n            rhs = true;\n          } else {\n            // check value_r\n            r = get_binop_value(c, right, value_r, const_op, bop_rhs);\n            r &= mask; // mask the result to avoid overflow\n            DEBUGF(\"i2s: binop (lhs) %lx (%d) %lx = %lx =? %lx\\n\", value_r, left.kind(), const_op, r, c->op1);\n            if (r == c->op2) {\n              kind = right.kind();\n              rhs = true;\n              is_reversed = true;\n            } else {\n              const_op = 0;\n            }\n          }\n        }\n        if (const_op == 0) {\n          continue; // nothing matches next offset\n        }\n        matches++;\n        // get the expected value\n        r = get_i2s_value(comparison, rhs ? c->op1 : c->op2, rhs);\n        // apply the diff\n        r = _get_binop_value_r(r, const_op, kind, bop_rhs);\n        r &= mask; // mask the result to avoid overflow\n        // reverse the result if necessary\n        if (is_reversed) {\n          r = SWAP64(r) >> (64 - bytes * 8);\n        }\n      } else {\n        continue; // next offset\n      }\n      DEBUGF(\"i2s: %lu = 0x%lx\\n\", offset, r);\n      if (out_size == 0) memcpy(out_buf, in_buf, in_size); // make a copy\n      out_size = in_size;\n      memcpy(&out_buf[offset], &r, bytes);\n      return SOLVER_SAT;\n    } else {\n      // atoi\n      uint32_t base = std::get<1>(atoi->second);\n      uint32_t old_len = std::get<2>(atoi->second);\n      DEBUGF(\"i2s: try atoi %lu, base %u, old_len %u\\n\", offset, base, old_len);\n      long num = 0;\n      unsigned long unum = 0;\n      bool is_signed = false;\n      if (old_len > 0) {\n        char buf[old_len + 1];\n        memcpy(buf, &in_buf[offset], old_len);\n        buf[old_len] = 0;\n        is_signed = (buf[0] == '-');\n        unum = strtoul(buf, NULL, 
base); // all operands are unsgined in symsan\n      }\n      if (c->op1 == unum) {\n        matches++;\n        r = get_i2s_value(comparison, c->op2, false);\n      } else if (c->op2 == unum) {\n        matches++;\n        r = get_i2s_value(comparison, c->op1, true);\n      } else {\n        continue; // next offset\n      }\n      DEBUGF(\"i2s-atoi: %lu = %lx\\n\", offset, r);\n      const char *format = nullptr;\n      switch (base) {\n        case 2: format = \"%lb\"; break;\n        case 8: format = \"%lo\"; break;\n        case 10: format = is_signed ? \"%ld\" : \"%lu\"; break;\n        case 16: format = \"%lx\"; break;\n        default: {\n          WARNF(\"unsupported base %d\\n\", base);\n          continue;\n        }\n      }\n      const size_t max_len = 64; // FIXME: make configurable?\n      char *saved = nullptr;\n      size_t copy_len = in_size - offset - old_len;\n      if (out_size == 0) {\n        memcpy(out_buf, in_buf, offset);\n      } else {\n        copy_len = out_size - offset - old_len;\n        saved = (char*)malloc(copy_len);\n        memcpy(saved, (char*)out_buf + offset + old_len, copy_len);\n      }\n      // extend size as in cmplog\n      size_t num_len;\n      if (is_signed) {\n        num_len = snprintf((char*)out_buf + offset, max_len, format, (long)r);\n      } else {\n        num_len = snprintf((char*)out_buf + offset, max_len, format, r);\n      }\n      if (out_size == 0) {\n        memcpy(out_buf + offset + num_len, in_buf + offset + old_len, copy_len);\n        out_size = in_size + num_len - old_len;\n      } else {\n        memcpy((char*)out_buf + offset + num_len, saved, copy_len);\n        free(saved);\n        out_size += num_len - old_len;\n      }\n      return SOLVER_SAT;\n    }\n  }\n  return SOLVER_TIMEOUT;\n}\n\nsolver_result_t\nI2SSolver::solve_memcmp(std::shared_ptr<const Constraint> const& c,\n                        std::unique_ptr<ConsMeta> const& cm,\n                        const uint8_t *in_buf, size_t 
in_size,\n                        uint8_t *out_buf, size_t &out_size) {\n\n  DEBUGF(\"i2s: try memcmp\\n\");\n\n  size_t const_index = 0;\n  for (auto const& arg : c->input_args) {\n    if (!arg.first) break; // first constant arg\n    const_index++;\n  }\n  if (const_index == c->input_args.size()) {\n    // FIXME: only do memcmp(const, symbolic)\n    mismatches++;\n    return SOLVER_TIMEOUT;\n  }\n  if (cm->i2s_candidates.size() != 1) {\n    // FIXME: only support single i2s candidate\n    WARNF(\"only support single i2s candidate\\n\");\n    return SOLVER_TIMEOUT;\n  }\n  size_t offset = cm->i2s_candidates[0].first;\n  uint32_t size = cm->i2s_candidates[0].second;\n  if (size != c->local_map.size()) {\n    WARNF(\"input size mismatch\\n\");\n    return SOLVER_TIMEOUT;\n  }\n  // make a copy of the input if not already\n  if (out_size == 0) memcpy(out_buf, in_buf, in_size);\n  uint64_t value = 0;\n  int i = 0;\n  auto &right = c->get_root()->children(1);\n  if (likely(right.kind() == rgd::Read)) {\n    // the memcmp argument is directly from input\n    for (size_t o = offset; o < offset + size; o++) {\n      if (i == 0)\n        value = c->input_args[const_index].second;\n      uint8_t v = ((value >> i) & 0xff);\n      out_buf[o] = v;\n      DEBUGF(\"  %lu = %u\\n\", o, v);\n      i += 8;\n      if (i == 64) {\n        const_index++; // move on to the next 64-bit chunk\n        i = 0;\n      }\n    }\n    out_size = in_size;\n    return SOLVER_SAT;\n  } else {\n    // there could be transformations on the input\n    auto *info = __dfsan::get_label_info(c->get_root()->label());\n    uint64_t sample = info->op2.i;\n    uint16_t sample_len = info->size > 8 ? 8 : info->size;\n    uint8_t sample_buf[sample_len];\n    memcpy(sample_buf, &sample, sample_len);\n#if DEBUG\n    memcpy(&value, &in_buf[offset], size > 8 ? 
8 : size);\n    DEBUGF(\"i2s: memcmp encoded: %016lx => %016lx\\n\", value, sample);\n#endif\n    uint8_t encode_val = 0, touppwer = 0, tolower = 0;\n\n    // we only have one sample, so we cannot to reliable guessing purely\n    // based on input-output pairs, instead, we leverage the symbolic AST\n    // to guide the guessing\n    uint16_t kind = 0;\n    for (uint16_t i = rgd::Add; i < rgd::Shl; ++i) {\n      if (i == rgd::Not || i == rgd::Neg || i == rgd::And || i == rgd::Or)\n        continue; // we cannot reverse bitwise And and Or\n      if (c->ops.test(i)) {\n        if (kind != 0) {\n          kind = 0;\n          break;\n        } else {\n          kind = i;\n        }\n      }\n    }\n    if (kind != 0) {\n      // XXX: always assumes const_op is the rhs?\n      encode_val = (uint8_t)_get_binop_value_r(sample_buf[0], in_buf[offset], kind, false);\n    } else {\n      for (auto i = 0; i < sample_len; ++i) {\n        // check simple encoding\n        tolower = ((in_buf[offset + i] | 0x20) == sample_buf[i]) ? 1 : 0;\n        touppwer = ((in_buf[offset + i] & 0x5f) == sample_buf[i]) ? 
1 : 0;\n      }\n    }\n\n    if (encode_val) {\n      DEBUGF(\"i2s: memcmp try encode val = %02x, op = %d\\n\", encode_val, kind);\n      for (size_t o = offset; o < offset + size; o++) {\n        if (i == 0)\n          value = c->input_args[const_index].second;\n        uint8_t v = ((value >> i) & 0xff);\n        out_buf[o] = (uint8_t)_get_binop_value_r(v, encode_val, kind, false);\n        DEBUGF(\"  %lu = %u\\n\", o, v);\n        i += 8;\n        if (i == 64) {\n          const_index++; // move on to the next 64-bit chunk\n          i = 0;\n        }\n      }\n      out_size = in_size;\n      return SOLVER_SAT;\n    } else if (touppwer) {\n      DEBUGF(\"i2s: memcmp try touppwer\\n\");\n      for (size_t o = offset; o < offset + size; o++) {\n        if (i == 0)\n          value = c->input_args[const_index].second;\n        uint8_t v = ((value >> i) & 0xff);\n        out_buf[o] = v | 0x20;\n        DEBUGF(\"  %lu = %u\\n\", o, v);\n        i += 8;\n        if (i == 64) {\n          const_index++; // move on to the next 64-bit chunk\n          i = 0;\n        }\n      }\n      out_size = in_size;\n      return SOLVER_SAT;\n    } else if (tolower) {\n      DEBUGF(\"i2s: memcmp try tolower\\n\");\n      for (size_t o = offset; o < offset + size; o++) {\n        if (i == 0)\n          value = c->input_args[const_index].second;\n        uint8_t v = ((value >> i) & 0xff);\n        out_buf[o] = v & 0x5f;\n        DEBUGF(\"  %lu = %u\\n\", o, v);\n        i += 8;\n        if (i == 64) {\n          const_index++; // move on to the next 64-bit chunk\n          i = 0;\n        }\n      }\n      out_size = in_size;\n      return SOLVER_SAT;\n    } else {\n      return SOLVER_TIMEOUT;\n    }\n  }\n  return SOLVER_TIMEOUT;\n}\n\nsolver_result_t\nI2SSolver::solve(std::shared_ptr<SearchTask> task,\n                 const uint8_t *in_buf, size_t in_size,\n                 uint8_t *out_buf, size_t &out_size) {\n\n  solver_result_t ret = SOLVER_TIMEOUT;\n  size_t n = 
task->size();\n  DEBUGF(\"i2s: new task with %zu constraints\\n\", n);\n  out_size = 0; // use this to indicate whether a copy has been made\n  for (size_t i = 0; i < n; ++i) {\n    // iterate through all constraints, hoping the stacked mutations would work,\n    // instead of destroying each other\n    auto const& c = task->constraints(i);\n    auto const& cm = task->consmetas(i);\n    auto comparison = task->comparisons(i);\n    if (likely(isRelationalKind(comparison))) {\n      if (solve_icmp(c, cm, comparison, in_buf, in_size, out_buf, out_size) == SOLVER_SAT) {\n        // be optimistic, as long as there's one match, we should try the output\n        ret = SOLVER_SAT;\n      } else {\n        mismatches++;\n      }\n    } else if (comparison == rgd::Memcmp) {\n      if (solve_memcmp(c, cm, in_buf, in_size, out_buf, out_size) == SOLVER_SAT) {\n        // be optimistic, as long as there's one match, we should try the output\n        ret = SOLVER_SAT;\n      } else {\n        mismatches++;\n      }\n    } else if (comparison == rgd::MemcmpN) {\n      DEBUGF(\"i2s: try memcmpN\\n\");\n      // copy the matching bytes\n      if (out_size == 0) memcpy(out_buf, in_buf, in_size);\n      size_t offset = cm->i2s_candidates[0].first;\n      uint32_t size = cm->i2s_candidates[0].second;\n      out_buf[offset] = in_buf[offset] + 8;\n      out_size = in_size;\n      ret = SOLVER_SAT;\n    }\n  }\n\n  return ret;\n}"
  },
  {
    "path": "solvers/jigsaw/CMakeLists.txt",
    "content": "project(jigsaw CXX)\n\nset(CMAKE_CXX_STANDARD 17)\nset(CMAKE_CXX_FLAGS \"${CMAKE_CXX_FLAGS} -O3 -g -mcx16 -march=native -fno-builtin-malloc -fno-builtin-calloc -fno-builtin-realloc -fno-builtin-free\")\nif (ASAN_BUILD)\n  set(CMAKE_CXX_FLAGS \"${CMAKE_CXX_FLAGS} -fsanitize=address\")\nendif()\n\nadd_library(jigsaw STATIC\n  gd.cc\n  input.cc\n  grad.cc\n  jit.cc\n)\n\ntarget_include_directories(jigsaw PRIVATE\n  ${CMAKE_CURRENT_SOURCE_DIR}/../\n)\n\ntarget_link_libraries(jigsaw\n  tcmalloc\n  LLVM\n)\n"
  },
  {
    "path": "solvers/jigsaw/config.h",
    "content": "#ifndef CONFIG_H_\n#define CONFIG_H_\n#define MAX_NUM_MINIMAL_OPTIMA_ROUND 32\n#define MAX_EXEC_TIMES 1000\n#endif\n"
  },
  {
    "path": "solvers/jigsaw/gd.cc",
    "content": "#include <stdint.h>\n#include <assert.h>\n#include <iostream>\n\n#include \"jit.h\"\n#include \"input.h\"\n#include \"grad.h\"\n#include \"config.h\"\n#include \"ast.h\"\n#include \"task.h\"\n\nusing namespace rgd;\n\n#define DEBUG 0\n\n#define likely(x)       __builtin_expect(!!(x), 1)\n#define unlikely(x)     __builtin_expect(!!(x), 0)\n\n#define SWAP64(_x)                                                             \\\n  ({                                                                           \\\n                                                                               \\\n    uint64_t _ret = (_x);                                                           \\\n    _ret =                                                                     \\\n        (_ret & 0x00000000FFFFFFFF) << 32 | (_ret & 0xFFFFFFFF00000000) >> 32; \\\n    _ret =                                                                     \\\n        (_ret & 0x0000FFFF0000FFFF) << 16 | (_ret & 0xFFFF0000FFFF0000) >> 16; \\\n    _ret =                                                                     \\\n        (_ret & 0x00FF00FF00FF00FF) << 8 | (_ret & 0xFF00FF00FF00FF00) >> 8;   \\\n    _ret;                                                                      \\\n                                                                               \\\n  })\n\nstatic void dump_results(MutInput &input, std::shared_ptr<SearchTask> task) {\n  int i = 0;\n  for (auto it : task->inputs()) {\n    std::cout << \"index is \" << it.first << \" result is \" << (int)input.value[i] << std::endl;\n    i++;\n  }\n}\n\nstatic void dump_distances(std::vector<uint64_t> &distances) {\n  for (size_t i = 0; i < distances.size(); i++) {\n    std::cout << \"distance \" << i << \" is \" << distances[i] << std::endl;\n  }\n}\n\n\nstatic void add_results(MutInput &input, std::shared_ptr<SearchTask> task) {\n  int i = 0;\n  // since we used a trick (allow each byte to overflow and then use add instead\n  
// of bitwise or to concatenate, so the overflow would be visible)\n  // to allow us to manipulate each byte individually during gradient descent,\n  // we need to do a bit more work to get the final result\n\n  // first, we order the inputs by their offset\n  std::map<uint32_t, uint64_t> ordered_inputs;\n  for (auto it : task->inputs()) {\n    ordered_inputs[it.first] = input.value[i];\n    i++;\n  }\n\n  // next, convert the ordered inputs to a vector for easier access\n  std::vector<std::pair<uint32_t, uint64_t> > ordered_inputs_v;\n  for (const auto& pair : ordered_inputs) {\n    ordered_inputs_v.push_back(pair);\n  }\n\n  // finally, we calculate the final result\n  uint32_t length = 1;\n  uint64_t result = 0;\n  uint32_t start = 0;\n  for (i = 0; i < ordered_inputs_v.size();) {\n    start = ordered_inputs_v[i].first;\n    result = ordered_inputs_v[i].second;\n    length = task->shapes(start);\n    if (length == 0) { ++i; continue; }\n    if (length <= 8) { // 8 bytes or less\n      // first, concatenate the bytes according to the shape\n      for (int j = 1; j < length; ++j) {\n        result += (ordered_inputs_v[i + j].second << (8 * j));\n      }\n      // then extract the correct values, little endian\n      for (int j = 0; j < length; ++j) {\n        task->solution[start + j] = (uint8_t)((result >> (8 * j)) & 0xff);\n      }\n    } else { // if it's too large, just copy the value\n      for (int j = 0; j < length; ++j) {\n        task->solution[start + j] = ordered_inputs_v[i + j].second;\n      }\n    }\n    i += length;\n  }\n}\n\n\nstatic inline uint64_t sat_inc(uint64_t base, uint64_t inc) {\n  return base + inc < base ? 
-1 : base + inc;\n}\n\n\nstatic uint32_t negate(uint32_t op) {\n  switch (op) {\n    case rgd::Equal: return rgd::Distinct;\n    case rgd::Distinct: return rgd::Equal;\n    case rgd::Sge: return rgd::Slt;\n    case rgd::Sgt:  return rgd::Sle;\n    case rgd::Sle:  return rgd::Sgt;\n    case rgd::Slt:  return rgd::Sge;\n    case rgd::Uge:  return rgd::Ult;\n    case rgd::Ugt:  return rgd::Ule;\n    case rgd::Ule:  return rgd::Ugt;\n    case rgd::Ult:  return rgd::Uge;\n    default: fprintf(stderr, \"Non-relational op!\\n\");\n  };\n  return 0;\n}\n\n\nstatic uint64_t get_distance(uint32_t comp, uint64_t a, uint64_t b) {\n  uint64_t dis = 0;\n  switch (comp) {\n    case rgd::Equal:\n      if (a >= b) dis = a - b;\n      else dis = b - a;\n      break;\n    case rgd::Distinct:\n      if (a == b) dis = 1;\n      else dis = 0;\n      break;\n    case rgd::Ult:\n      if (a < b) dis = 0;\n      else dis = sat_inc(a - b, 1);\n      break;\n    case rgd::Ule:\n      if (a <= b) dis = 0;\n      else dis = a - b;\n      break;\n    case rgd::Ugt:\n      if (a > b) dis = 0;\n      else dis = sat_inc(b - a, 1);\n      break;\n    case rgd::Uge:\n      if (a >= b) dis = 0;\n      else dis = b - a;\n      break;\n    case rgd::Slt:\n      if ((int64_t)a < (int64_t)b) return 0;\n      else dis = sat_inc(a - b, 1);\n      break;\n    case rgd::Sle:\n      if ((int64_t)a <= (int64_t)b) return 0;\n      else dis = a - b;\n      break;\n    case rgd::Sgt:\n      if ((int64_t)a > (int64_t)b) return 0;\n      else dis = sat_inc(b - a, 1);\n      break;\n    case rgd::Sge:\n      if ((int64_t)a >= (int64_t)b) return 0;\n      else dis = b - a;\n      break;\n    case rgd::Memcmp:\n      dis = a ^ 1;\n      break;\n    case rgd::MemcmpN:\n      dis = a;\n      break;\n    default:\n      fprintf(stderr, \"Non-relational op!\\n\");\n  }\n  return dis;\n}\n\n\nstatic uint64_t single_distance(MutInput &input, std::vector<uint64_t> &distances, std::shared_ptr<SearchTask> task, const uint32_t 
index) {\n  // only re-compute the distance of the constraints that are affected by the change\n  uint64_t res = 0;\n  for (uint32_t cons_id : task->cmap(index)) {\n    auto& c = task->constraints(cons_id);\n    auto& cm = task->consmetas(cons_id);\n    int arg_idx = 0;\n    for (auto const &arg : cm->input_args) {\n      if (arg.first) {// symbolic\n        task->scratch_args[RET_OFFSET + arg_idx] = input.value[arg.second];\n      } else {\n        task->scratch_args[RET_OFFSET + arg_idx] = arg.second;\n      }\n      ++arg_idx;\n    }\n    c->fn(task->scratch_args);\n    uint64_t dis = get_distance(cm->comparison, task->scratch_args[0], task->scratch_args[1]);\n    distances[cons_id] = dis;\n#if DEBUG\n    std::cout << \"single distance of constraint \" << cons_id << \" is \" << dis << std::endl;\n#endif\n    res = sat_inc(res, dis);\n  }\n  return res;\n}\n\n\nstatic uint64_t distance(MutInput &input, std::vector<uint64_t> &distances, std::shared_ptr<SearchTask> task) {\n  static int timeout = 0;\n  static int solved= 0;\n  uint64_t res = 0;\n\n  for (int i = 0, n = task->size(); i < n; i++) {\n    auto& c = task->constraints(i);\n    auto& cm = task->consmetas(i);\n    // mapping symbolic args\n    int arg_idx = 0;\n    for (auto const &arg : cm->input_args) {\n      if (arg.first) { // symbolic\n        task->scratch_args[RET_OFFSET + arg_idx] = input.value[arg.second];\n      } else {\n        task->scratch_args[RET_OFFSET + arg_idx] = arg.second;\n      }\n      ++arg_idx;\n    }\n    c->fn(task->scratch_args);\n    uint64_t dis = get_distance(cm->comparison, task->scratch_args[0], task->scratch_args[1]);\n    distances[i] = dis;\n    cm->op1 = task->scratch_args[0];\n    cm->op2 = task->scratch_args[1];\n#if DEBUG\n    std::cout << \"distance of constraint \" << i << \" is \" << dis << std::endl;\n#endif\n    res = sat_inc(res, dis);\n  }\n  if (res == 0) {\n    task->stopped = true;\n    task->solved = true;\n    //dump_results(input, task);\n    
add_results(input, task);\n  }\n  task->attempts++;\n  if (task->attempts > MAX_EXEC_TIMES) {\n    task->stopped = true;\n    task->solved = false;\n  }\n  return res;\n}\n\n\nstatic void partial_derivative(MutInput &orig_input, const uint32_t index, uint64_t f0, bool *sign, bool* is_linear, uint64_t *val, std::shared_ptr<SearchTask> task) {\n\n  uint64_t orig_val = orig_input.value[index];\n  uint64_t delta = 1;\n  uint64_t f_plus = 0, f_minus = 0;\n  uint64_t single_dis;\n\n  // calculate f(x+delta)\n  for (delta = 1; delta < 256; delta = delta << 1) {\n    task->plus_distances = task->min_distances;\n    orig_input.update(index, true, delta);\n    single_dis = single_distance(orig_input, task->plus_distances, task, index);\n    if (single_dis == 0) { // well, we got lucky and found a solution\n      *sign = true;\n      *is_linear = false;\n      *val = 0;\n      return;\n    }\n    f_plus = 0;\n    for (int i = 0, n = task->size(); i < n; i++)\n      f_plus = sat_inc(f_plus, task->plus_distances[i]);\n\n    task->attempts += 1;\n    if (task->attempts > MAX_EXEC_TIMES)\n      task->stopped = true;\n    if (task->stopped) { *val = 0; return; }\n\n    if (f_plus == f0) { // if f(x+delta) == f(x), delta is not large enough\n      delta = delta << 1;\n    } else {\n      break;\n    }\n  }\n  orig_input.value[index] = orig_val; // restore the original value\n\n  // calculate f(x-delta)\n  for (delta = 1; delta < 256; delta = delta << 1) {\n    task->minus_distances = task->min_distances;\n    orig_input.update(index, false, delta);\n    single_dis = single_distance(orig_input, task->minus_distances, task, index);\n    if (single_dis == 0) { // well, we got lucky and found a solution\n      *sign = false;\n      *is_linear = false;\n      *val = 0;\n      return;\n    }\n    f_minus = 0;\n    for (int i = 0, n = task->size(); i < n; i++)\n      f_minus = sat_inc(f_minus, task->minus_distances[i]);\n\n    task->attempts += 1;\n    if (task->attempts > 
MAX_EXEC_TIMES)\n      task->stopped = true;\n    if (task->stopped) { *val = 0; return;}\n\n    if (f_minus == f0) { // if f(x-delta) == f(x), delta is not large enough\n      delta = delta << 1;\n    } else {\n      break;\n    }\n  }\n  orig_input.value[index] = orig_val; // restore the original value\n\n#if DEBUG\n  std::cout << \"calculating partial and f0 is \" << f0 << \" f_minus is \" << f_minus << \" and f_plus is \" << f_plus << std::endl;\n#endif\n\n  if (f_minus < f0) {\n    if (f_plus < f0) {\n      if (f_minus < f_plus) {\n        *sign = false;\n        *is_linear = false;\n        *val = f0 - f_minus;\n      } else { // f_minus >= f_plus\n        *sign = true;\n        *is_linear = false;\n        *val = f0 - f_plus;\n      }\n    } else { // f_plus >= f0\n      *sign = false;\n      *is_linear = ((f_minus != f0) && (f0 - f_minus == f_plus - f0));\n      *val = f0 - f_minus;\n    }\n  } else { // f_minus >= f0\n    if (f_plus < f0) {\n      *sign = true;\n      *is_linear = ((f_minus != f0) && (f_minus - f0 == f0 - f_plus));\n      *val = f0 - f_plus;\n    } else { // f_plus >= f0\n      // reached a local optimum\n      *sign = true;\n      *is_linear = false;\n      *val = 0;\n    }\n  }\n}\n\n\nstatic void compute_delta_all(MutInput &input, Grad &grad, size_t step) {\n  double fstep = (double)step;\n  int index = 0;\n  for (auto &gradu : grad.get_value()) {\n    double movement = gradu.pct * step;\n    input.update(index, gradu.sign, (uint64_t)movement);\n#if DEBUG\n    std::cout << \"compute_delta_all for index = \" << index\n              << \", sign = \" << gradu.sign\n              << \", move = \" << movement << std::endl;\n#endif\n    index++;\n  }\n}\n\n\nstatic void cal_gradient(MutInput &input, uint64_t f0, Grad &grad, std::shared_ptr<SearchTask> task) {\n  uint64_t max = 0;\n  uint32_t index = 0;\n  for (auto &gradu : grad.get_value()) {\n\n    if (task->stopped) {\n      break;\n    }\n    bool sign = false;\n    bool is_linear = 
false;\n    uint64_t val = 0;\n\n    partial_derivative(input, index, f0, &sign, &is_linear, &val, task);\n    if (val > max) {\n      max = val;\n    }\n#if DEBUG\n    std::cout << \"cal_gradient for index = \" << index << \", offset = \"\n              << task->inputs[index].first << \", val = \" << val << std::endl;\n#endif\n    //linear = linear && l;\n    gradu.sign = sign;\n    gradu.val = val;\n    index++;\n  }\n}\n\n\nstatic uint64_t descend(MutInput &input_min, MutInput &input, uint64_t f0, Grad &grad, std::shared_ptr<SearchTask> task) {\n  uint64_t f_last = f0;\n  input = input_min;\n  bool doDelta = false;\n  int deltaIdx = 0;\n\n  uint64_t vsum = grad.val_sum();\n\n  if (vsum > 0) {\n    auto guess_step = f0 / vsum;\n    compute_delta_all(input, grad, guess_step);\n    uint64_t f_new = distance(input, task->distances, task);\n    if (f_new >= f_last) {\n      input = input_min;\n    } else if (f_new == 0) {\n      // found a solution\n      task->stopped = true;\n      task->solved = true;\n      add_results(input, task);\n      return 0;\n    } else {\n      input_min = input;\n      f_last = f_new;\n      task->min_distances = task->distances;\n    }\n  } else {\n    task->distances = task->min_distances;\n  }\n\n  size_t step = 1;\n  while (true) {\n    while (true) {\n      if (task->stopped) {\n        return f_last;\n      }\n\n      uint64_t f_new = 0;\n      if (doDelta) {\n        double movement = grad.get_value()[deltaIdx].pct * (double)step;\n        input.update(deltaIdx, grad.get_value()[deltaIdx].sign, (uint64_t)movement);\n#if DEBUG\n        std::cout << \"update index = \" << deltaIdx << \", offset = \"\n                  << task->inputs[deltaIdx].first << \", sign = \"\n                  << grad.get_value()[deltaIdx].sign\n                  << \", movement = \" << movement << std::endl;\n#endif\n\n        uint64_t single_dis = single_distance(input, task->distances, task, deltaIdx);\n        for (int i = 0, n = task->size(); i < n; 
i++)\n          f_new = sat_inc(f_new, task->distances[i]);\n        task->attempts += 1;\n        if (task->attempts > MAX_EXEC_TIMES)\n          task->stopped = true;\n        if (single_dis == 0) {\n          // if we're doing delta and the single distance is 0\n          // we're done with the current index\n          break;\n        }\n\n      } else {\n        compute_delta_all(input, grad, step);\n        f_new = distance(input, task->distances, task);\n      }\n\n      if (f_new == 0) {\n        // found a solution\n        task->stopped = true;\n        task->solved = true;\n        add_results(input, task);\n        return 0;\n      } else if (f_new > f_last) { // use > to give the next larger step a chance\n        //if (f_new == UINTMAX_MAX)\n        break;\n      }\n\n      step *= 2;\n      input_min = input;\n      task->min_distances = task->distances;\n      f_last = f_new;\n    }\n\n    if (grad.len() == 1) {\n      break;\n    } else {\n      if (doDelta) deltaIdx++;\n      else { deltaIdx = 0; doDelta = true;}\n      while ((deltaIdx < grad.len()) && grad.get_value()[deltaIdx].pct < 0.01) {\n        deltaIdx++ ;\n      }\n      if (deltaIdx >= grad.len()) {\n        break;\n      }\n      input = input_min;\n      task->distances = task->min_distances;\n      step = 1;\n    }\n  }\n  return f_last;\n}\n\n\nstatic uint64_t get_i2s_value(uint32_t comp, uint64_t v, bool rhs) {\n  switch (comp) {\n    case rgd::Equal:\n    case rgd::Ule:\n    case rgd::Uge:\n    case rgd::Sle:\n    case rgd::Sge:\n      return v;\n    case rgd::Distinct:\n    case rgd::Ugt:\n    case rgd::Sgt:\n      if (rhs) return v - 1;\n      else return v + 1;\n    case rgd::Ult:\n    case rgd::Slt:\n      if (rhs) return v + 1;\n      else return v - 1;\n    default:\n      fprintf(stderr, \"Non-relational op!\\n\");\n  }\n  return v;\n}\n\n\nstatic uint64_t try_new_i2s_value(std::shared_ptr<const Constraint> const& c, uint32_t comparison, uint64_t value, 
std::shared_ptr<SearchTask> task) {\n  int i = 0;\n  for (auto const& [offset, lidx] : c->local_map) {\n    uint64_t v = ((value >> i) & 0xff);\n    task->scratch_args[RET_OFFSET + lidx] = v;\n    i += 8;\n  }\n  int arg_idx = 0;\n  for (auto const& arg : c->input_args) {\n    // NOTE: using the constaints input_args here (instead of the consmeta's)\n    // is fine because the constants are always the same\n    if (!arg.first) task->scratch_args[RET_OFFSET + arg_idx] = arg.second;\n    ++arg_idx;\n  }\n  c->fn(task->scratch_args);\n  return get_distance(comparison, task->scratch_args[0], task->scratch_args[1]);\n}\n\n\nstatic uint64_t try_i2s(MutInput &input_min, MutInput &temp_input, uint64_t f0, std::shared_ptr<SearchTask> task) {\n  temp_input = input_min;\n  bool updated = false;\n  for (int k = 0; k < task->size(); k++) {\n    auto& c = task->constraints(k);\n    auto& cm = task->consmetas(k);\n    if (task->min_distances[k]) {\n      if (likely(isRelationalKind(cm->comparison))) {\n        // check consecutive input bytes against comparison operands\n        // FIXME: add support for other input encodings\n        uint64_t input = 0, input_r, value = 0, dis = -1;\n        for (auto const& candidate : cm->i2s_candidates) {\n          const size_t offset = candidate.first;\n          const uint32_t size = candidate.second;\n          if (size > 8) {\n            continue;\n          }\n          int i = 0, t = size * 8;\n          for (size_t off = offset; off < offset + size; off++) {\n            const uint32_t lidx = c->local_map.at(off);\n            uint64_t v = input_min.get(cm->input_args[lidx].second);\n            input |= (v << i);\n            input_r |= (v << (t - i - 8));\n            i += 8;\n          }\n          if (input == cm->op1) {\n            value = get_i2s_value(cm->comparison, cm->op2, true);\n          } else if (input == cm->op2) {\n            value = get_i2s_value(cm->comparison, cm->op1, false);\n          } else {\n            
goto try_reverse;\n          }\n\n          // test the new value\n          dis = try_new_i2s_value(c, cm->comparison, value, task);\n          if (dis == 0) {\n#if DEBUG\n            std::cerr << \"i2s updated c = \" << k << \" t = \" << t << \" input = \" << input\n                      << \" op1 = \" << cm->op1 << \" op2 = \" << cm->op2\n                      << \" cmp = \" << cm->comparison << \" value = \" << value\n                      << \" old-dis = \" << task->min_distances[k] << \" new-dis = \" << dis << std::endl;\n#endif\n            // successful, update the real inputs\n            i = 0;\n            for (size_t off = offset; off < offset + size; off++) {\n              const uint32_t lidx = c->local_map.at(off);\n              uint8_t v = ((value >> i) & 0xff);\n              temp_input.set(cm->input_args[lidx].second, v);\n              i += 8;\n            }\n            updated = true;\n            break; // one match per comparison\n          }\n\ntry_reverse:\n          // try reverse encoding\n          if (input_r == cm->op1) {\n            value = get_i2s_value(cm->comparison, cm->op2, true);\n          } else if (input_r == cm->op2) {\n            value = get_i2s_value(cm->comparison, cm->op1, false);\n          } else {\n            continue;\n          }\n\n          // test the new value\n          value = SWAP64(value) >> (64 - t); // reverse the value\n          dis = try_new_i2s_value(c, cm->comparison, value, task);\n          if (dis == 0) {\n            // successful, update the real inputs\n            i = 0;\n            for (size_t off = offset; off < offset + size; off++) {\n              const uint32_t lidx = c->local_map.at(off);\n              uint8_t v = ((value >> i) & 0xff);\n              // uint8_t v = ((value >> (t - i - 8)) & 0xff);\n              temp_input.set(cm->input_args[lidx].second, v);\n              i += 8;\n            }\n            updated = true;\n            break;\n          }\n        } // end 
foreach candidate\n      } else if (cm->comparison == rgd::Memcmp) {\n        size_t const_index = 0;\n        for (auto const& arg : c->input_args) {\n          if (!arg.first) break;\n          const_index++;\n        }\n        // memcmp(s1, s2) is i2s_feasible iff s1 is constant\n        // try copy s1 to s2\n        if (const_index == c->input_args.size()) continue;\n        if (cm->i2s_candidates.size() != 1) {\n          fprintf(stderr, \"memcmp should have only one candidate\\n\");\n          continue;\n        }\n        size_t offset = cm->i2s_candidates[0].first;\n        uint32_t size = cm->i2s_candidates[0].second;\n        if (size != c->local_map.size()) {\n          fprintf(stderr, \"input size mismatch\\n\");\n          continue;\n        }\n        int i = 0;\n        uint64_t value = 0;\n        for (size_t off = offset; off < offset + size; off++) {\n          const uint32_t lidx = c->local_map.at(off);\n          if (i == 0)\n            value = c->input_args[const_index].second;\n          uint8_t v = ((value >> i) & 0xff);\n          temp_input.set(cm->input_args[lidx].second, v);\n          i += 8;\n          if (i == 64) {\n            const_index++; // move on to the next 64-bit chunk\n            i = 0;\n          }\n        }\n        updated = true;\n      }\n    }\n  }\n  if (updated) {\n    uint64_t f_new = distance(temp_input, task->distances, task);\n    if (f_new < f0) {\n      // std::cout << \"i2s succeeded: \" << f0 << \" -> \" << f_new << std::endl;\n      input_min = temp_input;\n      task->min_distances = task->distances;\n      return f_new;\n    }\n  }\n  return f0;\n}\n\nstatic uint64_t repick_start_point(MutInput &input_min, std::shared_ptr<SearchTask> task) {\n  input_min.randomize();\n  uint64_t ret = distance(input_min, task->min_distances, task);\n  return ret;\n}\n\n\nstatic uint64_t reload_input(MutInput &input_min, std::shared_ptr<SearchTask> task) {\n  input_min.assign(task->inputs());\n#if 0\n  printf(\"assign 
realod\\n\");\n  for(auto itr : task->inputs) {\n    printf(\"offset %u value %u\\n\", itr.first, itr.second);\n  }\n#endif\n  uint64_t ret = distance(input_min, task->min_distances, task);\n  return ret;\n}\n\nbool rgd::gd_entry(std::shared_ptr<SearchTask> task) {\n  MutInput input(task->inputs_size());\n  MutInput scratch_input(task->inputs_size());\n  task->attempts = 0;\n\n  uint64_t f0 = reload_input(input, task);\n  f0 = try_i2s(input, scratch_input, f0, task);\n  if (task->stopped)\n    return task->solved;\n\n  if (f0 == UINTMAX_MAX)\n    return false;\n\n  int ep_i = 0;\n\n  Grad grad(input.len());\n\n  while (true) {\n    if (task->stopped) {\n      break;\n    }\n#if DEBUG\n    std::cout << \"<<< epoch=\" << ep_i << \" f0=\" << f0 << std::endl;\n    dump_results(input, task);\n    dump_distances(task->min_distances);\n#endif\n\n    cal_gradient(input, f0, grad, task);\n\n    int g_i = 0;\n    while (grad.max_val() == 0) {\n      if (g_i > MAX_NUM_MINIMAL_OPTIMA_ROUND) {\n        break;\n      }\n      if (task->stopped)\n        break;\n      g_i++;\n      //f0 = repick_start_point(input, f0, rng);\n      //f0 = reload_input(input);\n      f0 = repick_start_point(input, task);\n      f0 = try_i2s(input, scratch_input, f0, task);\n      if (task->stopped)\n        break;\n      grad.clear();\n      cal_gradient(input, f0, grad, task);\n    }\n    if (task->stopped || g_i > MAX_NUM_MINIMAL_OPTIMA_ROUND) {\n      //std::cout << \"trapped in local optimia for too long\" << std::endl;\n      break;\n    }\n    //TODO\n    grad.normalize();\n    f0 = descend(input, scratch_input, f0, grad, task);\n    ep_i += 1;\n    //if (ep_i == 2) break;\n  }\n\n  return task->solved;\n}\n"
  },
  {
    "path": "solvers/jigsaw/grad.cc",
    "content": "#include \"grad.h\"\n#include <stdint.h>\n#include <iostream>\n\nusing namespace rgd;\n\nGrad::Grad(size_t size) : grads(size) {\n}\n\nstd::vector<GradUnit>& Grad::get_value() {\n  return grads;\n}\n\n\nuint64_t Grad::max_val() {\n  uint64_t ret = 0;\n  for (auto gradu : grads) {\n    //std::cout << \"graud value is \" << gradu.val <<std::endl;\n    if (gradu.val > ret)\n      ret = gradu.val;\n  }\n  return ret;\n}\n\nvoid Grad::normalize() {\n  double max_grad = (double)max_val();\n  if (max_grad > 0.0) {\n    for(auto &grad : grads) {\n      grad.pct = 1.0 * ((double)grad.val / max_grad);\n    }\n  }\n}\n\nvoid Grad::clear() {\n  for (auto gradu : grads) {\n    gradu.val = 0;\n    gradu.pct = 0.0;\n  }\n}\n\nsize_t Grad::len() {\n  return grads.size();\n}\n\n\nuint64_t Grad::val_sum() {\n  uint64_t ret = 0;\n  for (auto gradu : grads) {\n    //FIXME: saturating_add\n    ret += gradu.val;\n  }\n  return ret;\n}\n\n"
  },
  {
    "path": "solvers/jigsaw/grad.h",
    "content": "#ifndef GRAD_H\n#define GRAD_H\n\n#include <vector>\n#include <stdint.h>\n#include <stddef.h>\n\nnamespace rgd {\n\nclass GradUnit {\npublic:\n  bool sign;\n  uint64_t val;\n  double pct;\n};\n\n\nclass Grad {\nprivate:\n  std::vector<GradUnit> grads;\npublic:\n  Grad(size_t size);\n  std::vector<GradUnit>& get_value();\n  uint64_t max_val();\n  void clear();\n  size_t len();\n  uint64_t val_sum();\n  void normalize();\n};\n\n}; // namespace rgd\n\n#endif\n"
  },
  {
    "path": "solvers/jigsaw/input.cc",
    "content": "#include \"input.h\"\n#include <ctime>\n#include <cstdlib>\n#include <iostream>\n#include <cstring>\n\nusing namespace rgd;\n\nvoid MutInput::update(size_t index, bool direction, uint64_t delta)\n{\n  if (direction)\n    value[index] += delta;\n  else\n    value[index] -= delta;\n}\n\nuint8_t MutInput::get_rand()\n{\n  uint8_t r = (uint8_t)r_val;\n  r_val >>= 8;\n  r_idx++;\n  if (r_idx == 4) {\n    random_r(&r_d, &r_val);\n    r_idx = 0;\n  }\n  return r;\n}\n\nvoid MutInput::assign(std::vector<std::pair<uint32_t,uint8_t>> const& input) {\n  for (int i = 0; i < size_; i++) {\n    value[i] = input[i].second;\n    //std::cout << \"randomize \" << i << \" and assign value \" << (int)value[i] << std::endl;\n  }\n}\n\nvoid MutInput::flip(size_t index, size_t bit_index) {\n  uint8_t val = value[index];\n  uint8_t mask = 1;\n  mask = mask << bit_index;\n  value[index] = val^mask;\n}\n\nvoid MutInput::set(const size_t index, uint8_t val)\n{\n  value[index] = (uint64_t)val;\n}\n\nuint64_t MutInput::len() {\n  return size_;\n}\n\nuint64_t MutInput::val_len() {\n  return size_;\n}\n\nMutInput& MutInput::operator=(const MutInput &other)\n{\n  MutInput::copy(this, &other);\n  return *this;\n}\n\nvoid MutInput::dump() {\n  // printf(\"dumping input and value size is %lu\\n\",value.size());\n  // for(auto i : value)\n  //   printf(\"%d, \",i);\n  // printf(\"\\n\");\n}\n\nvoid MutInput::randomize() {\n  for(int i=0;i<size_;i++) {\n    value[i] = (uint64_t)get_rand();\n    //std::cout << \"randomize \" << i << \" and assign value \" << (int)value[i] << std::endl;\n  }\n}\n\nuint8_t MutInput::get(const size_t i) {\n  return value[i];\n}\n\nMutInput::MutInput(size_t size) {\n  r_idx = 0;\n  value = (uint64_t*)malloc(size * sizeof(uint64_t));\n  size_ = size;\n  unsigned int seed;\n  //_rdseed32_step(&seed);\n  seed = (unsigned)time(NULL);\n  memset(r_s, 0, 256);\n  memset(&r_d, 0, sizeof(struct random_data));\n  initstate_r(seed, r_s, 256, &r_d);\n  random_r(&r_d, 
&r_val);\n}\n\nMutInput::~MutInput()\n{\n  if (value)\n    free(value);\n}\n"
  },
  {
    "path": "solvers/jigsaw/input.h",
    "content": "#ifndef _INPUT_H_\n#define _INPUT_H_\n#include <stddef.h>\n#include <stdint.h>\n#include <vector>\n#include <string.h>\n#include <stdlib.h>\n#include <utility>\n\nnamespace rgd {\n\nclass InputMeta {\npublic:\n  bool sign;\n  size_t offset;\n  size_t size;\n};\n\n\nclass MutInput {\npublic:\n  // std::vector<uint8_t> value;\n  uint64_t* value;\n  // std::vector<InputMeta> meta;\n  size_t size_;\n  size_t get_size();\n  MutInput(size_t size);\n  ~MutInput();\n  void dump();\n  uint64_t len();\n  uint64_t val_len();\n  void randomize();\n  //random\n  char r_s[256];\n  struct random_data r_d;\n  int32_t r_val;\n  int32_t r_idx;\n  uint8_t get_rand();\n\n  uint8_t get(const size_t i);\n  void update(size_t index, bool direction, uint64_t delta);\n  void flip(size_t index, size_t bit_index);\n  void set(const size_t index, uint8_t value);\n  void assign(std::vector<std::pair<uint32_t,uint8_t>> const& input);\n  MutInput& operator=(const MutInput &other);\n\n  static void copy(MutInput *dst, const MutInput *src)\n  {\n    uint64_t *dst_value = dst->value;\n    memcpy(dst, src, sizeof(MutInput));\n    if (!dst_value)\n      dst->value = (uint64_t*)malloc(src->size_ * sizeof(uint64_t));\n    else\n      dst->value = dst_value;\n    memcpy(dst->value, src->value, src->size_ * sizeof(uint64_t));\n  }\n};\n\n}; // namespace rgd\n\n#endif\n"
  },
  {
    "path": "solvers/jigsaw/jit.cc",
    "content": "#include \"llvm/ADT/APFloat.h\"\n#include \"llvm/ADT/STLExtras.h\"\n#include \"llvm/IR/BasicBlock.h\"\n#include \"llvm/IR/Constants.h\"\n#include \"llvm/IR/DerivedTypes.h\"\n#include \"llvm/IR/Function.h\"\n#include \"llvm/IR/IRBuilder.h\"\n#include \"llvm/IR/LLVMContext.h\"\n#include \"llvm/IR/LegacyPassManager.h\"\n#include \"llvm/IR/Module.h\"\n#include \"llvm/IR/Type.h\"\n#include \"llvm/IR/Verifier.h\"\n#include \"llvm/Support/TargetSelect.h\"\n#include \"llvm/Target/TargetMachine.h\"\n#include \"llvm/Transforms/InstCombine/InstCombine.h\"\n#include \"llvm/Transforms/Scalar.h\"\n#include \"llvm/Transforms/Scalar/GVN.h\"\n\n#include <cassert>\n#include <iostream>\n#include <unordered_map>\n\n#include \"jit.h\"\n#include \"ast.h\"\n#include \"rgdJit.h\"\n\nusing namespace llvm;\nusing namespace rgd;\n\nstd::unique_ptr<GradJit> JIT;\n\nstatic llvm::Value* codegen(llvm::IRBuilder<> &Builder,\n    const AstNode* node,\n    std::map<size_t, uint32_t> const& local_map, llvm::Value* arg,\n    std::unordered_map<uint32_t, llvm::Value*> &value_cache) {\n\n  llvm::Value* ret = nullptr;\n  //std::cout << \"code gen and nargs is \" << nargs << std::endl;\n\n  auto itr = value_cache.find(node->label());\n  if (node->label() != 0\n      && itr != value_cache.end()) {\n    //std::cout << \" value cache hit and label is \" << node->label() << std::endl;\n    return itr->second;\n  }\n\n  llvm::Type *ArgTy = Builder.getInt64Ty();\n  switch (node->kind()) {\n    case rgd::Bool: {\n      // getTrue is actually 1 bit integer 1\n      if (node->boolvalue())\n        ret = llvm::ConstantInt::getTrue(Builder.getContext());\n      else\n        ret = llvm::ConstantInt::getFalse(Builder.getContext());\n      break;\n    }\n    case rgd::Constant: {\n      // The constant is now loading from arguments\n      uint32_t start = node->index();\n      uint32_t length = node->bits() / 8;\n\n      llvm::Value* idx[1];\n      // calculate the offset\n      idx[0] = 
llvm::ConstantInt::get(Builder.getInt32Ty(), start + RET_OFFSET);\n      ret = Builder.CreateGEP(ArgTy, arg, idx);\n      Type* CTy = llvm::Type::getIntNTy(Builder.getContext(), node->bits());\n      llvm::PointerType* CPTy = llvm::PointerType::getUnqual(CTy);\n      ret = Builder.CreateBitCast(ret, CPTy);\n      ret = Builder.CreateLoad(CTy, ret); // load length bytes at once\n      break;\n    }\n    case rgd::Read: {\n      uint32_t start = local_map.at(node->index());\n      size_t length = node->bits() / 8;\n      //std::cout << \"read index \" << start << \" length \" << length << std::endl;\n      llvm::Type *RTy = llvm::Type::getIntNTy(Builder.getContext(), node->bits());\n      llvm::Value* idx[1];\n      idx[0] = llvm::ConstantInt::get(Builder.getInt32Ty(), start + RET_OFFSET);\n      ret = Builder.CreateGEP(ArgTy, arg, idx);\n      ret = Builder.CreateLoad(ArgTy, ret);\n      ret = Builder.CreateZExtOrTrunc(ret, RTy);\n      for (uint32_t k = 1; k < length; k++) {\n        idx[0] = llvm::ConstantInt::get(Builder.getInt32Ty(),\n                                        start + k + RET_OFFSET);\n        llvm::Value* tmp = Builder.CreateGEP(ArgTy, arg, idx);\n        tmp = Builder.CreateLoad(ArgTy, tmp);\n        tmp = Builder.CreateZExtOrTrunc(tmp, RTy);\n        tmp = Builder.CreateShl(tmp, 8 * k);\n        ret = Builder.CreateAdd(ret, tmp);\n      }\n      break;\n    }\n    case rgd::Concat: {\n      const AstNode* rc1 = &node->children(0);\n      const AstNode* rc2 = &node->children(1);\n      uint32_t bits = rc1->bits() + rc2->bits(); \n      llvm::Value* c1 = codegen(Builder, rc1, local_map, arg, value_cache);\n      llvm::Value* c2 = codegen(Builder, rc2, local_map, arg, value_cache);\n      ret = Builder.CreateOr(\n          Builder.CreateShl(\n            Builder.CreateZExt(c2,llvm::Type::getIntNTy(Builder.getContext(),bits)),\n            rc1->bits()),\n          Builder.CreateZExt(c1, llvm::Type::getIntNTy(Builder.getContext(), bits)));\n      
break;\n    }\n    case rgd::Extract: {\n#if DEBUG\n      //std::cerr << \"Extract expression\" << std::endl;\n#endif\n      const AstNode* rc = &node->children(0);\n      llvm::Value* c = codegen(Builder, rc, local_map, arg, value_cache);\n      ret = Builder.CreateTrunc(\n          Builder.CreateLShr(c, node->index()),\n          llvm::Type::getIntNTy(Builder.getContext(), node->bits()));\n      break;\n    }\n    case rgd::ZExt: {\n#if DEBUG\n      // std::cerr << \"ZExt the bits is \" << node->bits() << std::endl;\n#endif\n      const AstNode* rc = &node->children(0);\n      llvm::Value* c = codegen(Builder, rc, local_map, arg, value_cache);\n      //FIXME: we may face ZEXT to boolean expr\n      ret = Builder.CreateZExtOrTrunc(c,\n          llvm::Type::getIntNTy(Builder.getContext(), node->bits()));\n      break;\n    }\n    case rgd::SExt: {\n#if DEBUG\n      // std::cerr << \"SExt the bits is \" << node->bits() << std::endl;\n#endif\n      const AstNode* rc = &node->children(0);\n      llvm::Value* c = codegen(Builder, rc,local_map, arg, value_cache);\n      ret = Builder.CreateSExt(c,\n          llvm::Type::getIntNTy(Builder.getContext(), node->bits()));\n      break;\n    }\n    case rgd::Add: {\n      const AstNode* rc1 = &node->children(0);\n      const AstNode* rc2 = &node->children(1);\n      llvm::Value* c1 = codegen(Builder, rc1, local_map, arg, value_cache);\n      llvm::Value* c2 = codegen(Builder, rc2, local_map, arg, value_cache);\n      ret = Builder.CreateAdd(c1, c2);\n      break;\n    }\n    case rgd::Sub: {\n      const AstNode* rc1 = &node->children(0);\n      const AstNode* rc2 = &node->children(1);\n      llvm::Value* c1 = codegen(Builder, rc1, local_map, arg, value_cache);\n      llvm::Value* c2 = codegen(Builder, rc2, local_map, arg, value_cache);\n      ret = Builder.CreateSub(c1, c2);\n      break;\n    }\n    case rgd::Mul: {\n      const AstNode* rc1 = &node->children(0);\n      const AstNode* rc2 = &node->children(1);\n      
llvm::Value* c1 = codegen(Builder, rc1, local_map, arg, value_cache);\n      llvm::Value* c2 = codegen(Builder, rc2, local_map, arg, value_cache);\n      ret = Builder.CreateMul(c1, c2);\n      break;\n    }\n    case rgd::UDiv: {\n      const AstNode* rc1 = &node->children(0);\n      const AstNode* rc2 = &node->children(1);\n      llvm::Value* c1 = codegen(Builder, rc1, local_map, arg, value_cache);\n      llvm::Value* c2 = codegen(Builder, rc2, local_map, arg, value_cache);\n      llvm::Value* VA0 = llvm::ConstantInt::get(llvm::Type::getIntNTy(Builder.getContext(), node->bits()), 0);\n      llvm::Value* VA1 = llvm::ConstantInt::get(llvm::Type::getIntNTy(Builder.getContext(), node->bits()), 1);\n      // FIXME: this is a hack to avoid division by zero, but should use a better way\n      // FIXME: should record the divisor to avoid gradient vanish\n      llvm::Value* cond = Builder.CreateICmpEQ(c2, VA0);\n      llvm::Value* divisor = Builder.CreateSelect(cond, VA1, c2);\n      ret = Builder.CreateUDiv(c1, divisor);\n      break;\n    }\n    case rgd::SDiv: {\n      const AstNode* rc1 = &node->children(0);\n      const AstNode* rc2 = &node->children(1);\n      llvm::Value* c1 = codegen(Builder, rc1, local_map, arg, value_cache);\n      llvm::Value* c2 = codegen(Builder, rc2, local_map, arg, value_cache);\n      llvm::Value* VA0 = llvm::ConstantInt::get(llvm::Type::getIntNTy(Builder.getContext(), node->bits()), 0);\n      llvm::Value* VA1 = llvm::ConstantInt::get(llvm::Type::getIntNTy(Builder.getContext(), node->bits()), 1);\n      // FIXME: this is a hack to avoid division by zero, but should use a better way\n      // FIXME: should record the divisor to avoid gradient vanish\n      llvm::Value* cond = Builder.CreateICmpEQ(c2, VA0);\n      llvm::Value* divisor = Builder.CreateSelect(cond, VA1, c2);\n      ret = Builder.CreateSDiv(c1, divisor);\n      break;\n    }\n    case rgd::URem: {\n      const AstNode* rc1 = &node->children(0);\n      const AstNode* rc2 = 
&node->children(1);\n      llvm::Value* c1 = codegen(Builder, rc1, local_map, arg, value_cache);\n      llvm::Value* c2 = codegen(Builder, rc2, local_map, arg, value_cache);\n      llvm::Value* VA0 = llvm::ConstantInt::get(llvm::Type::getIntNTy(Builder.getContext(), node->bits()), 0);\n      llvm::Value* VA1 = llvm::ConstantInt::get(llvm::Type::getIntNTy(Builder.getContext(), node->bits()), 1);\n      // FIXME: this is a hack to avoid division by zero, but should use a better way\n      // FIXME: should record the divisor to avoid gradient vanish\n      llvm::Value* cond = Builder.CreateICmpEQ(c2, VA0);\n      llvm::Value* divisor = Builder.CreateSelect(cond, VA1, c2);\n      ret = Builder.CreateURem(c1, divisor);\n      break;\n    }\n    case rgd::SRem: {\n      const AstNode* rc1 = &node->children(0);\n      const AstNode* rc2 = &node->children(1);\n      llvm::Value* c1 = codegen(Builder, rc1, local_map, arg, value_cache);\n      llvm::Value* c2 = codegen(Builder, rc2, local_map, arg, value_cache);\n      llvm::Value* VA0 = llvm::ConstantInt::get(llvm::Type::getIntNTy(Builder.getContext(), node->bits()), 0);\n      llvm::Value* VA1 = llvm::ConstantInt::get(llvm::Type::getIntNTy(Builder.getContext(), node->bits()), 1);\n      // FIXME: this is a hack to avoid division by zero, but should use a better way\n      // FIXME: should record the divisor to avoid gradient vanish\n      llvm::Value* cond = Builder.CreateICmpEQ(c2, VA0);\n      llvm::Value* divisor = Builder.CreateSelect(cond, VA1, c2);\n      ret = Builder.CreateSRem(c1, divisor);\n      break;\n    }\n    case rgd::Neg: {\n      const AstNode* rc = &node->children(0);\n      llvm::Value* c = codegen(Builder, rc, local_map, arg, value_cache);\n      ret = Builder.CreateNeg(c);\n      break;\n    }\n    case rgd::Not: {\n      const AstNode* rc = &node->children(0);\n      llvm::Value* c = codegen(Builder, rc, local_map, arg, value_cache);\n      ret = Builder.CreateNot(c);\n      break;\n    }\n    case 
rgd::And: {\n      const AstNode* rc1 = &node->children(0);\n      const AstNode* rc2 = &node->children(1);\n      llvm::Value* c1 = codegen(Builder, rc1, local_map, arg, value_cache);\n      llvm::Value* c2 = codegen(Builder, rc2, local_map, arg, value_cache);\n      ret = Builder.CreateAnd(c1, c2);\n      break;\n    }\n    case rgd::Or: {\n      const AstNode* rc1 = &node->children(0);\n      const AstNode* rc2 = &node->children(1);\n      llvm::Value* c1 = codegen(Builder, rc1, local_map, arg, value_cache);\n      llvm::Value* c2 = codegen(Builder, rc2, local_map, arg, value_cache);\n      ret = Builder.CreateOr(c1, c2);\n      break;\n    }\n    case rgd::Xor: {\n      const AstNode* rc1 = &node->children(0);\n      const AstNode* rc2 = &node->children(1);\n      llvm::Value* c1 = codegen(Builder, rc1, local_map, arg, value_cache);\n      llvm::Value* c2 = codegen(Builder, rc2, local_map, arg, value_cache);\n      ret = Builder.CreateXor(c1, c2);\n      break;\n    }\n    case rgd::Shl: {\n      const AstNode* rc1 = &node->children(0);\n      const AstNode* rc2 = &node->children(1);\n      llvm::Value* c1 = codegen(Builder, rc1, local_map, arg, value_cache);\n      llvm::Value* c2 = codegen(Builder, rc2, local_map, arg, value_cache);\n      ret = Builder.CreateShl(c1, c2);\n      break;\n    }\n    case rgd::LShr: {\n      const AstNode* rc1 = &node->children(0);\n      const AstNode* rc2 = &node->children(1);\n      llvm::Value* c1 = codegen(Builder, rc1, local_map, arg, value_cache);\n      llvm::Value* c2 = codegen(Builder, rc2, local_map, arg, value_cache);\n      ret = Builder.CreateLShr(c1, c2);\n      break;\n    }\n    case rgd::AShr: {\n      const AstNode* rc1 = &node->children(0);\n      const AstNode* rc2 = &node->children(1);\n      llvm::Value* c1 = codegen(Builder, rc1, local_map, arg, value_cache);\n      llvm::Value* c2 = codegen(Builder, rc2, local_map, arg, value_cache);\n      ret = Builder.CreateAShr(c1, c2);\n      break;\n    }\n    // 
all the following ICmp expressions should be top level\n    case rgd::Equal:\n    case rgd::Distinct:\n    case rgd::Ult:\n    case rgd::Ule:\n    case rgd::Ugt:\n    case rgd::Uge:\n    case rgd::Slt:\n    case rgd::Sle:\n    case rgd::Sgt:\n    case rgd::Sge: {\n    // we don't really care about the comparison, just need to save the operands\n      const AstNode* rc1 = &node->children(0);\n      const AstNode* rc2 = &node->children(1);\n      llvm::Value* c1 = codegen(Builder, rc1, local_map, arg, value_cache);\n      llvm::Value* c2 = codegen(Builder, rc2, local_map, arg, value_cache);\n      // extend to 64-bit to avoid overflow\n      llvm::Value* c1e = Builder.CreateZExt(c1, Builder.getInt64Ty());\n      llvm::Value* c2e = Builder.CreateZExt(c2, Builder.getInt64Ty());\n\n      // save the comparison operands to the output args\n      // so it's easier to negate the condition\n      llvm::Value* idx[1];\n      idx[0] = llvm::ConstantInt::get(Builder.getInt32Ty(), 0);\n      Builder.CreateStore(c1e,\n                          Builder.CreateGEP(Builder.getInt64Ty(), arg, idx));\n      idx[0] = llvm::ConstantInt::get(Builder.getInt32Ty(), 1);\n      Builder.CreateStore(c2e,\n                          Builder.CreateGEP(Builder.getInt64Ty(), arg, idx));\n\n      ret = nullptr;\n      break;\n    }\n    case rgd::Memcmp:\n    case rgd::MemcmpN: {\n      const AstNode* rc1 = &node->children(0);\n      const AstNode* rc2 = &node->children(1);\n      llvm::Value* c1 = codegen(Builder, rc1, local_map, arg, value_cache);\n      llvm::Value* c2 = codegen(Builder, rc2, local_map, arg, value_cache);\n      // c1 & c2 should be IntNty\n      llvm::Value* ret = Builder.CreateICmpEQ(c1, c2);\n      ret = Builder.CreateZExt(ret, Builder.getInt64Ty());\n\n      // just save the results\n      llvm::Value* idx[1];\n      idx[0] = llvm::ConstantInt::get(Builder.getInt32Ty(), 0);\n      Builder.CreateStore(ret,\n                          Builder.CreateGEP(Builder.getInt64Ty(), arg, 
idx));\n\n      ret = nullptr;\n      break;\n    }\n    // this should never happen!\n    case rgd::LOr: {\n      throw std::invalid_argument(\"LOr expression\");\n      break;\n    }\n    case rgd::LAnd: {\n      throw std::invalid_argument(\"LAnd expression\");\n      break;\n    }\n    case rgd::LNot: {\n      throw std::invalid_argument(\"LNot expression\");\n      break;\n    }\n    case rgd::Ite: {\n      // don't handle ITE for now, doesn't work with GD\n      throw std::invalid_argument(\"Ite expression\");\n#if DEUBG\n      std::cerr << \"ITE expr codegen\" << std::endl;\n#endif\n#if 0\n      const AstNode* rcond = &node->children(0);\n      const AstNode* rtv = &node->children(1);\n      const AstNode* rfv = &node->children(2);\n      llvm::Value* cond = codegen(rcond, local_map, arg, value_cache);\n      llvm::Value* tv = codegen(rtv, local_map, arg, value_cache);\n      llvm::Value* fv = codegen(rfv, local_map, arg, value_cache);\n      ret = Builder.CreateSelect(cond, tv, fv);\n#endif\n      break;\n    }\n    default:\n      throw std::invalid_argument(\"unhandled expression\");\n      //printExpression(node);\n      break;\n  }\n\n  // add to cache\n  if (ret && node->label() != 0) {\n    value_cache.insert({node->label(), ret});\n  }\n\n  return ret; \n}\n\nint rgd::addFunction(const AstNode* node,\n    std::map<size_t,uint32_t> const& local_map,\n    uint64_t id) {\n\n  if ((!isRelationalKind(node->kind()) &&\n      node->kind() != rgd::Memcmp &&\n      node->kind() != rgd::MemcmpN)) {\n    std::cerr << \"non-relational expr\\n\";\n    return -1;\n  }\n\n  // Open a new module.\n  std::string moduleName = \"rgdjit_m\" + std::to_string(id);\n  std::string funcName = \"rgdjit_f\" + std::to_string(id);\n\n  auto TheCtx = std::make_unique<llvm::LLVMContext>();\n  auto TheModule = std::make_unique<Module>(moduleName, *TheCtx);\n  TheModule->setDataLayout(JIT->getDataLayout());\n  llvm::IRBuilder<> Builder(*TheCtx);\n\n  std::vector<llvm::Type*> 
input_type(1,\n      llvm::PointerType::getUnqual(Builder.getInt64Ty()));\n  llvm::FunctionType *funcType;\n  funcType = llvm::FunctionType::get(Builder.getVoidTy(), input_type, false);\n  auto *fooFunc = llvm::Function::Create(funcType, llvm::Function::ExternalLinkage,\n      funcName, TheModule.get());\n  auto *po = llvm::BasicBlock::Create(Builder.getContext(), \"entry\", fooFunc);\n  Builder.SetInsertPoint(po);\n  uint32_t idx = 0;\n\n  auto args = fooFunc->arg_begin();\n  llvm::Value* var = &(*args);\n  std::unordered_map<uint32_t, llvm::Value*> value_cache;\n  llvm::Value* body = nullptr;\n  try {\n    body = codegen(Builder, node, local_map, var, value_cache);\n  } catch (std::invalid_argument &e) {\n    std::cerr << \"Invalid node: \" << e.what() << std::endl;\n    return -1;\n  }\n  if (body != nullptr) {\n    std::cerr << \"non-comparison expr\\n\";\n    return -1;\n  }\n  Builder.CreateRet(body);\n\n  llvm::raw_ostream *stream = &llvm::outs();\n  llvm::verifyFunction(*fooFunc, stream);\n#if DEBUG\n  // TheModule->print(llvm::errs(), nullptr);\n#endif\n\n  JIT->addModule(std::move(TheModule), std::move(TheCtx));\n\n  return 0;\n}\n\ntest_fn_type rgd::performJit(uint64_t id) {\n  std::string funcName = \"rgdjit_f\" + std::to_string(id);\n  auto ExprSymbol = JIT->lookup(funcName).get();\n  auto func = (test_fn_type)ExprSymbol.getAddress();\n  return func;\n}\n"
  },
  {
    "path": "solvers/jigsaw/jit.h",
    "content": "#ifndef JIGSAW_H_\n#define JIGSAW_H_\n\n#include <memory>\n\n#include \"ast.h\"\n#include \"task.h\"\n\nnamespace rgd {\n\nint addFunction(const AstNode* node,\n    std::map<size_t, uint32_t> const& local_map,\n    uint64_t id);\n\ntest_fn_type performJit(uint64_t id);\n\nbool gd_entry(std::shared_ptr<SearchTask> task);\n\n}\n\n#endif"
  },
  {
    "path": "solvers/jigsaw/rgdJit.h",
    "content": "#ifndef GRAD_JIT_H\n#define GRAD_JIT_H\n\n#include \"llvm/ADT/StringRef.h\"\n#include \"llvm/ExecutionEngine/JITSymbol.h\"\n#include \"llvm/ExecutionEngine/Orc/CompileUtils.h\"\n#include \"llvm/ExecutionEngine/Orc/Core.h\"\n#include \"llvm/ExecutionEngine/Orc/ExecutionUtils.h\"\n#include \"llvm/ExecutionEngine/Orc/ExecutorProcessControl.h\"\n#include \"llvm/ExecutionEngine/Orc/IRCompileLayer.h\"\n#include \"llvm/ExecutionEngine/Orc/IRTransformLayer.h\"\n#include \"llvm/ExecutionEngine/Orc/JITTargetMachineBuilder.h\"\n#include \"llvm/ExecutionEngine/Orc/RTDyldObjectLinkingLayer.h\"\n#include \"llvm/ExecutionEngine/SectionMemoryManager.h\"\n#include \"llvm/IR/DataLayout.h\"\n#include \"llvm/IR/LLVMContext.h\"\n#include \"llvm/IR/LegacyPassManager.h\"\n#include \"llvm/Transforms/InstCombine/InstCombine.h\"\n#include \"llvm/Transforms/Scalar.h\"\n#include \"llvm/Transforms/Scalar/GVN.h\"\n\n#include <algorithm>\n#include <map>\n#include <memory>\n#include <set>\n#include <string>\n#include <vector>\n\nnamespace rgd {\n\nclass GradJit {\nprivate:\n  std::unique_ptr<llvm::orc::ExecutionSession> ES;\n\n  llvm::DataLayout DL;\n  llvm::orc::MangleAndInterner Mangle;\n\n  llvm::orc::RTDyldObjectLinkingLayer ObjectLayer;\n  llvm::orc::IRCompileLayer CompileLayer;\n  llvm::orc::IRTransformLayer OptimizeLayer;\n\n  llvm::orc::JITDylib &MainJD;\n\npublic:\n  GradJit(std::unique_ptr<llvm::orc::ExecutionSession> ES,\n          llvm::orc::JITTargetMachineBuilder JTMB, llvm::DataLayout DL)\n      : ES(std::move(ES)), DL(std::move(DL)), Mangle(*this->ES, this->DL),\n        ObjectLayer(*this->ES,\n            []() { return std::make_unique<llvm::SectionMemoryManager>(); }),\n        CompileLayer(*this->ES, ObjectLayer,\n            std::make_unique<llvm::orc::ConcurrentIRCompiler>(std::move(JTMB))),\n        OptimizeLayer(*this->ES, CompileLayer, optimizeModule),\n        MainJD(this->ES->createBareJITDylib(\"main\")) {\n    MainJD.addGenerator(\n        
cantFail(llvm::orc::DynamicLibrarySearchGenerator::GetForCurrentProcess(\n            DL.getGlobalPrefix())));\n  }\n\n  ~GradJit() {\n    if (auto Err = ES->endSession())\n      ES->reportError(std::move(Err));\n  }\n\n  static llvm::Expected<std::unique_ptr<GradJit>> Create() {\n    auto EPC = llvm::orc::SelfExecutorProcessControl::Create();\n    if (!EPC) {\n      llvm::errs() << \"Cannot create EPC: \" << EPC.takeError() << \"\\n\";\n      return EPC.takeError();\n    }\n\n    auto ES = std::make_unique<llvm::orc::ExecutionSession>(std::move(*EPC));\n\n    llvm::orc::JITTargetMachineBuilder JTMB(\n        ES->getExecutorProcessControl().getTargetTriple());\n\n    auto DL = JTMB.getDefaultDataLayoutForTarget();\n    if (!DL) {\n      llvm::errs() << \"Cannot get default DL for target: \"\n                   << DL.takeError() << \"\\n\";\n      return DL.takeError();\n    }\n\n    return std::make_unique<GradJit>(std::move(ES), std::move(JTMB),\n                                     std::move(*DL));\n  }\n\n  const llvm::DataLayout &getDataLayout() const { return DL; }\n\n  llvm::orc::JITDylib &getMainJITDylib() { return MainJD; }\n\n  void addModule(std::unique_ptr<llvm::Module> M,\n                 std::unique_ptr<llvm::LLVMContext> ctx) {\n    auto RT = MainJD.getDefaultResourceTracker();\n    cantFail(OptimizeLayer.add(RT,\n        llvm::orc::ThreadSafeModule(std::move(M), std::move(ctx))));\n  }\n\n  llvm::Expected<llvm::JITEvaluatedSymbol> lookup(llvm::StringRef Name) {\n    return ES->lookup({&MainJD}, Mangle(Name.str()));\n  }\n\nprivate:\n  static llvm::Expected<llvm::orc::ThreadSafeModule>\n  optimizeModule(llvm::orc::ThreadSafeModule TSM,\n                 const llvm::orc::MaterializationResponsibility &R) {\n    TSM.withModuleDo([](llvm::Module &M) {\n      // Create a function pass manager.\n      auto FPM = std::make_unique<llvm::legacy::FunctionPassManager>(&M);\n\n      // Add some optimizations.\n      
FPM->add(llvm::createInstructionCombiningPass());\n      FPM->add(llvm::createReassociatePass());\n      FPM->add(llvm::createGVNPass());\n      FPM->add(llvm::createInstSimplifyLegacyPass());\n      FPM->doInitialization();\n\n      // Run the optimizations over all functions in the module being added to\n      // the JIT.\n      for (auto &F : M)\n        FPM->run(F);\n    });\n\n    return std::move(TSM);\n  }\n};\n\n} // namespace rgd\n\n#endif // GRAD_JIT_H\n\n"
  },
  {
    "path": "solvers/jit-solver.cpp",
    "content": "#include \"llvm/Support/TargetSelect.h\"\n#include \"llvm/Target/TargetMachine.h\"\n\n#include \"solver.h\"\n#include \"ast.h\"\n#include \"jigsaw/rgdJit.h\"\n#include \"jigsaw/jit.h\"\n#include \"wheels/lockfreehash/lprobe/hash_table.h\"\n\nusing namespace rgd;\n\n#define DEBUG 0\n\n#if !DEBUG\n#undef DEBUGF\n#define DEBUGF(_str...) do { } while (0)\n#elif !defined (DEBUGF)\n#define DEBUGF(_str...) do { fprintf(stderr, _str); } while (0)\n#endif\n\n#ifndef WARNF\n#define WARNF(_str...) do { fprintf(stderr, _str); } while (0)\n#endif\n\nstatic const uint64_t kUsToS = 1000000;\n\nstatic uint64_t getTimeStamp() {\n  struct timeval tv;\n  gettimeofday(&tv, NULL);\n  return tv.tv_sec * kUsToS + tv.tv_usec;\n}\n\nextern std::unique_ptr<GradJit> JIT;\n\nstruct myKV {\n  std::shared_ptr<AstNode> node;\n  test_fn_type fn;\n  myKV(std::shared_ptr<AstNode> anode, test_fn_type f) : node(anode), fn(f) {}\n};\n\nstruct myHash {\n  using eType = struct myKV*;\n  using kType = std::shared_ptr<AstNode>;\n  eType empty() {return nullptr;}\n  kType getKey(eType v) {return v->node;}\n  int hash(kType v) {return v->hash();} //hash64_2(v);}\n  //int hash(kType v) {return hash64_2(v);}\n  //int cmp(kType v, kType b) {return (v > b) ? 1 : ((v == b) ? 0 : -1);}\n  int cmp(kType v, kType b) {return (isEqualAst(*v,*b)) ? 
0 : -1;}\n  bool replaceQ(eType, eType) {return 0;}\n  eType update(eType v, eType) {return v;}\n  bool cas(eType* p, eType o, eType n) {return pbbs::atomic_compare_and_swap(p, o, n);}\n};\n\nstatic pbbs::Table<myHash> fCache(8000016, myHash(), 1.3);\n\nJITSolver::JITSolver(): uuid(0) {\n  llvm::InitializeNativeTarget();\n  llvm::InitializeNativeTargetAsmPrinter();\n  llvm::InitializeNativeTargetAsmParser();\n\n  JIT = std::move(GradJit::Create().get());\n}\n\nsolver_result_t\nJITSolver::solve(std::shared_ptr<SearchTask> task,\n                 const uint8_t *in_buf, size_t in_size,\n                 uint8_t *out_buf, size_t &out_size) {\n\n  auto base_task = task->base_task;\n  uint64_t start;\n  while (base_task != nullptr) {\n    // no need to solve\n    if (base_task->skip_next) {\n      DEBUGF(\"skipping task\\n\");\n      task->skip_next = true; // set the flag for following tasks\n      out_size = in_size;\n      memcpy(out_buf, in_buf, in_size);\n      if (base_task->solved) {\n        for (auto const &[offset, value] : base_task->solution) {\n          out_buf[offset] = value;\n        }\n        return SOLVER_SAT;\n      } else {\n        return SOLVER_UNSAT;\n      }\n    } else if (base_task->solved) {\n      task->load_hint();\n    }\n    base_task = base_task->base_task;\n  }\n\n  for (size_t i = 0, n = task->size(); i < n; i++) {\n    auto &c = task->constraints(i);\n    DEBUGF(\"process constraint %d (fn=%p)\\n\", c->ast->label(), c->fn);\n    // jit the AST into a native function if haven't done so\n    if (c->fn == nullptr) {\n      struct myKV *res = fCache.find(c->ast);\n      if (res == nullptr) {\n        cache_misses++;\n        DEBUGF(\"jit constraint %d\\n\", c->ast->label());\n        uint64_t id = ++uuid;\n        start = getTimeStamp();\n        if (addFunction(c->get_root(), c->local_map, id) != 0) {\n          WARNF(\"failed to add function\\n\");\n          return SOLVER_ERROR;\n        }\n        process_time += (getTimeStamp() - 
start);\n        start = getTimeStamp();\n        auto fn = performJit(id);\n        jit_time += (getTimeStamp() - start);\n        auto kv = new struct myKV(c->ast, fn);\n        if (!fCache.insert(kv))\n          delete kv;\n        const_cast<Constraint*>(c.get())->fn = fn; // XXX: workaround, no concurrent access\n      } else {\n        cache_hits++;\n        const_cast<Constraint*>(c.get())->fn = res->fn; // XXX: workaround\n      }\n    }\n  }\n\n  // solve the task\n  start = getTimeStamp();\n  bool res = gd_entry(task);\n  solving_time += (getTimeStamp() - start);\n  if (res) {\n    DEBUGF(\"solved\\n\");\n    out_size = in_size;\n    memcpy(out_buf, in_buf, in_size);\n    for (auto const &[offset, value] : task->solution) {\n      DEBUGF(\"generate_input offset:%zu => %u\\n\", offset, value);\n      out_buf[offset] = value;\n    }\n    // handle atoi bytes\n    if (!task->atoi_info().empty()) {\n      // if there are atoi bytes, handle them\n      for (auto const &[offset, info] : task->atoi_info()) {\n        uint64_t val = 0;\n        uint32_t length = std::get<0>(info);\n        memcpy(out_buf + offset, in_buf + offset, length); // restore??\n        for (auto i = length; i != 0; --i) {\n          DEBUGF(\"generate_input atoi offset:%d => %lu\\n\", offset + i - 1, val);\n          auto itr = task->solution.find(offset + i - 1);\n          assert(itr != task->solution.end());\n          val |= itr->second << (8 * (i - 1));\n        }\n        uint32_t base = std::get<1>(info);\n        uint32_t orig_len = std::get<2>(info);\n        DEBUGF(\"generate_input atoi offset:%d => %lu, base = %d, original len = %d\\n\",\n            offset, val, base, orig_len);\n        const char *format = nullptr;\n        switch (base) {\n          case 2: format = \"%lb\"; break;\n          case 8: format = \"%lo\"; break;\n          case 10: format = \"%ld\"; break;\n          case 16: format = \"%lx\"; break;\n          default: WARNF(\"unsupported base %d\\n\", 
base);\n        }\n        if (format) {\n          snprintf((char*)out_buf + offset, in_size - offset, format, val);\n        }\n      }\n    }\n    num_solved++;\n    return SOLVER_SAT;\n  } else {\n    DEBUGF(\"timeout\\n\");\n    num_timeout++;\n    return SOLVER_TIMEOUT;\n  }\n}\n\nvoid JITSolver::print_stats(int fd) {\n  dprintf(fd, \"JIT solver stats:\\n\");\n  dprintf(fd, \"  cache hits: %lu\\n\", cache_hits.load());\n  dprintf(fd, \"  cache misses: %lu\\n\", cache_misses.load());\n  dprintf(fd, \"  num solved: %lu\\n\", num_solved.load());\n  dprintf(fd, \"  num timeout: %lu\\n\", num_timeout.load());\n  dprintf(fd, \"  process time: %lu\\n\", process_time.load());\n  dprintf(fd, \"  jit  time: %lu\\n\", jit_time.load());\n  dprintf(fd, \"  solving time: %lu\\n\", solving_time.load());\n}\n"
  },
  {
    "path": "solvers/wheels/concurrentqueue/queue.h",
    "content": "// Provides a C++11 implementation of a multi-producer, multi-consumer lock-free queue.\n// An overview, including benchmark results, is provided here:\n//     http://moodycamel.com/blog/2014/a-fast-general-purpose-lock-free-queue-for-c++\n// The full design is also described in excruciating detail at:\n//    http://moodycamel.com/blog/2014/detailed-design-of-a-lock-free-queue\n\n// Simplified BSD license:\n// Copyright (c) 2013-2020, Cameron Desrochers.\n// All rights reserved.\n//\n// Redistribution and use in source and binary forms, with or without modification,\n// are permitted provided that the following conditions are met:\n//\n// - Redistributions of source code must retain the above copyright notice, this list of\n// conditions and the following disclaimer.\n// - Redistributions in binary form must reproduce the above copyright notice, this list of\n// conditions and the following disclaimer in the documentation and/or other materials\n// provided with the distribution.\n//\n// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND ANY\n// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF\n// MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL\n// THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT\n// OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)\n// HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR\n// TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,\n// EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n// Also dual-licensed under the Boost Software License (see LICENSE.md)\n\n#pragma once\n\n#if defined(__GNUC__)\n// Disable -Wconversion warnings (spuriously triggered when Traits::size_t and\n// Traits::index_t are set to < 32 bits, causing integer promotion, causing warnings\n// upon assigning any computed values)\n#pragma GCC diagnostic push\n#pragma GCC diagnostic ignored \"-Wconversion\"\n\n#ifdef MCDBGQ_USE_RELACY\n#pragma GCC diagnostic ignored \"-Wint-to-pointer-cast\"\n#endif\n#endif\n\n#if defined(_MSC_VER) && (!defined(_HAS_CXX17) || !_HAS_CXX17)\n// VS2019 with /W4 warns about constant conditional expressions but unless /std=c++17 or higher\n// does not support `if constexpr`, so we have no choice but to simply disable the warning\n#pragma warning(push)\n#pragma warning(disable: 4127)  // conditional expression is constant\n#endif\n\n#if defined(__APPLE__)\n#include \"TargetConditionals.h\"\n#endif\n\n#ifdef MCDBGQ_USE_RELACY\n#include \"relacy/relacy_std.hpp\"\n#include \"relacy_shims.h\"\n// We only use malloc/free anyway, and the delete macro messes up `= delete` method declarations.\n// We'll override the default trait malloc ourselves without a macro.\n#undef new\n#undef delete\n#undef malloc\n#undef free\n#else\n#include <atomic>\t\t// Requires C++11. 
Sorry VS2010.\n#include <cassert>\n#endif\n#include <cstddef>              // for max_align_t\n#include <cstdint>\n#include <cstdlib>\n#include <type_traits>\n#include <algorithm>\n#include <utility>\n#include <limits>\n#include <climits>\t\t// for CHAR_BIT\n#include <array>\n#include <thread>\t\t// partly for __WINPTHREADS_VERSION if on MinGW-w64 w/ POSIX threading\n\n// Platform-specific definitions of a numeric thread ID type and an invalid value\nnamespace moodycamel { namespace details {\n\ttemplate<typename thread_id_t> struct thread_id_converter {\n\t\ttypedef thread_id_t thread_id_numeric_size_t;\n\t\ttypedef thread_id_t thread_id_hash_t;\n\t\tstatic thread_id_hash_t prehash(thread_id_t const& x) { return x; }\n\t};\n} }\n#if defined(MCDBGQ_USE_RELACY)\nnamespace moodycamel { namespace details {\n\ttypedef std::uint32_t thread_id_t;\n\tstatic const thread_id_t invalid_thread_id  = 0xFFFFFFFFU;\n\tstatic const thread_id_t invalid_thread_id2 = 0xFFFFFFFEU;\n\tstatic inline thread_id_t thread_id() { return rl::thread_index(); }\n} }\n#elif defined(_WIN32) || defined(__WINDOWS__) || defined(__WIN32__)\n// No sense pulling in windows.h in a header, we'll manually declare the function\n// we use and rely on backwards-compatibility for this not to break\nextern \"C\" __declspec(dllimport) unsigned long __stdcall GetCurrentThreadId(void);\nnamespace moodycamel { namespace details {\n\tstatic_assert(sizeof(unsigned long) == sizeof(std::uint32_t), \"Expected size of unsigned long to be 32 bits on Windows\");\n\ttypedef std::uint32_t thread_id_t;\n\tstatic const thread_id_t invalid_thread_id  = 0;\t\t\t// See http://blogs.msdn.com/b/oldnewthing/archive/2004/02/23/78395.aspx\n\tstatic const thread_id_t invalid_thread_id2 = 0xFFFFFFFFU;\t// Not technically guaranteed to be invalid, but is never used in practice. 
Note that all Win32 thread IDs are presently multiples of 4.\n\tstatic inline thread_id_t thread_id() { return static_cast<thread_id_t>(::GetCurrentThreadId()); }\n} }\n#elif defined(__arm__) || defined(_M_ARM) || defined(__aarch64__) || (defined(__APPLE__) && TARGET_OS_IPHONE)\nnamespace moodycamel { namespace details {\n\tstatic_assert(sizeof(std::thread::id) == 4 || sizeof(std::thread::id) == 8, \"std::thread::id is expected to be either 4 or 8 bytes\");\n\t\n\ttypedef std::thread::id thread_id_t;\n\tstatic const thread_id_t invalid_thread_id;         // Default ctor creates invalid ID\n\n\t// Note we don't define a invalid_thread_id2 since std::thread::id doesn't have one; it's\n\t// only used if MOODYCAMEL_CPP11_THREAD_LOCAL_SUPPORTED is defined anyway, which it won't\n\t// be.\n\tstatic inline thread_id_t thread_id() { return std::this_thread::get_id(); }\n\n\ttemplate<std::size_t> struct thread_id_size { };\n\ttemplate<> struct thread_id_size<4> { typedef std::uint32_t numeric_t; };\n\ttemplate<> struct thread_id_size<8> { typedef std::uint64_t numeric_t; };\n\n\ttemplate<> struct thread_id_converter<thread_id_t> {\n\t\ttypedef thread_id_size<sizeof(thread_id_t)>::numeric_t thread_id_numeric_size_t;\n#ifndef __APPLE__\n\t\ttypedef std::size_t thread_id_hash_t;\n#else\n\t\ttypedef thread_id_numeric_size_t thread_id_hash_t;\n#endif\n\n\t\tstatic thread_id_hash_t prehash(thread_id_t const& x)\n\t\t{\n#ifndef __APPLE__\n\t\t\treturn std::hash<std::thread::id>()(x);\n#else\n\t\t\treturn *reinterpret_cast<thread_id_hash_t const*>(&x);\n#endif\n\t\t}\n\t};\n} }\n#else\n// Use a nice trick from this answer: http://stackoverflow.com/a/8438730/21475\n// In order to get a numeric thread ID in a platform-independent way, we use a thread-local\n// static variable's address as a thread identifier :-)\n#if defined(__GNUC__) || defined(__INTEL_COMPILER)\n#define MOODYCAMEL_THREADLOCAL __thread\n#elif defined(_MSC_VER)\n#define MOODYCAMEL_THREADLOCAL 
__declspec(thread)\n#else\n// Assume C++11 compliant compiler\n#define MOODYCAMEL_THREADLOCAL thread_local\n#endif\nnamespace moodycamel { namespace details {\n\ttypedef std::uintptr_t thread_id_t;\n\tstatic const thread_id_t invalid_thread_id  = 0;\t\t// Address can't be nullptr\n\tstatic const thread_id_t invalid_thread_id2 = 1;\t\t// Member accesses off a null pointer are also generally invalid. Plus it's not aligned.\n\tinline thread_id_t thread_id() { static MOODYCAMEL_THREADLOCAL int x; return reinterpret_cast<thread_id_t>(&x); }\n} }\n#endif\n\n// Constexpr if\n#ifndef MOODYCAMEL_CONSTEXPR_IF\n#if (defined(_MSC_VER) && defined(_HAS_CXX17) && _HAS_CXX17) || __cplusplus > 201402L\n#define MOODYCAMEL_CONSTEXPR_IF if constexpr\n#define MOODYCAMEL_MAYBE_UNUSED [[maybe_unused]]\n#else\n#define MOODYCAMEL_CONSTEXPR_IF if\n#define MOODYCAMEL_MAYBE_UNUSED\n#endif\n#endif\n\n// Exceptions\n#ifndef MOODYCAMEL_EXCEPTIONS_ENABLED\n#if (defined(_MSC_VER) && defined(_CPPUNWIND)) || (defined(__GNUC__) && defined(__EXCEPTIONS)) || (!defined(_MSC_VER) && !defined(__GNUC__))\n#define MOODYCAMEL_EXCEPTIONS_ENABLED\n#endif\n#endif\n#ifdef MOODYCAMEL_EXCEPTIONS_ENABLED\n#define MOODYCAMEL_TRY try\n#define MOODYCAMEL_CATCH(...) catch(__VA_ARGS__)\n#define MOODYCAMEL_RETHROW throw\n#define MOODYCAMEL_THROW(expr) throw (expr)\n#else\n#define MOODYCAMEL_TRY MOODYCAMEL_CONSTEXPR_IF (true)\n#define MOODYCAMEL_CATCH(...) 
else MOODYCAMEL_CONSTEXPR_IF (false)\n#define MOODYCAMEL_RETHROW\n#define MOODYCAMEL_THROW(expr)\n#endif\n\n#ifndef MOODYCAMEL_NOEXCEPT\n#if !defined(MOODYCAMEL_EXCEPTIONS_ENABLED)\n#define MOODYCAMEL_NOEXCEPT\n#define MOODYCAMEL_NOEXCEPT_CTOR(type, valueType, expr) true\n#define MOODYCAMEL_NOEXCEPT_ASSIGN(type, valueType, expr) true\n#elif defined(_MSC_VER) && defined(_NOEXCEPT) && _MSC_VER < 1800\n// VS2012's std::is_nothrow_[move_]constructible is broken and returns true when it shouldn't :-(\n// We have to assume *all* non-trivial constructors may throw on VS2012!\n#define MOODYCAMEL_NOEXCEPT _NOEXCEPT\n#define MOODYCAMEL_NOEXCEPT_CTOR(type, valueType, expr) (std::is_rvalue_reference<valueType>::value && std::is_move_constructible<type>::value ? std::is_trivially_move_constructible<type>::value : std::is_trivially_copy_constructible<type>::value)\n#define MOODYCAMEL_NOEXCEPT_ASSIGN(type, valueType, expr) ((std::is_rvalue_reference<valueType>::value && std::is_move_assignable<type>::value ? std::is_trivially_move_assignable<type>::value || std::is_nothrow_move_assignable<type>::value : std::is_trivially_copy_assignable<type>::value || std::is_nothrow_copy_assignable<type>::value) && MOODYCAMEL_NOEXCEPT_CTOR(type, valueType, expr))\n#elif defined(_MSC_VER) && defined(_NOEXCEPT) && _MSC_VER < 1900\n#define MOODYCAMEL_NOEXCEPT _NOEXCEPT\n#define MOODYCAMEL_NOEXCEPT_CTOR(type, valueType, expr) (std::is_rvalue_reference<valueType>::value && std::is_move_constructible<type>::value ? std::is_trivially_move_constructible<type>::value || std::is_nothrow_move_constructible<type>::value : std::is_trivially_copy_constructible<type>::value || std::is_nothrow_copy_constructible<type>::value)\n#define MOODYCAMEL_NOEXCEPT_ASSIGN(type, valueType, expr) ((std::is_rvalue_reference<valueType>::value && std::is_move_assignable<type>::value ? 
std::is_trivially_move_assignable<type>::value || std::is_nothrow_move_assignable<type>::value : std::is_trivially_copy_assignable<type>::value || std::is_nothrow_copy_assignable<type>::value) && MOODYCAMEL_NOEXCEPT_CTOR(type, valueType, expr))\n#else\n#define MOODYCAMEL_NOEXCEPT noexcept\n#define MOODYCAMEL_NOEXCEPT_CTOR(type, valueType, expr) noexcept(expr)\n#define MOODYCAMEL_NOEXCEPT_ASSIGN(type, valueType, expr) noexcept(expr)\n#endif\n#endif\n\n#ifndef MOODYCAMEL_CPP11_THREAD_LOCAL_SUPPORTED\n#ifdef MCDBGQ_USE_RELACY\n#define MOODYCAMEL_CPP11_THREAD_LOCAL_SUPPORTED\n#else\n// VS2013 doesn't support `thread_local`, and MinGW-w64 w/ POSIX threading has a crippling bug: http://sourceforge.net/p/mingw-w64/bugs/445\n// g++ <=4.7 doesn't support thread_local either.\n// Finally, iOS/ARM doesn't have support for it either, and g++/ARM allows it to compile but it's unconfirmed to actually work\n#if (!defined(_MSC_VER) || _MSC_VER >= 1900) && (!defined(__MINGW32__) && !defined(__MINGW64__) || !defined(__WINPTHREADS_VERSION)) && (!defined(__GNUC__) || __GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8)) && (!defined(__APPLE__) || !TARGET_OS_IPHONE) && !defined(__arm__) && !defined(_M_ARM) && !defined(__aarch64__)\n// Assume `thread_local` is fully supported in all other C++11 compilers/platforms\n//#define MOODYCAMEL_CPP11_THREAD_LOCAL_SUPPORTED    // always disabled for now since several users report having problems with it on\n#endif\n#endif\n#endif\n\n// VS2012 doesn't support deleted functions. \n// In this case, we declare the function normally but don't define it. 
A link error will be generated if the function is called.\n#ifndef MOODYCAMEL_DELETE_FUNCTION\n#if defined(_MSC_VER) && _MSC_VER < 1800\n#define MOODYCAMEL_DELETE_FUNCTION\n#else\n#define MOODYCAMEL_DELETE_FUNCTION = delete\n#endif\n#endif\n\nnamespace moodycamel { namespace details {\n#ifndef MOODYCAMEL_ALIGNAS\n// VS2013 doesn't support alignas or alignof, and align() requires a constant literal\n#if defined(_MSC_VER) && _MSC_VER <= 1800\n#define MOODYCAMEL_ALIGNAS(alignment) __declspec(align(alignment))\n#define MOODYCAMEL_ALIGNOF(obj) __alignof(obj)\n#define MOODYCAMEL_ALIGNED_TYPE_LIKE(T, obj) typename details::Vs2013Aligned<std::alignment_of<obj>::value, T>::type\n\ttemplate<int Align, typename T> struct Vs2013Aligned { };  // default, unsupported alignment\n\ttemplate<typename T> struct Vs2013Aligned<1, T> { typedef __declspec(align(1)) T type; };\n\ttemplate<typename T> struct Vs2013Aligned<2, T> { typedef __declspec(align(2)) T type; };\n\ttemplate<typename T> struct Vs2013Aligned<4, T> { typedef __declspec(align(4)) T type; };\n\ttemplate<typename T> struct Vs2013Aligned<8, T> { typedef __declspec(align(8)) T type; };\n\ttemplate<typename T> struct Vs2013Aligned<16, T> { typedef __declspec(align(16)) T type; };\n\ttemplate<typename T> struct Vs2013Aligned<32, T> { typedef __declspec(align(32)) T type; };\n\ttemplate<typename T> struct Vs2013Aligned<64, T> { typedef __declspec(align(64)) T type; };\n\ttemplate<typename T> struct Vs2013Aligned<128, T> { typedef __declspec(align(128)) T type; };\n\ttemplate<typename T> struct Vs2013Aligned<256, T> { typedef __declspec(align(256)) T type; };\n#else\n\ttemplate<typename T> struct identity { typedef T type; };\n#define MOODYCAMEL_ALIGNAS(alignment) alignas(alignment)\n#define MOODYCAMEL_ALIGNOF(obj) alignof(obj)\n#define MOODYCAMEL_ALIGNED_TYPE_LIKE(T, obj) alignas(alignof(obj)) typename details::identity<T>::type\n#endif\n#endif\n} }\n\n\n// TSAN can false report races in lock-free code.  
To enable TSAN to be used from projects that use this one,\n// we can apply per-function compile-time suppression.\n// See https://clang.llvm.org/docs/ThreadSanitizer.html#has-feature-thread-sanitizer\n#define MOODYCAMEL_NO_TSAN\n#if defined(__has_feature)\n #if __has_feature(thread_sanitizer)\n  #undef MOODYCAMEL_NO_TSAN\n  #define MOODYCAMEL_NO_TSAN __attribute__((no_sanitize(\"thread\")))\n #endif // TSAN\n#endif // TSAN\n\n// Compiler-specific likely/unlikely hints\nnamespace moodycamel { namespace details {\n#if defined(__GNUC__)\n\tstatic inline bool (likely)(bool x) { return __builtin_expect((x), true); }\n\tstatic inline bool (unlikely)(bool x) { return __builtin_expect((x), false); }\n#else\n\tstatic inline bool (likely)(bool x) { return x; }\n\tstatic inline bool (unlikely)(bool x) { return x; }\n#endif\n} }\n\n#ifdef MOODYCAMEL_QUEUE_INTERNAL_DEBUG\n#include \"internal/concurrentqueue_internal_debug.h\"\n#endif\n\nnamespace moodycamel {\nnamespace details {\n\ttemplate<typename T>\n\tstruct const_numeric_max {\n\t\tstatic_assert(std::is_integral<T>::value, \"const_numeric_max can only be used with integers\");\n\t\tstatic const T value = std::numeric_limits<T>::is_signed\n\t\t\t? (static_cast<T>(1) << (sizeof(T) * CHAR_BIT - 1)) - static_cast<T>(1)\n\t\t\t: static_cast<T>(-1);\n\t};\n\n#if defined(__GLIBCXX__)\n\ttypedef ::max_align_t std_max_align_t;      // libstdc++ forgot to add it to std:: for a while\n#else\n\ttypedef std::max_align_t std_max_align_t;   // Others (e.g. MSVC) insist it can *only* be accessed via std::\n#endif\n\n\t// Some platforms have incorrectly set max_align_t to a type with <8 bytes alignment even while supporting\n\t// 8-byte aligned scalar values (*cough* 32-bit iOS). Work around this with our own union. See issue #64.\n\ttypedef union {\n\t\tstd_max_align_t x;\n\t\tlong long y;\n\t\tvoid* z;\n\t} max_align_t;\n}\n\n// Default traits for the ConcurrentQueue. 
To change some of the\n// traits without re-implementing all of them, inherit from this\n// struct and shadow the declarations you wish to be different;\n// since the traits are used as a template type parameter, the\n// shadowed declarations will be used where defined, and the defaults\n// otherwise.\nstruct ConcurrentQueueDefaultTraits\n{\n\t// General-purpose size type. std::size_t is strongly recommended.\n\ttypedef std::size_t size_t;\n\t\n\t// The type used for the enqueue and dequeue indices. Must be at least as\n\t// large as size_t. Should be significantly larger than the number of elements\n\t// you expect to hold at once, especially if you have a high turnover rate;\n\t// for example, on 32-bit x86, if you expect to have over a hundred million\n\t// elements or pump several million elements through your queue in a very\n\t// short space of time, using a 32-bit type *may* trigger a race condition.\n\t// A 64-bit int type is recommended in that case, and in practice will\n\t// prevent a race condition no matter the usage of the queue. Note that\n\t// whether the queue is lock-free with a 64-int type depends on the whether\n\t// std::atomic<std::uint64_t> is lock-free, which is platform-specific.\n\ttypedef std::size_t index_t;\n\t\n\t// Internally, all elements are enqueued and dequeued from multi-element\n\t// blocks; this is the smallest controllable unit. If you expect few elements\n\t// but many producers, a smaller block size should be favoured. For few producers\n\t// and/or many elements, a larger block size is preferred. A sane default\n\t// is provided. Must be a power of 2.\n\tstatic const size_t BLOCK_SIZE = 32;\n\t\n\t// For explicit producers (i.e. when using a producer token), the block is\n\t// checked for being empty by iterating through a list of flags, one per element.\n\t// For large block sizes, this is too inefficient, and switching to an atomic\n\t// counter-based approach is faster. 
The switch is made for block sizes strictly\n\t// larger than this threshold.\n\tstatic const size_t EXPLICIT_BLOCK_EMPTY_COUNTER_THRESHOLD = 32;\n\t\n\t// How many full blocks can be expected for a single explicit producer? This should\n\t// reflect that number's maximum for optimal performance. Must be a power of 2.\n\tstatic const size_t EXPLICIT_INITIAL_INDEX_SIZE = 32;\n\t\n\t// How many full blocks can be expected for a single implicit producer? This should\n\t// reflect that number's maximum for optimal performance. Must be a power of 2.\n\tstatic const size_t IMPLICIT_INITIAL_INDEX_SIZE = 32;\n\t\n\t// The initial size of the hash table mapping thread IDs to implicit producers.\n\t// Note that the hash is resized every time it becomes half full.\n\t// Must be a power of two, and either 0 or at least 1. If 0, implicit production\n\t// (using the enqueue methods without an explicit producer token) is disabled.\n\tstatic const size_t INITIAL_IMPLICIT_PRODUCER_HASH_SIZE = 32;\n\t\n\t// Controls the number of items that an explicit consumer (i.e. one with a token)\n\t// must consume before it causes all consumers to rotate and move on to the next\n\t// internal queue.\n\tstatic const std::uint32_t EXPLICIT_CONSUMER_CONSUMPTION_QUOTA_BEFORE_ROTATE = 256;\n\t\n\t// The maximum number of elements (inclusive) that can be enqueued to a sub-queue.\n\t// Enqueue operations that would cause this limit to be surpassed will fail. 
Note\n\t// that this limit is enforced at the block level (for performance reasons), i.e.\n\t// it's rounded up to the nearest block size.\n\tstatic const size_t MAX_SUBQUEUE_SIZE = details::const_numeric_max<size_t>::value;\n\n\t// The number of times to spin before sleeping when waiting on a semaphore.\n\t// Recommended values are on the order of 1000-10000 unless the number of\n\t// consumer threads exceeds the number of idle cores (in which case try 0-100).\n\t// Only affects instances of the BlockingConcurrentQueue.\n\tstatic const int MAX_SEMA_SPINS = 10000;\n\t\n\t\n#ifndef MCDBGQ_USE_RELACY\n\t// Memory allocation can be customized if needed.\n\t// malloc should return nullptr on failure, and handle alignment like std::malloc.\n#if defined(malloc) || defined(free)\n\t// Gah, this is 2015, stop defining macros that break standard code already!\n\t// Work around malloc/free being special macros:\n\tstatic inline void* WORKAROUND_malloc(size_t size) { return malloc(size); }\n\tstatic inline void WORKAROUND_free(void* ptr) { return free(ptr); }\n\tstatic inline void* (malloc)(size_t size) { return WORKAROUND_malloc(size); }\n\tstatic inline void (free)(void* ptr) { return WORKAROUND_free(ptr); }\n#else\n\tstatic inline void* malloc(size_t size) { return std::malloc(size); }\n\tstatic inline void free(void* ptr) { return std::free(ptr); }\n#endif\n#else\n\t// Debug versions when running under the Relacy race detector (ignore\n\t// these in user code)\n\tstatic inline void* malloc(size_t size) { return rl::rl_malloc(size, $); }\n\tstatic inline void free(void* ptr) { return rl::rl_free(ptr, $); }\n#endif\n};\n\n\n// When producing or consuming many elements, the most efficient way is to:\n//    1) Use one of the bulk-operation methods of the queue with a token\n//    2) Failing that, use the bulk-operation methods without a token\n//    3) Failing that, create a token and use that with the single-item methods\n//    4) Failing that, use the single-parameter 
methods of the queue\n// Having said that, don't create tokens willy-nilly -- ideally there should be\n// a maximum of one token per thread (of each kind).\nstruct ProducerToken;\nstruct ConsumerToken;\n\ntemplate<typename T, typename Traits> class ConcurrentQueue;\ntemplate<typename T, typename Traits> class BlockingConcurrentQueue;\nclass ConcurrentQueueTests;\n\n\nnamespace details\n{\n\tstruct ConcurrentQueueProducerTypelessBase\n\t{\n\t\tConcurrentQueueProducerTypelessBase* next;\n\t\tstd::atomic<bool> inactive;\n\t\tProducerToken* token;\n\t\t\n\t\tConcurrentQueueProducerTypelessBase()\n\t\t\t: next(nullptr), inactive(false), token(nullptr)\n\t\t{\n\t\t}\n\t};\n\t\n\ttemplate<bool use32> struct _hash_32_or_64 {\n\t\tstatic inline std::uint32_t hash(std::uint32_t h)\n\t\t{\n\t\t\t// MurmurHash3 finalizer -- see https://code.google.com/p/smhasher/source/browse/trunk/MurmurHash3.cpp\n\t\t\t// Since the thread ID is already unique, all we really want to do is propagate that\n\t\t\t// uniqueness evenly across all the bits, so that we can use a subset of the bits while\n\t\t\t// reducing collisions significantly\n\t\t\th ^= h >> 16;\n\t\t\th *= 0x85ebca6b;\n\t\t\th ^= h >> 13;\n\t\t\th *= 0xc2b2ae35;\n\t\t\treturn h ^ (h >> 16);\n\t\t}\n\t};\n\ttemplate<> struct _hash_32_or_64<1> {\n\t\tstatic inline std::uint64_t hash(std::uint64_t h)\n\t\t{\n\t\t\th ^= h >> 33;\n\t\t\th *= 0xff51afd7ed558ccd;\n\t\t\th ^= h >> 33;\n\t\t\th *= 0xc4ceb9fe1a85ec53;\n\t\t\treturn h ^ (h >> 33);\n\t\t}\n\t};\n\ttemplate<std::size_t size> struct hash_32_or_64 : public _hash_32_or_64<(size > 4)> {  };\n\t\n\tstatic inline size_t hash_thread_id(thread_id_t id)\n\t{\n\t\tstatic_assert(sizeof(thread_id_t) <= 8, \"Expected a platform where thread IDs are at most 64-bit values\");\n\t\treturn static_cast<size_t>(hash_32_or_64<sizeof(thread_id_converter<thread_id_t>::thread_id_hash_t)>::hash(\n\t\t\tthread_id_converter<thread_id_t>::prehash(id)));\n\t}\n\t\n\ttemplate<typename T>\n\tstatic 
inline bool circular_less_than(T a, T b)\n\t{\n#ifdef _MSC_VER\n#pragma warning(push)\n#pragma warning(disable: 4554)\n#endif\n\t\tstatic_assert(std::is_integral<T>::value && !std::numeric_limits<T>::is_signed, \"circular_less_than is intended to be used only with unsigned integer types\");\n\t\treturn static_cast<T>(a - b) > static_cast<T>(static_cast<T>(1) << static_cast<T>(sizeof(T) * CHAR_BIT - 1));\n#ifdef _MSC_VER\n#pragma warning(pop)\n#endif\n\t}\n\t\n\ttemplate<typename U>\n\tstatic inline char* align_for(char* ptr)\n\t{\n\t\tconst std::size_t alignment = std::alignment_of<U>::value;\n\t\treturn ptr + (alignment - (reinterpret_cast<std::uintptr_t>(ptr) % alignment)) % alignment;\n\t}\n\n\ttemplate<typename T>\n\tstatic inline T ceil_to_pow_2(T x)\n\t{\n\t\tstatic_assert(std::is_integral<T>::value && !std::numeric_limits<T>::is_signed, \"ceil_to_pow_2 is intended to be used only with unsigned integer types\");\n\n\t\t// Adapted from http://graphics.stanford.edu/~seander/bithacks.html#RoundUpPowerOf2\n\t\t--x;\n\t\tx |= x >> 1;\n\t\tx |= x >> 2;\n\t\tx |= x >> 4;\n\t\tfor (std::size_t i = 1; i < sizeof(T); i <<= 1) {\n\t\t\tx |= x >> (i << 3);\n\t\t}\n\t\t++x;\n\t\treturn x;\n\t}\n\t\n\ttemplate<typename T>\n\tstatic inline void swap_relaxed(std::atomic<T>& left, std::atomic<T>& right)\n\t{\n\t\tT temp = std::move(left.load(std::memory_order_relaxed));\n\t\tleft.store(std::move(right.load(std::memory_order_relaxed)), std::memory_order_relaxed);\n\t\tright.store(std::move(temp), std::memory_order_relaxed);\n\t}\n\t\n\ttemplate<typename T>\n\tstatic inline T const& nomove(T const& x)\n\t{\n\t\treturn x;\n\t}\n\t\n\ttemplate<bool Enable>\n\tstruct nomove_if\n\t{\n\t\ttemplate<typename T>\n\t\tstatic inline T const& eval(T const& x)\n\t\t{\n\t\t\treturn x;\n\t\t}\n\t};\n\t\n\ttemplate<>\n\tstruct nomove_if<false>\n\t{\n\t\ttemplate<typename U>\n\t\tstatic inline auto eval(U&& x)\n\t\t\t-> decltype(std::forward<U>(x))\n\t\t{\n\t\t\treturn 
std::forward<U>(x);\n\t\t}\n\t};\n\t\n\ttemplate<typename It>\n\tstatic inline auto deref_noexcept(It& it) MOODYCAMEL_NOEXCEPT -> decltype(*it)\n\t{\n\t\treturn *it;\n\t}\n\t\n#if defined(__clang__) || !defined(__GNUC__) || __GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8)\n\ttemplate<typename T> struct is_trivially_destructible : std::is_trivially_destructible<T> { };\n#else\n\ttemplate<typename T> struct is_trivially_destructible : std::has_trivial_destructor<T> { };\n#endif\n\t\n#ifdef MOODYCAMEL_CPP11_THREAD_LOCAL_SUPPORTED\n#ifdef MCDBGQ_USE_RELACY\n\ttypedef RelacyThreadExitListener ThreadExitListener;\n\ttypedef RelacyThreadExitNotifier ThreadExitNotifier;\n#else\n\tstruct ThreadExitListener\n\t{\n\t\ttypedef void (*callback_t)(void*);\n\t\tcallback_t callback;\n\t\tvoid* userData;\n\t\t\n\t\tThreadExitListener* next;\t\t// reserved for use by the ThreadExitNotifier\n\t};\n\t\n\t\n\tclass ThreadExitNotifier\n\t{\n\tpublic:\n\t\tstatic void subscribe(ThreadExitListener* listener)\n\t\t{\n\t\t\tauto& tlsInst = instance();\n\t\t\tlistener->next = tlsInst.tail;\n\t\t\ttlsInst.tail = listener;\n\t\t}\n\t\t\n\t\tstatic void unsubscribe(ThreadExitListener* listener)\n\t\t{\n\t\t\tauto& tlsInst = instance();\n\t\t\tThreadExitListener** prev = &tlsInst.tail;\n\t\t\tfor (auto ptr = tlsInst.tail; ptr != nullptr; ptr = ptr->next) {\n\t\t\t\tif (ptr == listener) {\n\t\t\t\t\t*prev = ptr->next;\n\t\t\t\t\tbreak;\n\t\t\t\t}\n\t\t\t\tprev = &ptr->next;\n\t\t\t}\n\t\t}\n\t\t\n\tprivate:\n\t\tThreadExitNotifier() : tail(nullptr) { }\n\t\tThreadExitNotifier(ThreadExitNotifier const&) MOODYCAMEL_DELETE_FUNCTION;\n\t\tThreadExitNotifier& operator=(ThreadExitNotifier const&) MOODYCAMEL_DELETE_FUNCTION;\n\t\t\n\t\t~ThreadExitNotifier()\n\t\t{\n\t\t\t// This thread is about to exit, let everyone know!\n\t\t\tassert(this == &instance() && \"If this assert fails, you likely have a buggy compiler! 
Change the preprocessor conditions such that MOODYCAMEL_CPP11_THREAD_LOCAL_SUPPORTED is no longer defined.\");\n\t\t\tfor (auto ptr = tail; ptr != nullptr; ptr = ptr->next) {\n\t\t\t\tptr->callback(ptr->userData);\n\t\t\t}\n\t\t}\n\t\t\n\t\t// Thread-local\n\t\tstatic inline ThreadExitNotifier& instance()\n\t\t{\n\t\t\tstatic thread_local ThreadExitNotifier notifier;\n\t\t\treturn notifier;\n\t\t}\n\t\t\n\tprivate:\n\t\tThreadExitListener* tail;\n\t};\n#endif\n#endif\n\t\n\ttemplate<typename T> struct static_is_lock_free_num { enum { value = 0 }; };\n\ttemplate<> struct static_is_lock_free_num<signed char> { enum { value = ATOMIC_CHAR_LOCK_FREE }; };\n\ttemplate<> struct static_is_lock_free_num<short> { enum { value = ATOMIC_SHORT_LOCK_FREE }; };\n\ttemplate<> struct static_is_lock_free_num<int> { enum { value = ATOMIC_INT_LOCK_FREE }; };\n\ttemplate<> struct static_is_lock_free_num<long> { enum { value = ATOMIC_LONG_LOCK_FREE }; };\n\ttemplate<> struct static_is_lock_free_num<long long> { enum { value = ATOMIC_LLONG_LOCK_FREE }; };\n\ttemplate<typename T> struct static_is_lock_free : static_is_lock_free_num<typename std::make_signed<T>::type> {  };\n\ttemplate<> struct static_is_lock_free<bool> { enum { value = ATOMIC_BOOL_LOCK_FREE }; };\n\ttemplate<typename U> struct static_is_lock_free<U*> { enum { value = ATOMIC_POINTER_LOCK_FREE }; };\n}\n\n\nstruct ProducerToken\n{\n\ttemplate<typename T, typename Traits>\n\texplicit ProducerToken(ConcurrentQueue<T, Traits>& queue);\n\t\n\ttemplate<typename T, typename Traits>\n\texplicit ProducerToken(BlockingConcurrentQueue<T, Traits>& queue);\n\t\n\tProducerToken(ProducerToken&& other) MOODYCAMEL_NOEXCEPT\n\t\t: producer(other.producer)\n\t{\n\t\tother.producer = nullptr;\n\t\tif (producer != nullptr) {\n\t\t\tproducer->token = this;\n\t\t}\n\t}\n\t\n\tinline ProducerToken& operator=(ProducerToken&& other) MOODYCAMEL_NOEXCEPT\n\t{\n\t\tswap(other);\n\t\treturn *this;\n\t}\n\t\n\tvoid swap(ProducerToken& other) 
MOODYCAMEL_NOEXCEPT\n\t{\n\t\tstd::swap(producer, other.producer);\n\t\tif (producer != nullptr) {\n\t\t\tproducer->token = this;\n\t\t}\n\t\tif (other.producer != nullptr) {\n\t\t\tother.producer->token = &other;\n\t\t}\n\t}\n\t\n\t// A token is always valid unless:\n\t//     1) Memory allocation failed during construction\n\t//     2) It was moved via the move constructor\n\t//        (Note: assignment does a swap, leaving both potentially valid)\n\t//     3) The associated queue was destroyed\n\t// Note that if valid() returns true, that only indicates\n\t// that the token is valid for use with a specific queue,\n\t// but not which one; that's up to the user to track.\n\tinline bool valid() const { return producer != nullptr; }\n\t\n\t~ProducerToken()\n\t{\n\t\tif (producer != nullptr) {\n\t\t\tproducer->token = nullptr;\n\t\t\tproducer->inactive.store(true, std::memory_order_release);\n\t\t}\n\t}\n\t\n\t// Disable copying and assignment\n\tProducerToken(ProducerToken const&) MOODYCAMEL_DELETE_FUNCTION;\n\tProducerToken& operator=(ProducerToken const&) MOODYCAMEL_DELETE_FUNCTION;\n\t\nprivate:\n\ttemplate<typename T, typename Traits> friend class ConcurrentQueue;\n\tfriend class ConcurrentQueueTests;\n\t\nprotected:\n\tdetails::ConcurrentQueueProducerTypelessBase* producer;\n};\n\n\nstruct ConsumerToken\n{\n\ttemplate<typename T, typename Traits>\n\texplicit ConsumerToken(ConcurrentQueue<T, Traits>& q);\n\t\n\ttemplate<typename T, typename Traits>\n\texplicit ConsumerToken(BlockingConcurrentQueue<T, Traits>& q);\n\t\n\tConsumerToken(ConsumerToken&& other) MOODYCAMEL_NOEXCEPT\n\t\t: initialOffset(other.initialOffset), lastKnownGlobalOffset(other.lastKnownGlobalOffset), itemsConsumedFromCurrent(other.itemsConsumedFromCurrent), currentProducer(other.currentProducer), desiredProducer(other.desiredProducer)\n\t{\n\t}\n\t\n\tinline ConsumerToken& operator=(ConsumerToken&& other) MOODYCAMEL_NOEXCEPT\n\t{\n\t\tswap(other);\n\t\treturn *this;\n\t}\n\t\n\tvoid 
swap(ConsumerToken& other) MOODYCAMEL_NOEXCEPT\n\t{\n\t\tstd::swap(initialOffset, other.initialOffset);\n\t\tstd::swap(lastKnownGlobalOffset, other.lastKnownGlobalOffset);\n\t\tstd::swap(itemsConsumedFromCurrent, other.itemsConsumedFromCurrent);\n\t\tstd::swap(currentProducer, other.currentProducer);\n\t\tstd::swap(desiredProducer, other.desiredProducer);\n\t}\n\t\n\t// Disable copying and assignment\n\tConsumerToken(ConsumerToken const&) MOODYCAMEL_DELETE_FUNCTION;\n\tConsumerToken& operator=(ConsumerToken const&) MOODYCAMEL_DELETE_FUNCTION;\n\nprivate:\n\ttemplate<typename T, typename Traits> friend class ConcurrentQueue;\n\tfriend class ConcurrentQueueTests;\n\t\nprivate: // but shared with ConcurrentQueue\n\tstd::uint32_t initialOffset;\n\tstd::uint32_t lastKnownGlobalOffset;\n\tstd::uint32_t itemsConsumedFromCurrent;\n\tdetails::ConcurrentQueueProducerTypelessBase* currentProducer;\n\tdetails::ConcurrentQueueProducerTypelessBase* desiredProducer;\n};\n\n// Need to forward-declare this swap because it's in a namespace.\n// See http://stackoverflow.com/questions/4492062/why-does-a-c-friend-class-need-a-forward-declaration-only-in-other-namespaces\ntemplate<typename T, typename Traits>\ninline void swap(typename ConcurrentQueue<T, Traits>::ImplicitProducerKVP& a, typename ConcurrentQueue<T, Traits>::ImplicitProducerKVP& b) MOODYCAMEL_NOEXCEPT;\n\n\ntemplate<typename T, typename Traits = ConcurrentQueueDefaultTraits>\nclass ConcurrentQueue\n{\npublic:\n\ttypedef ::moodycamel::ProducerToken producer_token_t;\n\ttypedef ::moodycamel::ConsumerToken consumer_token_t;\n\t\n\ttypedef typename Traits::index_t index_t;\n\ttypedef typename Traits::size_t size_t;\n\t\n\tstatic const size_t BLOCK_SIZE = static_cast<size_t>(Traits::BLOCK_SIZE);\n\tstatic const size_t EXPLICIT_BLOCK_EMPTY_COUNTER_THRESHOLD = static_cast<size_t>(Traits::EXPLICIT_BLOCK_EMPTY_COUNTER_THRESHOLD);\n\tstatic const size_t EXPLICIT_INITIAL_INDEX_SIZE = 
static_cast<size_t>(Traits::EXPLICIT_INITIAL_INDEX_SIZE);\n\tstatic const size_t IMPLICIT_INITIAL_INDEX_SIZE = static_cast<size_t>(Traits::IMPLICIT_INITIAL_INDEX_SIZE);\n\tstatic const size_t INITIAL_IMPLICIT_PRODUCER_HASH_SIZE = static_cast<size_t>(Traits::INITIAL_IMPLICIT_PRODUCER_HASH_SIZE);\n\tstatic const std::uint32_t EXPLICIT_CONSUMER_CONSUMPTION_QUOTA_BEFORE_ROTATE = static_cast<std::uint32_t>(Traits::EXPLICIT_CONSUMER_CONSUMPTION_QUOTA_BEFORE_ROTATE);\n#ifdef _MSC_VER\n#pragma warning(push)\n#pragma warning(disable: 4307)\t\t// + integral constant overflow (that's what the ternary expression is for!)\n#pragma warning(disable: 4309)\t\t// static_cast: Truncation of constant value\n#endif\n\tstatic const size_t MAX_SUBQUEUE_SIZE = (details::const_numeric_max<size_t>::value - static_cast<size_t>(Traits::MAX_SUBQUEUE_SIZE) < BLOCK_SIZE) ? details::const_numeric_max<size_t>::value : ((static_cast<size_t>(Traits::MAX_SUBQUEUE_SIZE) + (BLOCK_SIZE - 1)) / BLOCK_SIZE * BLOCK_SIZE);\n#ifdef _MSC_VER\n#pragma warning(pop)\n#endif\n\n\tstatic_assert(!std::numeric_limits<size_t>::is_signed && std::is_integral<size_t>::value, \"Traits::size_t must be an unsigned integral type\");\n\tstatic_assert(!std::numeric_limits<index_t>::is_signed && std::is_integral<index_t>::value, \"Traits::index_t must be an unsigned integral type\");\n\tstatic_assert(sizeof(index_t) >= sizeof(size_t), \"Traits::index_t must be at least as wide as Traits::size_t\");\n\tstatic_assert((BLOCK_SIZE > 1) && !(BLOCK_SIZE & (BLOCK_SIZE - 1)), \"Traits::BLOCK_SIZE must be a power of 2 (and at least 2)\");\n\tstatic_assert((EXPLICIT_BLOCK_EMPTY_COUNTER_THRESHOLD > 1) && !(EXPLICIT_BLOCK_EMPTY_COUNTER_THRESHOLD & (EXPLICIT_BLOCK_EMPTY_COUNTER_THRESHOLD - 1)), \"Traits::EXPLICIT_BLOCK_EMPTY_COUNTER_THRESHOLD must be a power of 2 (and greater than 1)\");\n\tstatic_assert((EXPLICIT_INITIAL_INDEX_SIZE > 1) && !(EXPLICIT_INITIAL_INDEX_SIZE & (EXPLICIT_INITIAL_INDEX_SIZE - 1)), 
\"Traits::EXPLICIT_INITIAL_INDEX_SIZE must be a power of 2 (and greater than 1)\");\n\tstatic_assert((IMPLICIT_INITIAL_INDEX_SIZE > 1) && !(IMPLICIT_INITIAL_INDEX_SIZE & (IMPLICIT_INITIAL_INDEX_SIZE - 1)), \"Traits::IMPLICIT_INITIAL_INDEX_SIZE must be a power of 2 (and greater than 1)\");\n\tstatic_assert((INITIAL_IMPLICIT_PRODUCER_HASH_SIZE == 0) || !(INITIAL_IMPLICIT_PRODUCER_HASH_SIZE & (INITIAL_IMPLICIT_PRODUCER_HASH_SIZE - 1)), \"Traits::INITIAL_IMPLICIT_PRODUCER_HASH_SIZE must be a power of 2\");\n\tstatic_assert(INITIAL_IMPLICIT_PRODUCER_HASH_SIZE == 0 || INITIAL_IMPLICIT_PRODUCER_HASH_SIZE >= 1, \"Traits::INITIAL_IMPLICIT_PRODUCER_HASH_SIZE must be at least 1 (or 0 to disable implicit enqueueing)\");\n\npublic:\n\t// Creates a queue with at least `capacity` element slots; note that the\n\t// actual number of elements that can be inserted without additional memory\n\t// allocation depends on the number of producers and the block size (e.g. if\n\t// the block size is equal to `capacity`, only a single block will be allocated\n\t// up-front, which means only a single producer will be able to enqueue elements\n\t// without an extra allocation -- blocks aren't shared between producers).\n\t// This method is not thread safe -- it is up to the user to ensure that the\n\t// queue is fully constructed before it starts being used by other threads (this\n\t// includes making the memory effects of construction visible, possibly with a\n\t// memory barrier).\n\texplicit ConcurrentQueue(size_t capacity = 6 * BLOCK_SIZE)\n\t\t: producerListTail(nullptr),\n\t\tproducerCount(0),\n\t\tinitialBlockPoolIndex(0),\n\t\tnextExplicitConsumerId(0),\n\t\tglobalExplicitConsumerOffset(0)\n\t{\n\t\timplicitProducerHashResizeInProgress.clear(std::memory_order_relaxed);\n\t\tpopulate_initial_implicit_producer_hash();\n\t\tpopulate_initial_block_list(capacity / BLOCK_SIZE + ((capacity & (BLOCK_SIZE - 1)) == 0 ? 
0 : 1));\n\t\t\n#ifdef MOODYCAMEL_QUEUE_INTERNAL_DEBUG\n\t\t// Track all the producers using a fully-resolved typed list for\n\t\t// each kind; this makes it possible to debug them starting from\n\t\t// the root queue object (otherwise wacky casts are needed that\n\t\t// don't compile in the debugger's expression evaluator).\n\t\texplicitProducers.store(nullptr, std::memory_order_relaxed);\n\t\timplicitProducers.store(nullptr, std::memory_order_relaxed);\n#endif\n\t}\n\t\n\t// Computes the correct amount of pre-allocated blocks for you based\n\t// on the minimum number of elements you want available at any given\n\t// time, and the maximum concurrent number of each type of producer.\n\tConcurrentQueue(size_t minCapacity, size_t maxExplicitProducers, size_t maxImplicitProducers)\n\t\t: producerListTail(nullptr),\n\t\tproducerCount(0),\n\t\tinitialBlockPoolIndex(0),\n\t\tnextExplicitConsumerId(0),\n\t\tglobalExplicitConsumerOffset(0)\n\t{\n\t\timplicitProducerHashResizeInProgress.clear(std::memory_order_relaxed);\n\t\tpopulate_initial_implicit_producer_hash();\n\t\tsize_t blocks = (((minCapacity + BLOCK_SIZE - 1) / BLOCK_SIZE) - 1) * (maxExplicitProducers + 1) + 2 * (maxExplicitProducers + maxImplicitProducers);\n\t\tpopulate_initial_block_list(blocks);\n\t\t\n#ifdef MOODYCAMEL_QUEUE_INTERNAL_DEBUG\n\t\texplicitProducers.store(nullptr, std::memory_order_relaxed);\n\t\timplicitProducers.store(nullptr, std::memory_order_relaxed);\n#endif\n\t}\n\t\n\t// Note: The queue should not be accessed concurrently while it's\n\t// being deleted. 
It's up to the user to synchronize this.\n\t// This method is not thread safe.\n\t~ConcurrentQueue()\n\t{\n\t\t// Destroy producers\n\t\tauto ptr = producerListTail.load(std::memory_order_relaxed);\n\t\twhile (ptr != nullptr) {\n\t\t\tauto next = ptr->next_prod();\n\t\t\tif (ptr->token != nullptr) {\n\t\t\t\tptr->token->producer = nullptr;\n\t\t\t}\n\t\t\tdestroy(ptr);\n\t\t\tptr = next;\n\t\t}\n\t\t\n\t\t// Destroy implicit producer hash tables\n\t\tMOODYCAMEL_CONSTEXPR_IF (INITIAL_IMPLICIT_PRODUCER_HASH_SIZE != 0) {\n\t\t\tauto hash = implicitProducerHash.load(std::memory_order_relaxed);\n\t\t\twhile (hash != nullptr) {\n\t\t\t\tauto prev = hash->prev;\n\t\t\t\tif (prev != nullptr) {\t\t// The last hash is part of this object and was not allocated dynamically\n\t\t\t\t\tfor (size_t i = 0; i != hash->capacity; ++i) {\n\t\t\t\t\t\thash->entries[i].~ImplicitProducerKVP();\n\t\t\t\t\t}\n\t\t\t\t\thash->~ImplicitProducerHash();\n\t\t\t\t\t(Traits::free)(hash);\n\t\t\t\t}\n\t\t\t\thash = prev;\n\t\t\t}\n\t\t}\n\t\t\n\t\t// Destroy global free list\n\t\tauto block = freeList.head_unsafe();\n\t\twhile (block != nullptr) {\n\t\t\tauto next = block->freeListNext.load(std::memory_order_relaxed);\n\t\t\tif (block->dynamicallyAllocated) {\n\t\t\t\tdestroy(block);\n\t\t\t}\n\t\t\tblock = next;\n\t\t}\n\t\t\n\t\t// Destroy initial free list\n\t\tdestroy_array(initialBlockPool, initialBlockPoolSize);\n\t}\n\n\t// Disable copying and copy assignment\n\tConcurrentQueue(ConcurrentQueue const&) MOODYCAMEL_DELETE_FUNCTION;\n\tConcurrentQueue& operator=(ConcurrentQueue const&) MOODYCAMEL_DELETE_FUNCTION;\n\t\n\t// Moving is supported, but note that it is *not* a thread-safe operation.\n\t// Nobody can use the queue while it's being moved, and the memory effects\n\t// of that move must be propagated to other threads before they can use it.\n\t// Note: When a queue is moved, its tokens are still valid but can only be\n\t// used with the destination queue (i.e. 
semantically they are moved along\n\t// with the queue itself).\n\tConcurrentQueue(ConcurrentQueue&& other) MOODYCAMEL_NOEXCEPT\n\t\t: producerListTail(other.producerListTail.load(std::memory_order_relaxed)),\n\t\tproducerCount(other.producerCount.load(std::memory_order_relaxed)),\n\t\tinitialBlockPoolIndex(other.initialBlockPoolIndex.load(std::memory_order_relaxed)),\n\t\tinitialBlockPool(other.initialBlockPool),\n\t\tinitialBlockPoolSize(other.initialBlockPoolSize),\n\t\tfreeList(std::move(other.freeList)),\n\t\tnextExplicitConsumerId(other.nextExplicitConsumerId.load(std::memory_order_relaxed)),\n\t\tglobalExplicitConsumerOffset(other.globalExplicitConsumerOffset.load(std::memory_order_relaxed))\n\t{\n\t\t// Move the other one into this, and leave the other one as an empty queue\n\t\timplicitProducerHashResizeInProgress.clear(std::memory_order_relaxed);\n\t\tpopulate_initial_implicit_producer_hash();\n\t\tswap_implicit_producer_hashes(other);\n\t\t\n\t\tother.producerListTail.store(nullptr, std::memory_order_relaxed);\n\t\tother.producerCount.store(0, std::memory_order_relaxed);\n\t\tother.nextExplicitConsumerId.store(0, std::memory_order_relaxed);\n\t\tother.globalExplicitConsumerOffset.store(0, std::memory_order_relaxed);\n\t\t\n#ifdef MOODYCAMEL_QUEUE_INTERNAL_DEBUG\n\t\texplicitProducers.store(other.explicitProducers.load(std::memory_order_relaxed), std::memory_order_relaxed);\n\t\tother.explicitProducers.store(nullptr, std::memory_order_relaxed);\n\t\timplicitProducers.store(other.implicitProducers.load(std::memory_order_relaxed), std::memory_order_relaxed);\n\t\tother.implicitProducers.store(nullptr, std::memory_order_relaxed);\n#endif\n\t\t\n\t\tother.initialBlockPoolIndex.store(0, std::memory_order_relaxed);\n\t\tother.initialBlockPoolSize = 0;\n\t\tother.initialBlockPool = nullptr;\n\t\t\n\t\treown_producers();\n\t}\n\t\n\tinline ConcurrentQueue& operator=(ConcurrentQueue&& other) MOODYCAMEL_NOEXCEPT\n\t{\n\t\treturn swap_internal(other);\n\t}\n\t\n\t// 
Swaps this queue's state with the other's. Not thread-safe.\n\t// Swapping two queues does not invalidate their tokens, however\n\t// the tokens that were created for one queue must be used with\n\t// only the swapped queue (i.e. the tokens are tied to the\n\t// queue's movable state, not the object itself).\n\tinline void swap(ConcurrentQueue& other) MOODYCAMEL_NOEXCEPT\n\t{\n\t\tswap_internal(other);\n\t}\n\t\nprivate:\n\tConcurrentQueue& swap_internal(ConcurrentQueue& other)\n\t{\n\t\tif (this == &other) {\n\t\t\treturn *this;\n\t\t}\n\t\t\n\t\tdetails::swap_relaxed(producerListTail, other.producerListTail);\n\t\tdetails::swap_relaxed(producerCount, other.producerCount);\n\t\tdetails::swap_relaxed(initialBlockPoolIndex, other.initialBlockPoolIndex);\n\t\tstd::swap(initialBlockPool, other.initialBlockPool);\n\t\tstd::swap(initialBlockPoolSize, other.initialBlockPoolSize);\n\t\tfreeList.swap(other.freeList);\n\t\tdetails::swap_relaxed(nextExplicitConsumerId, other.nextExplicitConsumerId);\n\t\tdetails::swap_relaxed(globalExplicitConsumerOffset, other.globalExplicitConsumerOffset);\n\t\t\n\t\tswap_implicit_producer_hashes(other);\n\t\t\n\t\treown_producers();\n\t\tother.reown_producers();\n\t\t\n#ifdef MOODYCAMEL_QUEUE_INTERNAL_DEBUG\n\t\tdetails::swap_relaxed(explicitProducers, other.explicitProducers);\n\t\tdetails::swap_relaxed(implicitProducers, other.implicitProducers);\n#endif\n\t\t\n\t\treturn *this;\n\t}\n\t\npublic:\n\t// Enqueues a single item (by copying it).\n\t// Allocates memory if required. 
Only fails if memory allocation fails (or implicit\n\t// production is disabled because Traits::INITIAL_IMPLICIT_PRODUCER_HASH_SIZE is 0,\n\t// or Traits::MAX_SUBQUEUE_SIZE has been defined and would be surpassed).\n\t// Thread-safe.\n\tinline bool enqueue(T const& item)\n\t{\n\t\tMOODYCAMEL_CONSTEXPR_IF (INITIAL_IMPLICIT_PRODUCER_HASH_SIZE == 0) return false;\n\t\telse return inner_enqueue<CanAlloc>(item);\n\t}\n\t\n\t// Enqueues a single item (by moving it, if possible).\n\t// Allocates memory if required. Only fails if memory allocation fails (or implicit\n\t// production is disabled because Traits::INITIAL_IMPLICIT_PRODUCER_HASH_SIZE is 0,\n\t// or Traits::MAX_SUBQUEUE_SIZE has been defined and would be surpassed).\n\t// Thread-safe.\n\tinline bool enqueue(T&& item)\n\t{\n\t\tMOODYCAMEL_CONSTEXPR_IF (INITIAL_IMPLICIT_PRODUCER_HASH_SIZE == 0) return false;\n\t\telse return inner_enqueue<CanAlloc>(std::move(item));\n\t}\n\t\n\t// Enqueues a single item (by copying it) using an explicit producer token.\n\t// Allocates memory if required. Only fails if memory allocation fails (or\n\t// Traits::MAX_SUBQUEUE_SIZE has been defined and would be surpassed).\n\t// Thread-safe.\n\tinline bool enqueue(producer_token_t const& token, T const& item)\n\t{\n\t\treturn inner_enqueue<CanAlloc>(token, item);\n\t}\n\t\n\t// Enqueues a single item (by moving it, if possible) using an explicit producer token.\n\t// Allocates memory if required. Only fails if memory allocation fails (or\n\t// Traits::MAX_SUBQUEUE_SIZE has been defined and would be surpassed).\n\t// Thread-safe.\n\tinline bool enqueue(producer_token_t const& token, T&& item)\n\t{\n\t\treturn inner_enqueue<CanAlloc>(token, std::move(item));\n\t}\n\t\n\t// Enqueues several items.\n\t// Allocates memory if required. 
Only fails if memory allocation fails (or\n\t// implicit production is disabled because Traits::INITIAL_IMPLICIT_PRODUCER_HASH_SIZE\n\t// is 0, or Traits::MAX_SUBQUEUE_SIZE has been defined and would be surpassed).\n\t// Note: Use std::make_move_iterator if the elements should be moved instead of copied.\n\t// Thread-safe.\n\ttemplate<typename It>\n\tbool enqueue_bulk(It itemFirst, size_t count)\n\t{\n\t\tMOODYCAMEL_CONSTEXPR_IF (INITIAL_IMPLICIT_PRODUCER_HASH_SIZE == 0) return false;\n\t\telse return inner_enqueue_bulk<CanAlloc>(itemFirst, count);\n\t}\n\t\n\t// Enqueues several items using an explicit producer token.\n\t// Allocates memory if required. Only fails if memory allocation fails\n\t// (or Traits::MAX_SUBQUEUE_SIZE has been defined and would be surpassed).\n\t// Note: Use std::make_move_iterator if the elements should be moved\n\t// instead of copied.\n\t// Thread-safe.\n\ttemplate<typename It>\n\tbool enqueue_bulk(producer_token_t const& token, It itemFirst, size_t count)\n\t{\n\t\treturn inner_enqueue_bulk<CanAlloc>(token, itemFirst, count);\n\t}\n\t\n\t// Enqueues a single item (by copying it).\n\t// Does not allocate memory. 
Fails if not enough room to enqueue (or implicit\n\t// production is disabled because Traits::INITIAL_IMPLICIT_PRODUCER_HASH_SIZE\n\t// is 0).\n\t// Thread-safe.\n\tinline bool try_enqueue(T const& item)\n\t{\n\t\tMOODYCAMEL_CONSTEXPR_IF (INITIAL_IMPLICIT_PRODUCER_HASH_SIZE == 0) return false;\n\t\telse return inner_enqueue<CannotAlloc>(item);\n\t}\n\t\n\t// Enqueues a single item (by moving it, if possible).\n\t// Does not allocate memory (except for one-time implicit producer).\n\t// Fails if not enough room to enqueue (or implicit production is\n\t// disabled because Traits::INITIAL_IMPLICIT_PRODUCER_HASH_SIZE is 0).\n\t// Thread-safe.\n\tinline bool try_enqueue(T&& item)\n\t{\n\t\tMOODYCAMEL_CONSTEXPR_IF (INITIAL_IMPLICIT_PRODUCER_HASH_SIZE == 0) return false;\n\t\telse return inner_enqueue<CannotAlloc>(std::move(item));\n\t}\n\t\n\t// Enqueues a single item (by copying it) using an explicit producer token.\n\t// Does not allocate memory. Fails if not enough room to enqueue.\n\t// Thread-safe.\n\tinline bool try_enqueue(producer_token_t const& token, T const& item)\n\t{\n\t\treturn inner_enqueue<CannotAlloc>(token, item);\n\t}\n\t\n\t// Enqueues a single item (by moving it, if possible) using an explicit producer token.\n\t// Does not allocate memory. 
Fails if not enough room to enqueue.\n\t// Thread-safe.\n\tinline bool try_enqueue(producer_token_t const& token, T&& item)\n\t{\n\t\treturn inner_enqueue<CannotAlloc>(token, std::move(item));\n\t}\n\t\n\t// Enqueues several items.\n\t// Does not allocate memory (except for one-time implicit producer).\n\t// Fails if not enough room to enqueue (or implicit production is\n\t// disabled because Traits::INITIAL_IMPLICIT_PRODUCER_HASH_SIZE is 0).\n\t// Note: Use std::make_move_iterator if the elements should be moved\n\t// instead of copied.\n\t// Thread-safe.\n\ttemplate<typename It>\n\tbool try_enqueue_bulk(It itemFirst, size_t count)\n\t{\n\t\tMOODYCAMEL_CONSTEXPR_IF (INITIAL_IMPLICIT_PRODUCER_HASH_SIZE == 0) return false;\n\t\telse return inner_enqueue_bulk<CannotAlloc>(itemFirst, count);\n\t}\n\t\n\t// Enqueues several items using an explicit producer token.\n\t// Does not allocate memory. Fails if not enough room to enqueue.\n\t// Note: Use std::make_move_iterator if the elements should be moved\n\t// instead of copied.\n\t// Thread-safe.\n\ttemplate<typename It>\n\tbool try_enqueue_bulk(producer_token_t const& token, It itemFirst, size_t count)\n\t{\n\t\treturn inner_enqueue_bulk<CannotAlloc>(token, itemFirst, count);\n\t}\n\t\n\t\n\t\n\t// Attempts to dequeue from the queue.\n\t// Returns false if all producer streams appeared empty at the time they\n\t// were checked (so, the queue is likely but not guaranteed to be empty).\n\t// Never allocates. 
Thread-safe.\n\ttemplate<typename U>\n\tbool try_dequeue(U& item)\n\t{\n\t\t// Instead of simply trying each producer in turn (which could cause needless contention on the first\n\t\t// producer), we score them heuristically.\n\t\tsize_t nonEmptyCount = 0;\n\t\tProducerBase* best = nullptr;\n\t\tsize_t bestSize = 0;\n\t\tfor (auto ptr = producerListTail.load(std::memory_order_acquire); nonEmptyCount < 3 && ptr != nullptr; ptr = ptr->next_prod()) {\n\t\t\tauto size = ptr->size_approx();\n\t\t\tif (size > 0) {\n\t\t\t\tif (size > bestSize) {\n\t\t\t\t\tbestSize = size;\n\t\t\t\t\tbest = ptr;\n\t\t\t\t}\n\t\t\t\t++nonEmptyCount;\n\t\t\t}\n\t\t}\n\t\t\n\t\t// If there was at least one non-empty queue but it appears empty at the time\n\t\t// we try to dequeue from it, we need to make sure every queue's been tried\n\t\tif (nonEmptyCount > 0) {\n\t\t\tif ((details::likely)(best->dequeue(item))) {\n\t\t\t\treturn true;\n\t\t\t}\n\t\t\tfor (auto ptr = producerListTail.load(std::memory_order_acquire); ptr != nullptr; ptr = ptr->next_prod()) {\n\t\t\t\tif (ptr != best && ptr->dequeue(item)) {\n\t\t\t\t\treturn true;\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\treturn false;\n\t}\n\t\n\t// Attempts to dequeue from the queue.\n\t// Returns false if all producer streams appeared empty at the time they\n\t// were checked (so, the queue is likely but not guaranteed to be empty).\n\t// This differs from the try_dequeue(item) method in that this one does\n\t// not attempt to reduce contention by interleaving the order that producer\n\t// streams are dequeued from. So, using this method can reduce overall throughput\n\t// under contention, but will give more predictable results in single-threaded\n\t// consumer scenarios. This is mostly only useful for internal unit tests.\n\t// Never allocates. 
Thread-safe.\n\ttemplate<typename U>\n\tbool try_dequeue_non_interleaved(U& item)\n\t{\n\t\tfor (auto ptr = producerListTail.load(std::memory_order_acquire); ptr != nullptr; ptr = ptr->next_prod()) {\n\t\t\tif (ptr->dequeue(item)) {\n\t\t\t\treturn true;\n\t\t\t}\n\t\t}\n\t\treturn false;\n\t}\n\t\n\t// Attempts to dequeue from the queue using an explicit consumer token.\n\t// Returns false if all producer streams appeared empty at the time they\n\t// were checked (so, the queue is likely but not guaranteed to be empty).\n\t// Never allocates. Thread-safe.\n\ttemplate<typename U>\n\tbool try_dequeue(consumer_token_t& token, U& item)\n\t{\n\t\t// The idea is roughly as follows:\n\t\t// Every 256 items from one producer, make everyone rotate (increase the global offset) -> this means the highest efficiency consumer dictates the rotation speed of everyone else, more or less\n\t\t// If you see that the global offset has changed, you must reset your consumption counter and move to your designated place\n\t\t// If there's no items where you're supposed to be, keep moving until you find a producer with some items\n\t\t// If the global offset has not changed but you've run out of items to consume, move over from your current position until you find an producer with something in it\n\t\t\n\t\tif (token.desiredProducer == nullptr || token.lastKnownGlobalOffset != globalExplicitConsumerOffset.load(std::memory_order_relaxed)) {\n\t\t\tif (!update_current_producer_after_rotation(token)) {\n\t\t\t\treturn false;\n\t\t\t}\n\t\t}\n\t\t\n\t\t// If there was at least one non-empty queue but it appears empty at the time\n\t\t// we try to dequeue from it, we need to make sure every queue's been tried\n\t\tif (static_cast<ProducerBase*>(token.currentProducer)->dequeue(item)) {\n\t\t\tif (++token.itemsConsumedFromCurrent == EXPLICIT_CONSUMER_CONSUMPTION_QUOTA_BEFORE_ROTATE) {\n\t\t\t\tglobalExplicitConsumerOffset.fetch_add(1, std::memory_order_relaxed);\n\t\t\t}\n\t\t\treturn 
true;\n\t\t}\n\t\t\n\t\tauto tail = producerListTail.load(std::memory_order_acquire);\n\t\tauto ptr = static_cast<ProducerBase*>(token.currentProducer)->next_prod();\n\t\tif (ptr == nullptr) {\n\t\t\tptr = tail;\n\t\t}\n\t\twhile (ptr != static_cast<ProducerBase*>(token.currentProducer)) {\n\t\t\tif (ptr->dequeue(item)) {\n\t\t\t\ttoken.currentProducer = ptr;\n\t\t\t\ttoken.itemsConsumedFromCurrent = 1;\n\t\t\t\treturn true;\n\t\t\t}\n\t\t\tptr = ptr->next_prod();\n\t\t\tif (ptr == nullptr) {\n\t\t\t\tptr = tail;\n\t\t\t}\n\t\t}\n\t\treturn false;\n\t}\n\t\n\t// Attempts to dequeue several elements from the queue.\n\t// Returns the number of items actually dequeued.\n\t// Returns 0 if all producer streams appeared empty at the time they\n\t// were checked (so, the queue is likely but not guaranteed to be empty).\n\t// Never allocates. Thread-safe.\n\ttemplate<typename It>\n\tsize_t try_dequeue_bulk(It itemFirst, size_t max)\n\t{\n\t\tsize_t count = 0;\n\t\tfor (auto ptr = producerListTail.load(std::memory_order_acquire); ptr != nullptr; ptr = ptr->next_prod()) {\n\t\t\tcount += ptr->dequeue_bulk(itemFirst, max - count);\n\t\t\tif (count == max) {\n\t\t\t\tbreak;\n\t\t\t}\n\t\t}\n\t\treturn count;\n\t}\n\t\n\t// Attempts to dequeue several elements from the queue using an explicit consumer token.\n\t// Returns the number of items actually dequeued.\n\t// Returns 0 if all producer streams appeared empty at the time they\n\t// were checked (so, the queue is likely but not guaranteed to be empty).\n\t// Never allocates. 
Thread-safe.\n\ttemplate<typename It>\n\tsize_t try_dequeue_bulk(consumer_token_t& token, It itemFirst, size_t max)\n\t{\n\t\tif (token.desiredProducer == nullptr || token.lastKnownGlobalOffset != globalExplicitConsumerOffset.load(std::memory_order_relaxed)) {\n\t\t\tif (!update_current_producer_after_rotation(token)) {\n\t\t\t\treturn 0;\n\t\t\t}\n\t\t}\n\t\t\n\t\tsize_t count = static_cast<ProducerBase*>(token.currentProducer)->dequeue_bulk(itemFirst, max);\n\t\tif (count == max) {\n\t\t\tif ((token.itemsConsumedFromCurrent += static_cast<std::uint32_t>(max)) >= EXPLICIT_CONSUMER_CONSUMPTION_QUOTA_BEFORE_ROTATE) {\n\t\t\t\tglobalExplicitConsumerOffset.fetch_add(1, std::memory_order_relaxed);\n\t\t\t}\n\t\t\treturn max;\n\t\t}\n\t\ttoken.itemsConsumedFromCurrent += static_cast<std::uint32_t>(count);\n\t\tmax -= count;\n\t\t\n\t\tauto tail = producerListTail.load(std::memory_order_acquire);\n\t\tauto ptr = static_cast<ProducerBase*>(token.currentProducer)->next_prod();\n\t\tif (ptr == nullptr) {\n\t\t\tptr = tail;\n\t\t}\n\t\twhile (ptr != static_cast<ProducerBase*>(token.currentProducer)) {\n\t\t\tauto dequeued = ptr->dequeue_bulk(itemFirst, max);\n\t\t\tcount += dequeued;\n\t\t\tif (dequeued != 0) {\n\t\t\t\ttoken.currentProducer = ptr;\n\t\t\t\ttoken.itemsConsumedFromCurrent = static_cast<std::uint32_t>(dequeued);\n\t\t\t}\n\t\t\tif (dequeued == max) {\n\t\t\t\tbreak;\n\t\t\t}\n\t\t\tmax -= dequeued;\n\t\t\tptr = ptr->next_prod();\n\t\t\tif (ptr == nullptr) {\n\t\t\t\tptr = tail;\n\t\t\t}\n\t\t}\n\t\treturn count;\n\t}\n\t\n\t\n\t\n\t// Attempts to dequeue from a specific producer's inner queue.\n\t// If you happen to know which producer you want to dequeue from, this\n\t// is significantly faster than using the general-case try_dequeue methods.\n\t// Returns false if the producer's queue appeared empty at the time it\n\t// was checked (so, the queue is likely but not guaranteed to be empty).\n\t// Never allocates. 
Thread-safe.\n\ttemplate<typename U>\n\tinline bool try_dequeue_from_producer(producer_token_t const& producer, U& item)\n\t{\n\t\treturn static_cast<ExplicitProducer*>(producer.producer)->dequeue(item);\n\t}\n\t\n\t// Attempts to dequeue several elements from a specific producer's inner queue.\n\t// Returns the number of items actually dequeued.\n\t// If you happen to know which producer you want to dequeue from, this\n\t// is significantly faster than using the general-case try_dequeue methods.\n\t// Returns 0 if the producer's queue appeared empty at the time it\n\t// was checked (so, the queue is likely but not guaranteed to be empty).\n\t// Never allocates. Thread-safe.\n\ttemplate<typename It>\n\tinline size_t try_dequeue_bulk_from_producer(producer_token_t const& producer, It itemFirst, size_t max)\n\t{\n\t\treturn static_cast<ExplicitProducer*>(producer.producer)->dequeue_bulk(itemFirst, max);\n\t}\n\t\n\t\n\t// Returns an estimate of the total number of elements currently in the queue. This\n\t// estimate is only accurate if the queue has completely stabilized before it is called\n\t// (i.e. 
all enqueue and dequeue operations have completed and their memory effects are\n\t// visible on the calling thread, and no further operations start while this method is\n\t// being called).\n\t// Thread-safe.\n\tsize_t size_approx() const\n\t{\n\t\tsize_t size = 0;\n\t\tfor (auto ptr = producerListTail.load(std::memory_order_acquire); ptr != nullptr; ptr = ptr->next_prod()) {\n\t\t\tsize += ptr->size_approx();\n\t\t}\n\t\treturn size;\n\t}\n\t\n\t\n\t// Returns true if the underlying atomic variables used by\n\t// the queue are lock-free (they should be on most platforms).\n\t// Thread-safe.\n\tstatic bool is_lock_free()\n\t{\n\t\treturn\n\t\t\tdetails::static_is_lock_free<bool>::value == 2 &&\n\t\t\tdetails::static_is_lock_free<size_t>::value == 2 &&\n\t\t\tdetails::static_is_lock_free<std::uint32_t>::value == 2 &&\n\t\t\tdetails::static_is_lock_free<index_t>::value == 2 &&\n\t\t\tdetails::static_is_lock_free<void*>::value == 2 &&\n\t\t\tdetails::static_is_lock_free<typename details::thread_id_converter<details::thread_id_t>::thread_id_numeric_size_t>::value == 2;\n\t}\n\n\nprivate:\n\tfriend struct ProducerToken;\n\tfriend struct ConsumerToken;\n\tstruct ExplicitProducer;\n\tfriend struct ExplicitProducer;\n\tstruct ImplicitProducer;\n\tfriend struct ImplicitProducer;\n\tfriend class ConcurrentQueueTests;\n\t\t\n\tenum AllocationMode { CanAlloc, CannotAlloc };\n\t\n\t\n\t///////////////////////////////\n\t// Queue methods\n\t///////////////////////////////\n\t\n\ttemplate<AllocationMode canAlloc, typename U>\n\tinline bool inner_enqueue(producer_token_t const& token, U&& element)\n\t{\n\t\treturn static_cast<ExplicitProducer*>(token.producer)->ConcurrentQueue::ExplicitProducer::template enqueue<canAlloc>(std::forward<U>(element));\n\t}\n\t\n\ttemplate<AllocationMode canAlloc, typename U>\n\tinline bool inner_enqueue(U&& element)\n\t{\n\t\tauto producer = get_or_add_implicit_producer();\n\t\treturn producer == nullptr ? 
false : producer->ConcurrentQueue::ImplicitProducer::template enqueue<canAlloc>(std::forward<U>(element));\n\t}\n\t\n\ttemplate<AllocationMode canAlloc, typename It>\n\tinline bool inner_enqueue_bulk(producer_token_t const& token, It itemFirst, size_t count)\n\t{\n\t\treturn static_cast<ExplicitProducer*>(token.producer)->ConcurrentQueue::ExplicitProducer::template enqueue_bulk<canAlloc>(itemFirst, count);\n\t}\n\t\n\ttemplate<AllocationMode canAlloc, typename It>\n\tinline bool inner_enqueue_bulk(It itemFirst, size_t count)\n\t{\n\t\tauto producer = get_or_add_implicit_producer();\n\t\treturn producer == nullptr ? false : producer->ConcurrentQueue::ImplicitProducer::template enqueue_bulk<canAlloc>(itemFirst, count);\n\t}\n\t\n\tinline bool update_current_producer_after_rotation(consumer_token_t& token)\n\t{\n\t\t// Ah, there's been a rotation, figure out where we should be!\n\t\tauto tail = producerListTail.load(std::memory_order_acquire);\n\t\tif (token.desiredProducer == nullptr && tail == nullptr) {\n\t\t\treturn false;\n\t\t}\n\t\tauto prodCount = producerCount.load(std::memory_order_relaxed);\n\t\tauto globalOffset = globalExplicitConsumerOffset.load(std::memory_order_relaxed);\n\t\tif ((details::unlikely)(token.desiredProducer == nullptr)) {\n\t\t\t// Aha, first time we're dequeueing anything.\n\t\t\t// Figure out our local position\n\t\t\t// Note: offset is from start, not end, but we're traversing from end -- subtract from count first\n\t\t\tstd::uint32_t offset = prodCount - 1 - (token.initialOffset % prodCount);\n\t\t\ttoken.desiredProducer = tail;\n\t\t\tfor (std::uint32_t i = 0; i != offset; ++i) {\n\t\t\t\ttoken.desiredProducer = static_cast<ProducerBase*>(token.desiredProducer)->next_prod();\n\t\t\t\tif (token.desiredProducer == nullptr) {\n\t\t\t\t\ttoken.desiredProducer = tail;\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\t\n\t\tstd::uint32_t delta = globalOffset - token.lastKnownGlobalOffset;\n\t\tif (delta >= prodCount) {\n\t\t\tdelta = delta % 
prodCount;\n\t\t}\n\t\tfor (std::uint32_t i = 0; i != delta; ++i) {\n\t\t\ttoken.desiredProducer = static_cast<ProducerBase*>(token.desiredProducer)->next_prod();\n\t\t\tif (token.desiredProducer == nullptr) {\n\t\t\t\ttoken.desiredProducer = tail;\n\t\t\t}\n\t\t}\n\t\t\n\t\ttoken.lastKnownGlobalOffset = globalOffset;\n\t\ttoken.currentProducer = token.desiredProducer;\n\t\ttoken.itemsConsumedFromCurrent = 0;\n\t\treturn true;\n\t}\n\t\n\t\n\t///////////////////////////\n\t// Free list\n\t///////////////////////////\n\t\n\ttemplate <typename N>\n\tstruct FreeListNode\n\t{\n\t\tFreeListNode() : freeListRefs(0), freeListNext(nullptr) { }\n\t\t\n\t\tstd::atomic<std::uint32_t> freeListRefs;\n\t\tstd::atomic<N*> freeListNext;\n\t};\n\t\n\t// A simple CAS-based lock-free free list. Not the fastest thing in the world under heavy contention, but\n\t// simple and correct (assuming nodes are never freed until after the free list is destroyed), and fairly\n\t// speedy under low contention.\n\ttemplate<typename N>\t\t// N must inherit FreeListNode or have the same fields (and initialization of them)\n\tstruct FreeList\n\t{\n\t\tFreeList() : freeListHead(nullptr) { }\n\t\tFreeList(FreeList&& other) : freeListHead(other.freeListHead.load(std::memory_order_relaxed)) { other.freeListHead.store(nullptr, std::memory_order_relaxed); }\n\t\tvoid swap(FreeList& other) { details::swap_relaxed(freeListHead, other.freeListHead); }\n\t\t\n\t\tFreeList(FreeList const&) MOODYCAMEL_DELETE_FUNCTION;\n\t\tFreeList& operator=(FreeList const&) MOODYCAMEL_DELETE_FUNCTION;\n\t\t\n\t\tinline void add(N* node)\n\t\t{\n#ifdef MCDBGQ_NOLOCKFREE_FREELIST\n\t\t\tdebug::DebugLock lock(mutex);\n#endif\t\t\n\t\t\t// We know that the should-be-on-freelist bit is 0 at this point, so it's safe to\n\t\t\t// set it using a fetch_add\n\t\t\tif (node->freeListRefs.fetch_add(SHOULD_BE_ON_FREELIST, std::memory_order_acq_rel) == 0) {\n\t\t\t\t// Oh look! 
We were the last ones referencing this node, and we know\n\t\t\t\t// we want to add it to the free list, so let's do it!\n\t\t\t\tadd_knowing_refcount_is_zero(node);\n\t\t\t}\n\t\t}\n\t\t\n\t\tinline N* try_get()\n\t\t{\n#ifdef MCDBGQ_NOLOCKFREE_FREELIST\n\t\t\tdebug::DebugLock lock(mutex);\n#endif\t\t\n\t\t\tauto head = freeListHead.load(std::memory_order_acquire);\n\t\t\twhile (head != nullptr) {\n\t\t\t\tauto prevHead = head;\n\t\t\t\tauto refs = head->freeListRefs.load(std::memory_order_relaxed);\n\t\t\t\tif ((refs & REFS_MASK) == 0 || !head->freeListRefs.compare_exchange_strong(refs, refs + 1, std::memory_order_acquire, std::memory_order_relaxed)) {\n\t\t\t\t\thead = freeListHead.load(std::memory_order_acquire);\n\t\t\t\t\tcontinue;\n\t\t\t\t}\n\t\t\t\t\n\t\t\t\t// Good, reference count has been incremented (it wasn't at zero), which means we can read the\n\t\t\t\t// next and not worry about it changing between now and the time we do the CAS\n\t\t\t\tauto next = head->freeListNext.load(std::memory_order_relaxed);\n\t\t\t\tif (freeListHead.compare_exchange_strong(head, next, std::memory_order_acquire, std::memory_order_relaxed)) {\n\t\t\t\t\t// Yay, got the node. 
This means it was on the list, which means shouldBeOnFreeList must be false no\n\t\t\t\t\t// matter the refcount (because nobody else knows it's been taken off yet, it can't have been put back on).\n\t\t\t\t\tassert((head->freeListRefs.load(std::memory_order_relaxed) & SHOULD_BE_ON_FREELIST) == 0);\n\t\t\t\t\t\n\t\t\t\t\t// Decrease refcount twice, once for our ref, and once for the list's ref\n\t\t\t\t\thead->freeListRefs.fetch_sub(2, std::memory_order_release);\n\t\t\t\t\treturn head;\n\t\t\t\t}\n\t\t\t\t\n\t\t\t\t// OK, the head must have changed on us, but we still need to decrease the refcount we increased.\n\t\t\t\t// Note that we don't need to release any memory effects, but we do need to ensure that the reference\n\t\t\t\t// count decrement happens-after the CAS on the head.\n\t\t\t\trefs = prevHead->freeListRefs.fetch_sub(1, std::memory_order_acq_rel);\n\t\t\t\tif (refs == SHOULD_BE_ON_FREELIST + 1) {\n\t\t\t\t\tadd_knowing_refcount_is_zero(prevHead);\n\t\t\t\t}\n\t\t\t}\n\t\t\t\n\t\t\treturn nullptr;\n\t\t}\n\t\t\n\t\t// Useful for traversing the list when there's no contention (e.g. to destroy remaining nodes)\n\t\tN* head_unsafe() const { return freeListHead.load(std::memory_order_relaxed); }\n\t\t\n\tprivate:\n\t\tinline void add_knowing_refcount_is_zero(N* node)\n\t\t{\n\t\t\t// Since the refcount is zero, and nobody can increase it once it's zero (except us, and we run\n\t\t\t// only one copy of this method per node at a time, i.e. 
the single thread case), then we know\n\t\t\t// we can safely change the next pointer of the node; however, once the refcount is back above\n\t\t\t// zero, then other threads could increase it (happens under heavy contention, when the refcount\n\t\t\t// goes to zero in between a load and a refcount increment of a node in try_get, then back up to\n\t\t\t// something non-zero, then the refcount increment is done by the other thread) -- so, if the CAS\n\t\t\t// to add the node to the actual list fails, decrease the refcount and leave the add operation to\n\t\t\t// the next thread who puts the refcount back at zero (which could be us, hence the loop).\n\t\t\tauto head = freeListHead.load(std::memory_order_relaxed);\n\t\t\twhile (true) {\n\t\t\t\tnode->freeListNext.store(head, std::memory_order_relaxed);\n\t\t\t\tnode->freeListRefs.store(1, std::memory_order_release);\n\t\t\t\tif (!freeListHead.compare_exchange_strong(head, node, std::memory_order_release, std::memory_order_relaxed)) {\n\t\t\t\t\t// Hmm, the add failed, but we can only try again when the refcount goes back to zero\n\t\t\t\t\tif (node->freeListRefs.fetch_add(SHOULD_BE_ON_FREELIST - 1, std::memory_order_release) == 1) {\n\t\t\t\t\t\tcontinue;\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\treturn;\n\t\t\t}\n\t\t}\n\t\t\n\tprivate:\n\t\t// Implemented like a stack, but where node order doesn't matter (nodes are inserted out of order under contention)\n\t\tstd::atomic<N*> freeListHead;\n\t\n\tstatic const std::uint32_t REFS_MASK = 0x7FFFFFFF;\n\tstatic const std::uint32_t SHOULD_BE_ON_FREELIST = 0x80000000;\n\t\t\n#ifdef MCDBGQ_NOLOCKFREE_FREELIST\n\t\tdebug::DebugMutex mutex;\n#endif\n\t};\n\t\n\t\n\t///////////////////////////\n\t// Block\n\t///////////////////////////\n\t\n\tenum InnerQueueContext { implicit_context = 0, explicit_context = 1 };\n\t\n\tstruct Block\n\t{\n\t\tBlock()\n\t\t\t: next(nullptr), elementsCompletelyDequeued(0), freeListRefs(0), freeListNext(nullptr), shouldBeOnFreeList(false), 
dynamicallyAllocated(true)\n\t\t{\n#ifdef MCDBGQ_TRACKMEM\n\t\t\towner = nullptr;\n#endif\n\t\t}\n\t\t\n\t\ttemplate<InnerQueueContext context>\n\t\tinline bool is_empty() const\n\t\t{\n\t\t\tMOODYCAMEL_CONSTEXPR_IF (context == explicit_context && BLOCK_SIZE <= EXPLICIT_BLOCK_EMPTY_COUNTER_THRESHOLD) {\n\t\t\t\t// Check flags\n\t\t\t\tfor (size_t i = 0; i < BLOCK_SIZE; ++i) {\n\t\t\t\t\tif (!emptyFlags[i].load(std::memory_order_relaxed)) {\n\t\t\t\t\t\treturn false;\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\t\n\t\t\t\t// Aha, empty; make sure we have all other memory effects that happened before the empty flags were set\n\t\t\t\tstd::atomic_thread_fence(std::memory_order_acquire);\n\t\t\t\treturn true;\n\t\t\t}\n\t\t\telse {\n\t\t\t\t// Check counter\n\t\t\t\tif (elementsCompletelyDequeued.load(std::memory_order_relaxed) == BLOCK_SIZE) {\n\t\t\t\t\tstd::atomic_thread_fence(std::memory_order_acquire);\n\t\t\t\t\treturn true;\n\t\t\t\t}\n\t\t\t\tassert(elementsCompletelyDequeued.load(std::memory_order_relaxed) <= BLOCK_SIZE);\n\t\t\t\treturn false;\n\t\t\t}\n\t\t}\n\t\t\n\t\t// Returns true if the block is now empty (does not apply in explicit context)\n\t\ttemplate<InnerQueueContext context>\n\t\tinline bool set_empty(MOODYCAMEL_MAYBE_UNUSED index_t i)\n\t\t{\n\t\t\tMOODYCAMEL_CONSTEXPR_IF (context == explicit_context && BLOCK_SIZE <= EXPLICIT_BLOCK_EMPTY_COUNTER_THRESHOLD) {\n\t\t\t\t// Set flag\n\t\t\t\tassert(!emptyFlags[BLOCK_SIZE - 1 - static_cast<size_t>(i & static_cast<index_t>(BLOCK_SIZE - 1))].load(std::memory_order_relaxed));\n\t\t\t\temptyFlags[BLOCK_SIZE - 1 - static_cast<size_t>(i & static_cast<index_t>(BLOCK_SIZE - 1))].store(true, std::memory_order_release);\n\t\t\t\treturn false;\n\t\t\t}\n\t\t\telse {\n\t\t\t\t// Increment counter\n\t\t\t\tauto prevVal = elementsCompletelyDequeued.fetch_add(1, std::memory_order_release);\n\t\t\t\tassert(prevVal < BLOCK_SIZE);\n\t\t\t\treturn prevVal == BLOCK_SIZE - 1;\n\t\t\t}\n\t\t}\n\t\t\n\t\t// Sets multiple contiguous 
item statuses to 'empty' (assumes no wrapping and count > 0).\n\t\t// Returns true if the block is now empty (does not apply in explicit context).\n\t\ttemplate<InnerQueueContext context>\n\t\tinline bool set_many_empty(MOODYCAMEL_MAYBE_UNUSED index_t i, size_t count)\n\t\t{\n\t\t\tMOODYCAMEL_CONSTEXPR_IF (context == explicit_context && BLOCK_SIZE <= EXPLICIT_BLOCK_EMPTY_COUNTER_THRESHOLD) {\n\t\t\t\t// Set flags\n\t\t\t\tstd::atomic_thread_fence(std::memory_order_release);\n\t\t\t\ti = BLOCK_SIZE - 1 - static_cast<size_t>(i & static_cast<index_t>(BLOCK_SIZE - 1)) - count + 1;\n\t\t\t\tfor (size_t j = 0; j != count; ++j) {\n\t\t\t\t\tassert(!emptyFlags[i + j].load(std::memory_order_relaxed));\n\t\t\t\t\temptyFlags[i + j].store(true, std::memory_order_relaxed);\n\t\t\t\t}\n\t\t\t\treturn false;\n\t\t\t}\n\t\t\telse {\n\t\t\t\t// Increment counter\n\t\t\t\tauto prevVal = elementsCompletelyDequeued.fetch_add(count, std::memory_order_release);\n\t\t\t\tassert(prevVal + count <= BLOCK_SIZE);\n\t\t\t\treturn prevVal + count == BLOCK_SIZE;\n\t\t\t}\n\t\t}\n\t\t\n\t\ttemplate<InnerQueueContext context>\n\t\tinline void set_all_empty()\n\t\t{\n\t\t\tMOODYCAMEL_CONSTEXPR_IF (context == explicit_context && BLOCK_SIZE <= EXPLICIT_BLOCK_EMPTY_COUNTER_THRESHOLD) {\n\t\t\t\t// Set all flags\n\t\t\t\tfor (size_t i = 0; i != BLOCK_SIZE; ++i) {\n\t\t\t\t\temptyFlags[i].store(true, std::memory_order_relaxed);\n\t\t\t\t}\n\t\t\t}\n\t\t\telse {\n\t\t\t\t// Reset counter\n\t\t\t\telementsCompletelyDequeued.store(BLOCK_SIZE, std::memory_order_relaxed);\n\t\t\t}\n\t\t}\n\t\t\n\t\ttemplate<InnerQueueContext context>\n\t\tinline void reset_empty()\n\t\t{\n\t\t\tMOODYCAMEL_CONSTEXPR_IF (context == explicit_context && BLOCK_SIZE <= EXPLICIT_BLOCK_EMPTY_COUNTER_THRESHOLD) {\n\t\t\t\t// Reset flags\n\t\t\t\tfor (size_t i = 0; i != BLOCK_SIZE; ++i) {\n\t\t\t\t\temptyFlags[i].store(false, std::memory_order_relaxed);\n\t\t\t\t}\n\t\t\t}\n\t\t\telse {\n\t\t\t\t// Reset 
counter\n\t\t\t\telementsCompletelyDequeued.store(0, std::memory_order_relaxed);\n\t\t\t}\n\t\t}\n\t\t\n\t\tinline T* operator[](index_t idx) MOODYCAMEL_NOEXCEPT { return static_cast<T*>(static_cast<void*>(elements)) + static_cast<size_t>(idx & static_cast<index_t>(BLOCK_SIZE - 1)); }\n\t\tinline T const* operator[](index_t idx) const MOODYCAMEL_NOEXCEPT { return static_cast<T const*>(static_cast<void const*>(elements)) + static_cast<size_t>(idx & static_cast<index_t>(BLOCK_SIZE - 1)); }\n\t\t\n\tprivate:\n\t\tstatic_assert(std::alignment_of<T>::value <= sizeof(T), \"The queue does not support types with an alignment greater than their size at this time\");\n\t\tMOODYCAMEL_ALIGNED_TYPE_LIKE(char[sizeof(T) * BLOCK_SIZE], T) elements;\n\tpublic:\n\t\tBlock* next;\n\t\tstd::atomic<size_t> elementsCompletelyDequeued;\n\t\tstd::atomic<bool> emptyFlags[BLOCK_SIZE <= EXPLICIT_BLOCK_EMPTY_COUNTER_THRESHOLD ? BLOCK_SIZE : 1];\n\tpublic:\n\t\tstd::atomic<std::uint32_t> freeListRefs;\n\t\tstd::atomic<Block*> freeListNext;\n\t\tstd::atomic<bool> shouldBeOnFreeList;\n\t\tbool dynamicallyAllocated;\t\t// Perhaps a better name for this would be 'isNotPartOfInitialBlockPool'\n\t\t\n#ifdef MCDBGQ_TRACKMEM\n\t\tvoid* owner;\n#endif\n\t};\n\tstatic_assert(std::alignment_of<Block>::value >= std::alignment_of<T>::value, \"Internal error: Blocks must be at least as aligned as the type they are wrapping\");\n\n\n#ifdef MCDBGQ_TRACKMEM\npublic:\n\tstruct MemStats;\nprivate:\n#endif\n\t\n\t///////////////////////////\n\t// Producer base\n\t///////////////////////////\n\t\n\tstruct ProducerBase : public details::ConcurrentQueueProducerTypelessBase\n\t{\n\t\tProducerBase(ConcurrentQueue* parent_, bool isExplicit_) :\n\t\t\ttailIndex(0),\n\t\t\theadIndex(0),\n\t\t\tdequeueOptimisticCount(0),\n\t\t\tdequeueOvercommit(0),\n\t\t\ttailBlock(nullptr),\n\t\t\tisExplicit(isExplicit_),\n\t\t\tparent(parent_)\n\t\t{\n\t\t}\n\t\t\n\t\tvirtual ~ProducerBase() { }\n\t\t\n\t\ttemplate<typename 
U>\n\t\tinline bool dequeue(U& element)\n\t\t{\n\t\t\tif (isExplicit) {\n\t\t\t\treturn static_cast<ExplicitProducer*>(this)->dequeue(element);\n\t\t\t}\n\t\t\telse {\n\t\t\t\treturn static_cast<ImplicitProducer*>(this)->dequeue(element);\n\t\t\t}\n\t\t}\n\t\t\n\t\ttemplate<typename It>\n\t\tinline size_t dequeue_bulk(It& itemFirst, size_t max)\n\t\t{\n\t\t\tif (isExplicit) {\n\t\t\t\treturn static_cast<ExplicitProducer*>(this)->dequeue_bulk(itemFirst, max);\n\t\t\t}\n\t\t\telse {\n\t\t\t\treturn static_cast<ImplicitProducer*>(this)->dequeue_bulk(itemFirst, max);\n\t\t\t}\n\t\t}\n\t\t\n\t\tinline ProducerBase* next_prod() const { return static_cast<ProducerBase*>(next); }\n\t\t\n\t\tinline size_t size_approx() const\n\t\t{\n\t\t\tauto tail = tailIndex.load(std::memory_order_relaxed);\n\t\t\tauto head = headIndex.load(std::memory_order_relaxed);\n\t\t\treturn details::circular_less_than(head, tail) ? static_cast<size_t>(tail - head) : 0;\n\t\t}\n\t\t\n\t\tinline index_t getTail() const { return tailIndex.load(std::memory_order_relaxed); }\n\tprotected:\n\t\tstd::atomic<index_t> tailIndex;\t\t// Where to enqueue to next\n\t\tstd::atomic<index_t> headIndex;\t\t// Where to dequeue from next\n\t\t\n\t\tstd::atomic<index_t> dequeueOptimisticCount;\n\t\tstd::atomic<index_t> dequeueOvercommit;\n\t\t\n\t\tBlock* tailBlock;\n\t\t\n\tpublic:\n\t\tbool isExplicit;\n\t\tConcurrentQueue* parent;\n\t\t\n\tprotected:\n#ifdef MCDBGQ_TRACKMEM\n\t\tfriend struct MemStats;\n#endif\n\t};\n\t\n\t\n\t///////////////////////////\n\t// Explicit queue\n\t///////////////////////////\n\t\t\n\tstruct ExplicitProducer : public ProducerBase\n\t{\n\t\texplicit ExplicitProducer(ConcurrentQueue* parent_) :\n\t\t\tProducerBase(parent_, true),\n\t\t\tblockIndex(nullptr),\n\t\t\tpr_blockIndexSlotsUsed(0),\n\t\t\tpr_blockIndexSize(EXPLICIT_INITIAL_INDEX_SIZE >> 1),\n\t\t\tpr_blockIndexFront(0),\n\t\t\tpr_blockIndexEntries(nullptr),\n\t\t\tpr_blockIndexRaw(nullptr)\n\t\t{\n\t\t\tsize_t 
poolBasedIndexSize = details::ceil_to_pow_2(parent_->initialBlockPoolSize) >> 1;\n\t\t\tif (poolBasedIndexSize > pr_blockIndexSize) {\n\t\t\t\tpr_blockIndexSize = poolBasedIndexSize;\n\t\t\t}\n\t\t\t\n\t\t\tnew_block_index(0);\t\t// This creates an index with double the number of current entries, i.e. EXPLICIT_INITIAL_INDEX_SIZE\n\t\t}\n\t\t\n\t\t~ExplicitProducer()\n\t\t{\n\t\t\t// Destruct any elements not yet dequeued.\n\t\t\t// Since we're in the destructor, we can assume all elements\n\t\t\t// are either completely dequeued or completely not (no halfways).\n\t\t\tif (this->tailBlock != nullptr) {\t\t// Note this means there must be a block index too\n\t\t\t\t// First find the block that's partially dequeued, if any\n\t\t\t\tBlock* halfDequeuedBlock = nullptr;\n\t\t\t\tif ((this->headIndex.load(std::memory_order_relaxed) & static_cast<index_t>(BLOCK_SIZE - 1)) != 0) {\n\t\t\t\t\t// The head's not on a block boundary, meaning a block somewhere is partially dequeued\n\t\t\t\t\t// (or the head block is the tail block and was fully dequeued, but the head/tail are still not on a boundary)\n\t\t\t\t\tsize_t i = (pr_blockIndexFront - pr_blockIndexSlotsUsed) & (pr_blockIndexSize - 1);\n\t\t\t\t\twhile (details::circular_less_than<index_t>(pr_blockIndexEntries[i].base + BLOCK_SIZE, this->headIndex.load(std::memory_order_relaxed))) {\n\t\t\t\t\t\ti = (i + 1) & (pr_blockIndexSize - 1);\n\t\t\t\t\t}\n\t\t\t\t\tassert(details::circular_less_than<index_t>(pr_blockIndexEntries[i].base, this->headIndex.load(std::memory_order_relaxed)));\n\t\t\t\t\thalfDequeuedBlock = pr_blockIndexEntries[i].block;\n\t\t\t\t}\n\t\t\t\t\n\t\t\t\t// Start at the head block (note the first line in the loop gives us the head from the tail on the first iteration)\n\t\t\t\tauto block = this->tailBlock;\n\t\t\t\tdo {\n\t\t\t\t\tblock = block->next;\n\t\t\t\t\tif (block->ConcurrentQueue::Block::template is_empty<explicit_context>()) {\n\t\t\t\t\t\tcontinue;\n\t\t\t\t\t}\n\t\t\t\t\t\n\t\t\t\t\tsize_t i 
= 0;\t// Offset into block\n\t\t\t\t\tif (block == halfDequeuedBlock) {\n\t\t\t\t\t\ti = static_cast<size_t>(this->headIndex.load(std::memory_order_relaxed) & static_cast<index_t>(BLOCK_SIZE - 1));\n\t\t\t\t\t}\n\t\t\t\t\t\n\t\t\t\t\t// Walk through all the items in the block; if this is the tail block, we need to stop when we reach the tail index\n\t\t\t\t\tauto lastValidIndex = (this->tailIndex.load(std::memory_order_relaxed) & static_cast<index_t>(BLOCK_SIZE - 1)) == 0 ? BLOCK_SIZE : static_cast<size_t>(this->tailIndex.load(std::memory_order_relaxed) & static_cast<index_t>(BLOCK_SIZE - 1));\n\t\t\t\t\twhile (i != BLOCK_SIZE && (block != this->tailBlock || i != lastValidIndex)) {\n\t\t\t\t\t\t(*block)[i++]->~T();\n\t\t\t\t\t}\n\t\t\t\t} while (block != this->tailBlock);\n\t\t\t}\n\t\t\t\n\t\t\t// Destroy all blocks that we own\n\t\t\tif (this->tailBlock != nullptr) {\n\t\t\t\tauto block = this->tailBlock;\n\t\t\t\tdo {\n\t\t\t\t\tauto nextBlock = block->next;\n\t\t\t\t\tif (block->dynamicallyAllocated) {\n\t\t\t\t\t\tdestroy(block);\n\t\t\t\t\t}\n\t\t\t\t\telse {\n\t\t\t\t\t\tthis->parent->add_block_to_free_list(block);\n\t\t\t\t\t}\n\t\t\t\t\tblock = nextBlock;\n\t\t\t\t} while (block != this->tailBlock);\n\t\t\t}\n\t\t\t\n\t\t\t// Destroy the block indices\n\t\t\tauto header = static_cast<BlockIndexHeader*>(pr_blockIndexRaw);\n\t\t\twhile (header != nullptr) {\n\t\t\t\tauto prev = static_cast<BlockIndexHeader*>(header->prev);\n\t\t\t\theader->~BlockIndexHeader();\n\t\t\t\t(Traits::free)(header);\n\t\t\t\theader = prev;\n\t\t\t}\n\t\t}\n\t\t\n\t\ttemplate<AllocationMode allocMode, typename U>\n\t\tinline bool enqueue(U&& element)\n\t\t{\n\t\t\tindex_t currentTailIndex = this->tailIndex.load(std::memory_order_relaxed);\n\t\t\tindex_t newTailIndex = 1 + currentTailIndex;\n\t\t\tif ((currentTailIndex & static_cast<index_t>(BLOCK_SIZE - 1)) == 0) {\n\t\t\t\t// We reached the end of a block, start a new one\n\t\t\t\tauto startBlock = this->tailBlock;\n\t\t\t\tauto 
originalBlockIndexSlotsUsed = pr_blockIndexSlotsUsed;\n\t\t\t\tif (this->tailBlock != nullptr && this->tailBlock->next->ConcurrentQueue::Block::template is_empty<explicit_context>()) {\n\t\t\t\t\t// We can re-use the block ahead of us, it's empty!\t\t\t\t\t\n\t\t\t\t\tthis->tailBlock = this->tailBlock->next;\n\t\t\t\t\tthis->tailBlock->ConcurrentQueue::Block::template reset_empty<explicit_context>();\n\t\t\t\t\t\n\t\t\t\t\t// We'll put the block on the block index (guaranteed to be room since we're conceptually removing the\n\t\t\t\t\t// last block from it first -- except instead of removing then adding, we can just overwrite).\n\t\t\t\t\t// Note that there must be a valid block index here, since even if allocation failed in the ctor,\n\t\t\t\t\t// it would have been re-attempted when adding the first block to the queue; since there is such\n\t\t\t\t\t// a block, a block index must have been successfully allocated.\n\t\t\t\t}\n\t\t\t\telse {\n\t\t\t\t\t// Whatever head value we see here is >= the last value we saw here (relatively),\n\t\t\t\t\t// and <= its current value. Since we have the most recent tail, the head must be\n\t\t\t\t\t// <= to it.\n\t\t\t\t\tauto head = this->headIndex.load(std::memory_order_relaxed);\n\t\t\t\t\tassert(!details::circular_less_than<index_t>(currentTailIndex, head));\n\t\t\t\t\tif (!details::circular_less_than<index_t>(head, currentTailIndex + BLOCK_SIZE)\n\t\t\t\t\t\t|| (MAX_SUBQUEUE_SIZE != details::const_numeric_max<size_t>::value && (MAX_SUBQUEUE_SIZE == 0 || MAX_SUBQUEUE_SIZE - BLOCK_SIZE < currentTailIndex - head))) {\n\t\t\t\t\t\t// We can't enqueue in another block because there's not enough leeway -- the\n\t\t\t\t\t\t// tail could surpass the head by the time the block fills up! 
(Or we'll exceed\n\t\t\t\t\t\t// the size limit, if the second part of the condition was true.)\n\t\t\t\t\t\treturn false;\n\t\t\t\t\t}\n\t\t\t\t\t// We're going to need a new block; check that the block index has room\n\t\t\t\t\tif (pr_blockIndexRaw == nullptr || pr_blockIndexSlotsUsed == pr_blockIndexSize) {\n\t\t\t\t\t\t// Hmm, the circular block index is already full -- we'll need\n\t\t\t\t\t\t// to allocate a new index. Note pr_blockIndexRaw can only be nullptr if\n\t\t\t\t\t\t// the initial allocation failed in the constructor.\n\t\t\t\t\t\t\n\t\t\t\t\t\tMOODYCAMEL_CONSTEXPR_IF (allocMode == CannotAlloc) {\n\t\t\t\t\t\t\treturn false;\n\t\t\t\t\t\t}\n\t\t\t\t\t\telse if (!new_block_index(pr_blockIndexSlotsUsed)) {\n\t\t\t\t\t\t\treturn false;\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t\t\n\t\t\t\t\t// Insert a new block in the circular linked list\n\t\t\t\t\tauto newBlock = this->parent->ConcurrentQueue::template requisition_block<allocMode>();\n\t\t\t\t\tif (newBlock == nullptr) {\n\t\t\t\t\t\treturn false;\n\t\t\t\t\t}\n#ifdef MCDBGQ_TRACKMEM\n\t\t\t\t\tnewBlock->owner = this;\n#endif\n\t\t\t\t\tnewBlock->ConcurrentQueue::Block::template reset_empty<explicit_context>();\n\t\t\t\t\tif (this->tailBlock == nullptr) {\n\t\t\t\t\t\tnewBlock->next = newBlock;\n\t\t\t\t\t}\n\t\t\t\t\telse {\n\t\t\t\t\t\tnewBlock->next = this->tailBlock->next;\n\t\t\t\t\t\tthis->tailBlock->next = newBlock;\n\t\t\t\t\t}\n\t\t\t\t\tthis->tailBlock = newBlock;\n\t\t\t\t\t++pr_blockIndexSlotsUsed;\n\t\t\t\t}\n\n\t\t\t\tMOODYCAMEL_CONSTEXPR_IF (!MOODYCAMEL_NOEXCEPT_CTOR(T, U, new (static_cast<T*>(nullptr)) T(std::forward<U>(element)))) {\n\t\t\t\t\t// The constructor may throw. We want the element not to appear in the queue in\n\t\t\t\t\t// that case (without corrupting the queue):\n\t\t\t\t\tMOODYCAMEL_TRY {\n\t\t\t\t\t\tnew ((*this->tailBlock)[currentTailIndex]) T(std::forward<U>(element));\n\t\t\t\t\t}\n\t\t\t\t\tMOODYCAMEL_CATCH (...) 
{\n\t\t\t\t\t\t// Revert change to the current block, but leave the new block available\n\t\t\t\t\t\t// for next time\n\t\t\t\t\t\tpr_blockIndexSlotsUsed = originalBlockIndexSlotsUsed;\n\t\t\t\t\t\tthis->tailBlock = startBlock == nullptr ? this->tailBlock : startBlock;\n\t\t\t\t\t\tMOODYCAMEL_RETHROW;\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\telse {\n\t\t\t\t\t(void)startBlock;\n\t\t\t\t\t(void)originalBlockIndexSlotsUsed;\n\t\t\t\t}\n\t\t\t\t\n\t\t\t\t// Add block to block index\n\t\t\t\tauto& entry = blockIndex.load(std::memory_order_relaxed)->entries[pr_blockIndexFront];\n\t\t\t\tentry.base = currentTailIndex;\n\t\t\t\tentry.block = this->tailBlock;\n\t\t\t\tblockIndex.load(std::memory_order_relaxed)->front.store(pr_blockIndexFront, std::memory_order_release);\n\t\t\t\tpr_blockIndexFront = (pr_blockIndexFront + 1) & (pr_blockIndexSize - 1);\n\t\t\t\t\n\t\t\t\tMOODYCAMEL_CONSTEXPR_IF (!MOODYCAMEL_NOEXCEPT_CTOR(T, U, new (static_cast<T*>(nullptr)) T(std::forward<U>(element)))) {\n\t\t\t\t\tthis->tailIndex.store(newTailIndex, std::memory_order_release);\n\t\t\t\t\treturn true;\n\t\t\t\t}\n\t\t\t}\n\t\t\t\n\t\t\t// Enqueue\n\t\t\tnew ((*this->tailBlock)[currentTailIndex]) T(std::forward<U>(element));\n\t\t\t\n\t\t\tthis->tailIndex.store(newTailIndex, std::memory_order_release);\n\t\t\treturn true;\n\t\t}\n\t\t\n\t\ttemplate<typename U>\n\t\tbool dequeue(U& element)\n\t\t{\n\t\t\tauto tail = this->tailIndex.load(std::memory_order_relaxed);\n\t\t\tauto overcommit = this->dequeueOvercommit.load(std::memory_order_relaxed);\n\t\t\tif (details::circular_less_than<index_t>(this->dequeueOptimisticCount.load(std::memory_order_relaxed) - overcommit, tail)) {\n\t\t\t\t// Might be something to dequeue, let's give it a try\n\t\t\t\t\n\t\t\t\t// Note that this if is purely for performance purposes in the common case when the queue is\n\t\t\t\t// empty and the values are eventually consistent -- we may enter here spuriously.\n\t\t\t\t\n\t\t\t\t// Note that whatever the values of overcommit 
and tail are, they are not going to change (unless we\n\t\t\t\t// change them) and must be the same value at this point (inside the if) as when the if condition was\n\t\t\t\t// evaluated.\n\n\t\t\t\t// We insert an acquire fence here to synchronize-with the release upon incrementing dequeueOvercommit below.\n\t\t\t\t// This ensures that whatever the value we got loaded into overcommit, the load of dequeueOptisticCount in\n\t\t\t\t// the fetch_add below will result in a value at least as recent as that (and therefore at least as large).\n\t\t\t\t// Note that I believe a compiler (signal) fence here would be sufficient due to the nature of fetch_add (all\n\t\t\t\t// read-modify-write operations are guaranteed to work on the latest value in the modification order), but\n\t\t\t\t// unfortunately that can't be shown to be correct using only the C++11 standard.\n\t\t\t\t// See http://stackoverflow.com/questions/18223161/what-are-the-c11-memory-ordering-guarantees-in-this-corner-case\n\t\t\t\tstd::atomic_thread_fence(std::memory_order_acquire);\n\t\t\t\t\n\t\t\t\t// Increment optimistic counter, then check if it went over the boundary\n\t\t\t\tauto myDequeueCount = this->dequeueOptimisticCount.fetch_add(1, std::memory_order_relaxed);\n\t\t\t\t\n\t\t\t\t// Note that since dequeueOvercommit must be <= dequeueOptimisticCount (because dequeueOvercommit is only ever\n\t\t\t\t// incremented after dequeueOptimisticCount -- this is enforced in the `else` block below), and since we now\n\t\t\t\t// have a version of dequeueOptimisticCount that is at least as recent as overcommit (due to the release upon\n\t\t\t\t// incrementing dequeueOvercommit and the acquire above that synchronizes with it), overcommit <= myDequeueCount.\n\t\t\t\t// However, we can't assert this since both dequeueOptimisticCount and dequeueOvercommit may (independently)\n\t\t\t\t// overflow; in such a case, though, the logic still holds since the difference between the two is maintained.\n\t\t\t\t\n\t\t\t\t// 
Note that we reload tail here in case it changed; it will be the same value as before or greater, since\n\t\t\t\t// this load is sequenced after (happens after) the earlier load above. This is supported by read-read\n\t\t\t\t// coherency (as defined in the standard), explained here: http://en.cppreference.com/w/cpp/atomic/memory_order\n\t\t\t\ttail = this->tailIndex.load(std::memory_order_acquire);\n\t\t\t\tif ((details::likely)(details::circular_less_than<index_t>(myDequeueCount - overcommit, tail))) {\n\t\t\t\t\t// Guaranteed to be at least one element to dequeue!\n\t\t\t\t\t\n\t\t\t\t\t// Get the index. Note that since there's guaranteed to be at least one element, this\n\t\t\t\t\t// will never exceed tail. We need to do an acquire-release fence here since it's possible\n\t\t\t\t\t// that whatever condition got us to this point was for an earlier enqueued element (that\n\t\t\t\t\t// we already see the memory effects for), but that by the time we increment somebody else\n\t\t\t\t\t// has incremented it, and we need to see the memory effects for *that* element, which is\n\t\t\t\t\t// in such a case is necessarily visible on the thread that incremented it in the first\n\t\t\t\t\t// place with the more current condition (they must have acquired a tail that is at least\n\t\t\t\t\t// as recent).\n\t\t\t\t\tauto index = this->headIndex.fetch_add(1, std::memory_order_acq_rel);\n\t\t\t\t\t\n\t\t\t\t\t\n\t\t\t\t\t// Determine which block the element is in\n\t\t\t\t\t\n\t\t\t\t\tauto localBlockIndex = blockIndex.load(std::memory_order_acquire);\n\t\t\t\t\tauto localBlockIndexHead = localBlockIndex->front.load(std::memory_order_acquire);\n\t\t\t\t\t\n\t\t\t\t\t// We need to be careful here about subtracting and dividing because of index wrap-around.\n\t\t\t\t\t// When an index wraps, we need to preserve the sign of the offset when dividing it by the\n\t\t\t\t\t// block size (in order to get a correct signed block count offset in all cases):\n\t\t\t\t\tauto headBase = 
localBlockIndex->entries[localBlockIndexHead].base;\n\t\t\t\t\tauto blockBaseIndex = index & ~static_cast<index_t>(BLOCK_SIZE - 1);\n\t\t\t\t\tauto offset = static_cast<size_t>(static_cast<typename std::make_signed<index_t>::type>(blockBaseIndex - headBase) / BLOCK_SIZE);\n\t\t\t\t\tauto block = localBlockIndex->entries[(localBlockIndexHead + offset) & (localBlockIndex->size - 1)].block;\n\t\t\t\t\t\n\t\t\t\t\t// Dequeue\n\t\t\t\t\tauto& el = *((*block)[index]);\n\t\t\t\t\tif (!MOODYCAMEL_NOEXCEPT_ASSIGN(T, T&&, element = std::move(el))) {\n\t\t\t\t\t\t// Make sure the element is still fully dequeued and destroyed even if the assignment\n\t\t\t\t\t\t// throws\n\t\t\t\t\t\tstruct Guard {\n\t\t\t\t\t\t\tBlock* block;\n\t\t\t\t\t\t\tindex_t index;\n\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t~Guard()\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t(*block)[index]->~T();\n\t\t\t\t\t\t\t\tblock->ConcurrentQueue::Block::template set_empty<explicit_context>(index);\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t} guard = { block, index };\n\n\t\t\t\t\t\telement = std::move(el); // NOLINT\n\t\t\t\t\t}\n\t\t\t\t\telse {\n\t\t\t\t\t\telement = std::move(el); // NOLINT\n\t\t\t\t\t\tel.~T(); // NOLINT\n\t\t\t\t\t\tblock->ConcurrentQueue::Block::template set_empty<explicit_context>(index);\n\t\t\t\t\t}\n\t\t\t\t\t\n\t\t\t\t\treturn true;\n\t\t\t\t}\n\t\t\t\telse {\n\t\t\t\t\t// Wasn't anything to dequeue after all; make the effective dequeue count eventually consistent\n\t\t\t\t\tthis->dequeueOvercommit.fetch_add(1, std::memory_order_release);\t\t// Release so that the fetch_add on dequeueOptimisticCount is guaranteed to happen before this write\n\t\t\t\t}\n\t\t\t}\n\t\t\n\t\t\treturn false;\n\t\t}\n\t\t\n\t\ttemplate<AllocationMode allocMode, typename It>\n\t\tbool MOODYCAMEL_NO_TSAN enqueue_bulk(It itemFirst, size_t count)\n\t\t{\n\t\t\t// First, we need to make sure we have enough room to enqueue all of the elements;\n\t\t\t// this means pre-allocating blocks and putting them in the block index (but only if\n\t\t\t// 
all the allocations succeeded).\n\t\t\tindex_t startTailIndex = this->tailIndex.load(std::memory_order_relaxed);\n\t\t\tauto startBlock = this->tailBlock;\n\t\t\tauto originalBlockIndexFront = pr_blockIndexFront;\n\t\t\tauto originalBlockIndexSlotsUsed = pr_blockIndexSlotsUsed;\n\t\t\t\n\t\t\tBlock* firstAllocatedBlock = nullptr;\n\t\t\t\n\t\t\t// Figure out how many blocks we'll need to allocate, and do so\n\t\t\tsize_t blockBaseDiff = ((startTailIndex + count - 1) & ~static_cast<index_t>(BLOCK_SIZE - 1)) - ((startTailIndex - 1) & ~static_cast<index_t>(BLOCK_SIZE - 1));\n\t\t\tindex_t currentTailIndex = (startTailIndex - 1) & ~static_cast<index_t>(BLOCK_SIZE - 1);\n\t\t\tif (blockBaseDiff > 0) {\n\t\t\t\t// Allocate as many blocks as possible from ahead\n\t\t\t\twhile (blockBaseDiff > 0 && this->tailBlock != nullptr && this->tailBlock->next != firstAllocatedBlock && this->tailBlock->next->ConcurrentQueue::Block::template is_empty<explicit_context>()) {\n\t\t\t\t\tblockBaseDiff -= static_cast<index_t>(BLOCK_SIZE);\n\t\t\t\t\tcurrentTailIndex += static_cast<index_t>(BLOCK_SIZE);\n\t\t\t\t\t\n\t\t\t\t\tthis->tailBlock = this->tailBlock->next;\n\t\t\t\t\tfirstAllocatedBlock = firstAllocatedBlock == nullptr ? 
this->tailBlock : firstAllocatedBlock;\n\t\t\t\t\t\n\t\t\t\t\tauto& entry = blockIndex.load(std::memory_order_relaxed)->entries[pr_blockIndexFront];\n\t\t\t\t\tentry.base = currentTailIndex;\n\t\t\t\t\tentry.block = this->tailBlock;\n\t\t\t\t\tpr_blockIndexFront = (pr_blockIndexFront + 1) & (pr_blockIndexSize - 1);\n\t\t\t\t}\n\t\t\t\t\n\t\t\t\t// Now allocate as many blocks as necessary from the block pool\n\t\t\t\twhile (blockBaseDiff > 0) {\n\t\t\t\t\tblockBaseDiff -= static_cast<index_t>(BLOCK_SIZE);\n\t\t\t\t\tcurrentTailIndex += static_cast<index_t>(BLOCK_SIZE);\n\t\t\t\t\t\n\t\t\t\t\tauto head = this->headIndex.load(std::memory_order_relaxed);\n\t\t\t\t\tassert(!details::circular_less_than<index_t>(currentTailIndex, head));\n\t\t\t\t\tbool full = !details::circular_less_than<index_t>(head, currentTailIndex + BLOCK_SIZE) || (MAX_SUBQUEUE_SIZE != details::const_numeric_max<size_t>::value && (MAX_SUBQUEUE_SIZE == 0 || MAX_SUBQUEUE_SIZE - BLOCK_SIZE < currentTailIndex - head));\n\t\t\t\t\tif (pr_blockIndexRaw == nullptr || pr_blockIndexSlotsUsed == pr_blockIndexSize || full) {\n\t\t\t\t\t\tMOODYCAMEL_CONSTEXPR_IF (allocMode == CannotAlloc) {\n\t\t\t\t\t\t\t// Failed to allocate, undo changes (but keep injected blocks)\n\t\t\t\t\t\t\tpr_blockIndexFront = originalBlockIndexFront;\n\t\t\t\t\t\t\tpr_blockIndexSlotsUsed = originalBlockIndexSlotsUsed;\n\t\t\t\t\t\t\tthis->tailBlock = startBlock == nullptr ? firstAllocatedBlock : startBlock;\n\t\t\t\t\t\t\treturn false;\n\t\t\t\t\t\t}\n\t\t\t\t\t\telse if (full || !new_block_index(originalBlockIndexSlotsUsed)) {\n\t\t\t\t\t\t\t// Failed to allocate, undo changes (but keep injected blocks)\n\t\t\t\t\t\t\tpr_blockIndexFront = originalBlockIndexFront;\n\t\t\t\t\t\t\tpr_blockIndexSlotsUsed = originalBlockIndexSlotsUsed;\n\t\t\t\t\t\t\tthis->tailBlock = startBlock == nullptr ? 
firstAllocatedBlock : startBlock;\n\t\t\t\t\t\t\treturn false;\n\t\t\t\t\t\t}\n\t\t\t\t\t\t\n\t\t\t\t\t\t// pr_blockIndexFront is updated inside new_block_index, so we need to\n\t\t\t\t\t\t// update our fallback value too (since we keep the new index even if we\n\t\t\t\t\t\t// later fail)\n\t\t\t\t\t\toriginalBlockIndexFront = originalBlockIndexSlotsUsed;\n\t\t\t\t\t}\n\t\t\t\t\t\n\t\t\t\t\t// Insert a new block in the circular linked list\n\t\t\t\t\tauto newBlock = this->parent->ConcurrentQueue::template requisition_block<allocMode>();\n\t\t\t\t\tif (newBlock == nullptr) {\n\t\t\t\t\t\tpr_blockIndexFront = originalBlockIndexFront;\n\t\t\t\t\t\tpr_blockIndexSlotsUsed = originalBlockIndexSlotsUsed;\n\t\t\t\t\t\tthis->tailBlock = startBlock == nullptr ? firstAllocatedBlock : startBlock;\n\t\t\t\t\t\treturn false;\n\t\t\t\t\t}\n\t\t\t\t\t\n#ifdef MCDBGQ_TRACKMEM\n\t\t\t\t\tnewBlock->owner = this;\n#endif\n\t\t\t\t\tnewBlock->ConcurrentQueue::Block::template set_all_empty<explicit_context>();\n\t\t\t\t\tif (this->tailBlock == nullptr) {\n\t\t\t\t\t\tnewBlock->next = newBlock;\n\t\t\t\t\t}\n\t\t\t\t\telse {\n\t\t\t\t\t\tnewBlock->next = this->tailBlock->next;\n\t\t\t\t\t\tthis->tailBlock->next = newBlock;\n\t\t\t\t\t}\n\t\t\t\t\tthis->tailBlock = newBlock;\n\t\t\t\t\tfirstAllocatedBlock = firstAllocatedBlock == nullptr ? this->tailBlock : firstAllocatedBlock;\n\t\t\t\t\t\n\t\t\t\t\t++pr_blockIndexSlotsUsed;\n\t\t\t\t\t\n\t\t\t\t\tauto& entry = blockIndex.load(std::memory_order_relaxed)->entries[pr_blockIndexFront];\n\t\t\t\t\tentry.base = currentTailIndex;\n\t\t\t\t\tentry.block = this->tailBlock;\n\t\t\t\t\tpr_blockIndexFront = (pr_blockIndexFront + 1) & (pr_blockIndexSize - 1);\n\t\t\t\t}\n\t\t\t\t\n\t\t\t\t// Excellent, all allocations succeeded. 
Reset each block's emptiness before we fill them up, and\n\t\t\t\t// publish the new block index front\n\t\t\t\tauto block = firstAllocatedBlock;\n\t\t\t\twhile (true) {\n\t\t\t\t\tblock->ConcurrentQueue::Block::template reset_empty<explicit_context>();\n\t\t\t\t\tif (block == this->tailBlock) {\n\t\t\t\t\t\tbreak;\n\t\t\t\t\t}\n\t\t\t\t\tblock = block->next;\n\t\t\t\t}\n\t\t\t\t\n\t\t\t\tMOODYCAMEL_CONSTEXPR_IF (MOODYCAMEL_NOEXCEPT_CTOR(T, decltype(*itemFirst), new (static_cast<T*>(nullptr)) T(details::deref_noexcept(itemFirst)))) {\n\t\t\t\t\tblockIndex.load(std::memory_order_relaxed)->front.store((pr_blockIndexFront - 1) & (pr_blockIndexSize - 1), std::memory_order_release);\n\t\t\t\t}\n\t\t\t}\n\t\t\t\n\t\t\t// Enqueue, one block at a time\n\t\t\tindex_t newTailIndex = startTailIndex + static_cast<index_t>(count);\n\t\t\tcurrentTailIndex = startTailIndex;\n\t\t\tauto endBlock = this->tailBlock;\n\t\t\tthis->tailBlock = startBlock;\n\t\t\tassert((startTailIndex & static_cast<index_t>(BLOCK_SIZE - 1)) != 0 || firstAllocatedBlock != nullptr || count == 0);\n\t\t\tif ((startTailIndex & static_cast<index_t>(BLOCK_SIZE - 1)) == 0 && firstAllocatedBlock != nullptr) {\n\t\t\t\tthis->tailBlock = firstAllocatedBlock;\n\t\t\t}\n\t\t\twhile (true) {\n\t\t\t\tindex_t stopIndex = (currentTailIndex & ~static_cast<index_t>(BLOCK_SIZE - 1)) + static_cast<index_t>(BLOCK_SIZE);\n\t\t\t\tif (details::circular_less_than<index_t>(newTailIndex, stopIndex)) {\n\t\t\t\t\tstopIndex = newTailIndex;\n\t\t\t\t}\n\t\t\t\tMOODYCAMEL_CONSTEXPR_IF (MOODYCAMEL_NOEXCEPT_CTOR(T, decltype(*itemFirst), new (static_cast<T*>(nullptr)) T(details::deref_noexcept(itemFirst)))) {\n\t\t\t\t\twhile (currentTailIndex != stopIndex) {\n\t\t\t\t\t\tnew ((*this->tailBlock)[currentTailIndex++]) T(*itemFirst++);\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\telse {\n\t\t\t\t\tMOODYCAMEL_TRY {\n\t\t\t\t\t\twhile (currentTailIndex != stopIndex) {\n\t\t\t\t\t\t\t// Must use copy constructor even if move constructor is 
available\n\t\t\t\t\t\t\t// because we may have to revert if there's an exception.\n\t\t\t\t\t\t\t// Sorry about the horrible templated next line, but it was the only way\n\t\t\t\t\t\t\t// to disable moving *at compile time*, which is important because a type\n\t\t\t\t\t\t\t// may only define a (noexcept) move constructor, and so calls to the\n\t\t\t\t\t\t\t// cctor will not compile, even if they are in an if branch that will never\n\t\t\t\t\t\t\t// be executed\n\t\t\t\t\t\t\tnew ((*this->tailBlock)[currentTailIndex]) T(details::nomove_if<!MOODYCAMEL_NOEXCEPT_CTOR(T, decltype(*itemFirst), new (static_cast<T*>(nullptr)) T(details::deref_noexcept(itemFirst)))>::eval(*itemFirst));\n\t\t\t\t\t\t\t++currentTailIndex;\n\t\t\t\t\t\t\t++itemFirst;\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t\tMOODYCAMEL_CATCH (...) {\n\t\t\t\t\t\t// Oh dear, an exception's been thrown -- destroy the elements that\n\t\t\t\t\t\t// were enqueued so far and revert the entire bulk operation (we'll keep\n\t\t\t\t\t\t// any allocated blocks in our linked list for later, though).\n\t\t\t\t\t\tauto constructedStopIndex = currentTailIndex;\n\t\t\t\t\t\tauto lastBlockEnqueued = this->tailBlock;\n\t\t\t\t\t\t\n\t\t\t\t\t\tpr_blockIndexFront = originalBlockIndexFront;\n\t\t\t\t\t\tpr_blockIndexSlotsUsed = originalBlockIndexSlotsUsed;\n\t\t\t\t\t\tthis->tailBlock = startBlock == nullptr ? 
firstAllocatedBlock : startBlock;\n\t\t\t\t\t\t\n\t\t\t\t\t\tif (!details::is_trivially_destructible<T>::value) {\n\t\t\t\t\t\t\tauto block = startBlock;\n\t\t\t\t\t\t\tif ((startTailIndex & static_cast<index_t>(BLOCK_SIZE - 1)) == 0) {\n\t\t\t\t\t\t\t\tblock = firstAllocatedBlock;\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\tcurrentTailIndex = startTailIndex;\n\t\t\t\t\t\t\twhile (true) {\n\t\t\t\t\t\t\t\tstopIndex = (currentTailIndex & ~static_cast<index_t>(BLOCK_SIZE - 1)) + static_cast<index_t>(BLOCK_SIZE);\n\t\t\t\t\t\t\t\tif (details::circular_less_than<index_t>(constructedStopIndex, stopIndex)) {\n\t\t\t\t\t\t\t\t\tstopIndex = constructedStopIndex;\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t\twhile (currentTailIndex != stopIndex) {\n\t\t\t\t\t\t\t\t\t(*block)[currentTailIndex++]->~T();\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t\tif (block == lastBlockEnqueued) {\n\t\t\t\t\t\t\t\t\tbreak;\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t\tblock = block->next;\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t\tMOODYCAMEL_RETHROW;\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\t\n\t\t\t\tif (this->tailBlock == endBlock) {\n\t\t\t\t\tassert(currentTailIndex == newTailIndex);\n\t\t\t\t\tbreak;\n\t\t\t\t}\n\t\t\t\tthis->tailBlock = this->tailBlock->next;\n\t\t\t}\n\t\t\t\n\t\t\tMOODYCAMEL_CONSTEXPR_IF (!MOODYCAMEL_NOEXCEPT_CTOR(T, decltype(*itemFirst), new (static_cast<T*>(nullptr)) T(details::deref_noexcept(itemFirst)))) {\n\t\t\t\tif (firstAllocatedBlock != nullptr)\n\t\t\t\t\tblockIndex.load(std::memory_order_relaxed)->front.store((pr_blockIndexFront - 1) & (pr_blockIndexSize - 1), std::memory_order_release);\n\t\t\t}\n\t\t\t\n\t\t\tthis->tailIndex.store(newTailIndex, std::memory_order_release);\n\t\t\treturn true;\n\t\t}\n\t\t\n\t\ttemplate<typename It>\n\t\tsize_t dequeue_bulk(It& itemFirst, size_t max)\n\t\t{\n\t\t\tauto tail = this->tailIndex.load(std::memory_order_relaxed);\n\t\t\tauto overcommit = this->dequeueOvercommit.load(std::memory_order_relaxed);\n\t\t\tauto desiredCount = static_cast<size_t>(tail - 
(this->dequeueOptimisticCount.load(std::memory_order_relaxed) - overcommit));\n\t\t\tif (details::circular_less_than<size_t>(0, desiredCount)) {\n\t\t\t\tdesiredCount = desiredCount < max ? desiredCount : max;\n\t\t\t\tstd::atomic_thread_fence(std::memory_order_acquire);\n\t\t\t\t\n\t\t\t\tauto myDequeueCount = this->dequeueOptimisticCount.fetch_add(desiredCount, std::memory_order_relaxed);\n\t\t\t\t\n\t\t\t\ttail = this->tailIndex.load(std::memory_order_acquire);\n\t\t\t\tauto actualCount = static_cast<size_t>(tail - (myDequeueCount - overcommit));\n\t\t\t\tif (details::circular_less_than<size_t>(0, actualCount)) {\n\t\t\t\t\tactualCount = desiredCount < actualCount ? desiredCount : actualCount;\n\t\t\t\t\tif (actualCount < desiredCount) {\n\t\t\t\t\t\tthis->dequeueOvercommit.fetch_add(desiredCount - actualCount, std::memory_order_release);\n\t\t\t\t\t}\n\t\t\t\t\t\n\t\t\t\t\t// Get the first index. Note that since there's guaranteed to be at least actualCount elements, this\n\t\t\t\t\t// will never exceed tail.\n\t\t\t\t\tauto firstIndex = this->headIndex.fetch_add(actualCount, std::memory_order_acq_rel);\n\t\t\t\t\t\n\t\t\t\t\t// Determine which block the first element is in\n\t\t\t\t\tauto localBlockIndex = blockIndex.load(std::memory_order_acquire);\n\t\t\t\t\tauto localBlockIndexHead = localBlockIndex->front.load(std::memory_order_acquire);\n\t\t\t\t\t\n\t\t\t\t\tauto headBase = localBlockIndex->entries[localBlockIndexHead].base;\n\t\t\t\t\tauto firstBlockBaseIndex = firstIndex & ~static_cast<index_t>(BLOCK_SIZE - 1);\n\t\t\t\t\tauto offset = static_cast<size_t>(static_cast<typename std::make_signed<index_t>::type>(firstBlockBaseIndex - headBase) / BLOCK_SIZE);\n\t\t\t\t\tauto indexIndex = (localBlockIndexHead + offset) & (localBlockIndex->size - 1);\n\t\t\t\t\t\n\t\t\t\t\t// Iterate the blocks and dequeue\n\t\t\t\t\tauto index = firstIndex;\n\t\t\t\t\tdo {\n\t\t\t\t\t\tauto firstIndexInBlock = index;\n\t\t\t\t\t\tindex_t endIndex = (index & 
~static_cast<index_t>(BLOCK_SIZE - 1)) + static_cast<index_t>(BLOCK_SIZE);\n\t\t\t\t\t\tendIndex = details::circular_less_than<index_t>(firstIndex + static_cast<index_t>(actualCount), endIndex) ? firstIndex + static_cast<index_t>(actualCount) : endIndex;\n\t\t\t\t\t\tauto block = localBlockIndex->entries[indexIndex].block;\n\t\t\t\t\t\tif (MOODYCAMEL_NOEXCEPT_ASSIGN(T, T&&, details::deref_noexcept(itemFirst) = std::move((*(*block)[index])))) {\n\t\t\t\t\t\t\twhile (index != endIndex) {\n\t\t\t\t\t\t\t\tauto& el = *((*block)[index]);\n\t\t\t\t\t\t\t\t*itemFirst++ = std::move(el);\n\t\t\t\t\t\t\t\tel.~T();\n\t\t\t\t\t\t\t\t++index;\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t\telse {\n\t\t\t\t\t\t\tMOODYCAMEL_TRY {\n\t\t\t\t\t\t\t\twhile (index != endIndex) {\n\t\t\t\t\t\t\t\t\tauto& el = *((*block)[index]);\n\t\t\t\t\t\t\t\t\t*itemFirst = std::move(el);\n\t\t\t\t\t\t\t\t\t++itemFirst;\n\t\t\t\t\t\t\t\t\tel.~T();\n\t\t\t\t\t\t\t\t\t++index;\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\tMOODYCAMEL_CATCH (...) 
{\n\t\t\t\t\t\t\t\t// It's too late to revert the dequeue, but we can make sure that all\n\t\t\t\t\t\t\t\t// the dequeued objects are properly destroyed and the block index\n\t\t\t\t\t\t\t\t// (and empty count) are properly updated before we propagate the exception\n\t\t\t\t\t\t\t\tdo {\n\t\t\t\t\t\t\t\t\tblock = localBlockIndex->entries[indexIndex].block;\n\t\t\t\t\t\t\t\t\twhile (index != endIndex) {\n\t\t\t\t\t\t\t\t\t\t(*block)[index++]->~T();\n\t\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t\t\tblock->ConcurrentQueue::Block::template set_many_empty<explicit_context>(firstIndexInBlock, static_cast<size_t>(endIndex - firstIndexInBlock));\n\t\t\t\t\t\t\t\t\tindexIndex = (indexIndex + 1) & (localBlockIndex->size - 1);\n\t\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\t\tfirstIndexInBlock = index;\n\t\t\t\t\t\t\t\t\tendIndex = (index & ~static_cast<index_t>(BLOCK_SIZE - 1)) + static_cast<index_t>(BLOCK_SIZE);\n\t\t\t\t\t\t\t\t\tendIndex = details::circular_less_than<index_t>(firstIndex + static_cast<index_t>(actualCount), endIndex) ? 
firstIndex + static_cast<index_t>(actualCount) : endIndex;\n\t\t\t\t\t\t\t\t} while (index != firstIndex + actualCount);\n\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\tMOODYCAMEL_RETHROW;\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t\tblock->ConcurrentQueue::Block::template set_many_empty<explicit_context>(firstIndexInBlock, static_cast<size_t>(endIndex - firstIndexInBlock));\n\t\t\t\t\t\tindexIndex = (indexIndex + 1) & (localBlockIndex->size - 1);\n\t\t\t\t\t} while (index != firstIndex + actualCount);\n\t\t\t\t\t\n\t\t\t\t\treturn actualCount;\n\t\t\t\t}\n\t\t\t\telse {\n\t\t\t\t\t// Wasn't anything to dequeue after all; make the effective dequeue count eventually consistent\n\t\t\t\t\tthis->dequeueOvercommit.fetch_add(desiredCount, std::memory_order_release);\n\t\t\t\t}\n\t\t\t}\n\t\t\t\n\t\t\treturn 0;\n\t\t}\n\t\t\n\tprivate:\n\t\tstruct BlockIndexEntry\n\t\t{\n\t\t\tindex_t base;\n\t\t\tBlock* block;\n\t\t};\n\t\t\n\t\tstruct BlockIndexHeader\n\t\t{\n\t\t\tsize_t size;\n\t\t\tstd::atomic<size_t> front;\t\t// Current slot (not next, like pr_blockIndexFront)\n\t\t\tBlockIndexEntry* entries;\n\t\t\tvoid* prev;\n\t\t};\n\t\t\n\t\t\n\t\tbool new_block_index(size_t numberOfFilledSlotsToExpose)\n\t\t{\n\t\t\tauto prevBlockSizeMask = pr_blockIndexSize - 1;\n\t\t\t\n\t\t\t// Create the new block\n\t\t\tpr_blockIndexSize <<= 1;\n\t\t\tauto newRawPtr = static_cast<char*>((Traits::malloc)(sizeof(BlockIndexHeader) + std::alignment_of<BlockIndexEntry>::value - 1 + sizeof(BlockIndexEntry) * pr_blockIndexSize));\n\t\t\tif (newRawPtr == nullptr) {\n\t\t\t\tpr_blockIndexSize >>= 1;\t\t// Reset to allow graceful retry\n\t\t\t\treturn false;\n\t\t\t}\n\t\t\t\n\t\t\tauto newBlockIndexEntries = reinterpret_cast<BlockIndexEntry*>(details::align_for<BlockIndexEntry>(newRawPtr + sizeof(BlockIndexHeader)));\n\t\t\t\n\t\t\t// Copy in all the old indices, if any\n\t\t\tsize_t j = 0;\n\t\t\tif (pr_blockIndexSlotsUsed != 0) {\n\t\t\t\tauto i = (pr_blockIndexFront - pr_blockIndexSlotsUsed) & 
prevBlockSizeMask;\n\t\t\t\tdo {\n\t\t\t\t\tnewBlockIndexEntries[j++] = pr_blockIndexEntries[i];\n\t\t\t\t\ti = (i + 1) & prevBlockSizeMask;\n\t\t\t\t} while (i != pr_blockIndexFront);\n\t\t\t}\n\t\t\t\n\t\t\t// Update everything\n\t\t\tauto header = new (newRawPtr) BlockIndexHeader;\n\t\t\theader->size = pr_blockIndexSize;\n\t\t\theader->front.store(numberOfFilledSlotsToExpose - 1, std::memory_order_relaxed);\n\t\t\theader->entries = newBlockIndexEntries;\n\t\t\theader->prev = pr_blockIndexRaw;\t\t// we link the new block to the old one so we can free it later\n\t\t\t\n\t\t\tpr_blockIndexFront = j;\n\t\t\tpr_blockIndexEntries = newBlockIndexEntries;\n\t\t\tpr_blockIndexRaw = newRawPtr;\n\t\t\tblockIndex.store(header, std::memory_order_release);\n\t\t\t\n\t\t\treturn true;\n\t\t}\n\t\t\n\tprivate:\n\t\tstd::atomic<BlockIndexHeader*> blockIndex;\n\t\t\n\t\t// To be used by producer only -- consumer must use the ones in referenced by blockIndex\n\t\tsize_t pr_blockIndexSlotsUsed;\n\t\tsize_t pr_blockIndexSize;\n\t\tsize_t pr_blockIndexFront;\t\t// Next slot (not current)\n\t\tBlockIndexEntry* pr_blockIndexEntries;\n\t\tvoid* pr_blockIndexRaw;\n\t\t\n#ifdef MOODYCAMEL_QUEUE_INTERNAL_DEBUG\n\tpublic:\n\t\tExplicitProducer* nextExplicitProducer;\n\tprivate:\n#endif\n\t\t\n#ifdef MCDBGQ_TRACKMEM\n\t\tfriend struct MemStats;\n#endif\n\t};\n\t\n\t\n\t//////////////////////////////////\n\t// Implicit queue\n\t//////////////////////////////////\n\t\n\tstruct ImplicitProducer : public ProducerBase\n\t{\t\t\t\n\t\tImplicitProducer(ConcurrentQueue* parent_) :\n\t\t\tProducerBase(parent_, false),\n\t\t\tnextBlockIndexCapacity(IMPLICIT_INITIAL_INDEX_SIZE),\n\t\t\tblockIndex(nullptr)\n\t\t{\n\t\t\tnew_block_index();\n\t\t}\n\t\t\n\t\t~ImplicitProducer()\n\t\t{\n\t\t\t// Note that since we're in the destructor we can assume that all enqueue/dequeue operations\n\t\t\t// completed already; this means that all undequeued elements are placed contiguously across\n\t\t\t// contiguous 
blocks, and that only the first and last remaining blocks can be only partially\n\t\t\t// empty (all other remaining blocks must be completely full).\n\t\t\t\n#ifdef MOODYCAMEL_CPP11_THREAD_LOCAL_SUPPORTED\n\t\t\t// Unregister ourselves for thread termination notification\n\t\t\tif (!this->inactive.load(std::memory_order_relaxed)) {\n\t\t\t\tdetails::ThreadExitNotifier::unsubscribe(&threadExitListener);\n\t\t\t}\n#endif\n\t\t\t\n\t\t\t// Destroy all remaining elements!\n\t\t\tauto tail = this->tailIndex.load(std::memory_order_relaxed);\n\t\t\tauto index = this->headIndex.load(std::memory_order_relaxed);\n\t\t\tBlock* block = nullptr;\n\t\t\tassert(index == tail || details::circular_less_than(index, tail));\n\t\t\tbool forceFreeLastBlock = index != tail;\t\t// If we enter the loop, then the last (tail) block will not be freed\n\t\t\twhile (index != tail) {\n\t\t\t\tif ((index & static_cast<index_t>(BLOCK_SIZE - 1)) == 0 || block == nullptr) {\n\t\t\t\t\tif (block != nullptr) {\n\t\t\t\t\t\t// Free the old block\n\t\t\t\t\t\tthis->parent->add_block_to_free_list(block);\n\t\t\t\t\t}\n\t\t\t\t\t\n\t\t\t\t\tblock = get_block_index_entry_for_index(index)->value.load(std::memory_order_relaxed);\n\t\t\t\t}\n\t\t\t\t\n\t\t\t\t((*block)[index])->~T();\n\t\t\t\t++index;\n\t\t\t}\n\t\t\t// Even if the queue is empty, there's still one block that's not on the free list\n\t\t\t// (unless the head index reached the end of it, in which case the tail will be poised\n\t\t\t// to create a new block).\n\t\t\tif (this->tailBlock != nullptr && (forceFreeLastBlock || (tail & static_cast<index_t>(BLOCK_SIZE - 1)) != 0)) {\n\t\t\t\tthis->parent->add_block_to_free_list(this->tailBlock);\n\t\t\t}\n\t\t\t\n\t\t\t// Destroy block index\n\t\t\tauto localBlockIndex = blockIndex.load(std::memory_order_relaxed);\n\t\t\tif (localBlockIndex != nullptr) {\n\t\t\t\tfor (size_t i = 0; i != localBlockIndex->capacity; ++i) {\n\t\t\t\t\tlocalBlockIndex->index[i]->~BlockIndexEntry();\n\t\t\t\t}\n\t\t\t\tdo 
{\n\t\t\t\t\tauto prev = localBlockIndex->prev;\n\t\t\t\t\tlocalBlockIndex->~BlockIndexHeader();\n\t\t\t\t\t(Traits::free)(localBlockIndex);\n\t\t\t\t\tlocalBlockIndex = prev;\n\t\t\t\t} while (localBlockIndex != nullptr);\n\t\t\t}\n\t\t}\n\t\t\n\t\ttemplate<AllocationMode allocMode, typename U>\n\t\tinline bool enqueue(U&& element)\n\t\t{\n\t\t\tindex_t currentTailIndex = this->tailIndex.load(std::memory_order_relaxed);\n\t\t\tindex_t newTailIndex = 1 + currentTailIndex;\n\t\t\tif ((currentTailIndex & static_cast<index_t>(BLOCK_SIZE - 1)) == 0) {\n\t\t\t\t// We reached the end of a block, start a new one\n\t\t\t\tauto head = this->headIndex.load(std::memory_order_relaxed);\n\t\t\t\tassert(!details::circular_less_than<index_t>(currentTailIndex, head));\n\t\t\t\tif (!details::circular_less_than<index_t>(head, currentTailIndex + BLOCK_SIZE) || (MAX_SUBQUEUE_SIZE != details::const_numeric_max<size_t>::value && (MAX_SUBQUEUE_SIZE == 0 || MAX_SUBQUEUE_SIZE - BLOCK_SIZE < currentTailIndex - head))) {\n\t\t\t\t\treturn false;\n\t\t\t\t}\n#ifdef MCDBGQ_NOLOCKFREE_IMPLICITPRODBLOCKINDEX\n\t\t\t\tdebug::DebugLock lock(mutex);\n#endif\n\t\t\t\t// Find out where we'll be inserting this block in the block index\n\t\t\t\tBlockIndexEntry* idxEntry;\n\t\t\t\tif (!insert_block_index_entry<allocMode>(idxEntry, currentTailIndex)) {\n\t\t\t\t\treturn false;\n\t\t\t\t}\n\t\t\t\t\n\t\t\t\t// Get ahold of a new block\n\t\t\t\tauto newBlock = this->parent->ConcurrentQueue::template requisition_block<allocMode>();\n\t\t\t\tif (newBlock == nullptr) {\n\t\t\t\t\trewind_block_index_tail();\n\t\t\t\t\tidxEntry->value.store(nullptr, std::memory_order_relaxed);\n\t\t\t\t\treturn false;\n\t\t\t\t}\n#ifdef MCDBGQ_TRACKMEM\n\t\t\t\tnewBlock->owner = this;\n#endif\n\t\t\t\tnewBlock->ConcurrentQueue::Block::template reset_empty<implicit_context>();\n\n\t\t\t\tMOODYCAMEL_CONSTEXPR_IF (!MOODYCAMEL_NOEXCEPT_CTOR(T, U, new (static_cast<T*>(nullptr)) T(std::forward<U>(element)))) {\n\t\t\t\t\t// May 
throw, try to insert now before we publish the fact that we have this new block\n\t\t\t\t\tMOODYCAMEL_TRY {\n\t\t\t\t\t\tnew ((*newBlock)[currentTailIndex]) T(std::forward<U>(element));\n\t\t\t\t\t}\n\t\t\t\t\tMOODYCAMEL_CATCH (...) {\n\t\t\t\t\t\trewind_block_index_tail();\n\t\t\t\t\t\tidxEntry->value.store(nullptr, std::memory_order_relaxed);\n\t\t\t\t\t\tthis->parent->add_block_to_free_list(newBlock);\n\t\t\t\t\t\tMOODYCAMEL_RETHROW;\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\t\n\t\t\t\t// Insert the new block into the index\n\t\t\t\tidxEntry->value.store(newBlock, std::memory_order_relaxed);\n\t\t\t\t\n\t\t\t\tthis->tailBlock = newBlock;\n\t\t\t\t\n\t\t\t\tMOODYCAMEL_CONSTEXPR_IF (!MOODYCAMEL_NOEXCEPT_CTOR(T, U, new (static_cast<T*>(nullptr)) T(std::forward<U>(element)))) {\n\t\t\t\t\tthis->tailIndex.store(newTailIndex, std::memory_order_release);\n\t\t\t\t\treturn true;\n\t\t\t\t}\n\t\t\t}\n\t\t\t\n\t\t\t// Enqueue\n\t\t\tnew ((*this->tailBlock)[currentTailIndex]) T(std::forward<U>(element));\n\t\t\t\n\t\t\tthis->tailIndex.store(newTailIndex, std::memory_order_release);\n\t\t\treturn true;\n\t\t}\n\t\t\n\t\ttemplate<typename U>\n\t\tbool dequeue(U& element)\n\t\t{\n\t\t\t// See ExplicitProducer::dequeue for rationale and explanation\n\t\t\tindex_t tail = this->tailIndex.load(std::memory_order_relaxed);\n\t\t\tindex_t overcommit = this->dequeueOvercommit.load(std::memory_order_relaxed);\n\t\t\tif (details::circular_less_than<index_t>(this->dequeueOptimisticCount.load(std::memory_order_relaxed) - overcommit, tail)) {\n\t\t\t\tstd::atomic_thread_fence(std::memory_order_acquire);\n\t\t\t\t\n\t\t\t\tindex_t myDequeueCount = this->dequeueOptimisticCount.fetch_add(1, std::memory_order_relaxed);\n\t\t\t\ttail = this->tailIndex.load(std::memory_order_acquire);\n\t\t\t\tif ((details::likely)(details::circular_less_than<index_t>(myDequeueCount - overcommit, tail))) {\n\t\t\t\t\tindex_t index = this->headIndex.fetch_add(1, std::memory_order_acq_rel);\n\t\t\t\t\t\n\t\t\t\t\t// 
Determine which block the element is in\n\t\t\t\t\tauto entry = get_block_index_entry_for_index(index);\n\t\t\t\t\t\n\t\t\t\t\t// Dequeue\n\t\t\t\t\tauto block = entry->value.load(std::memory_order_relaxed);\n\t\t\t\t\tauto& el = *((*block)[index]);\n\t\t\t\t\t\n\t\t\t\t\tif (!MOODYCAMEL_NOEXCEPT_ASSIGN(T, T&&, element = std::move(el))) {\n#ifdef MCDBGQ_NOLOCKFREE_IMPLICITPRODBLOCKINDEX\n\t\t\t\t\t\t// Note: Acquiring the mutex with every dequeue instead of only when a block\n\t\t\t\t\t\t// is released is very sub-optimal, but it is, after all, purely debug code.\n\t\t\t\t\t\tdebug::DebugLock lock(producer->mutex);\n#endif\n\t\t\t\t\t\tstruct Guard {\n\t\t\t\t\t\t\tBlock* block;\n\t\t\t\t\t\t\tindex_t index;\n\t\t\t\t\t\t\tBlockIndexEntry* entry;\n\t\t\t\t\t\t\tConcurrentQueue* parent;\n\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t~Guard()\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t(*block)[index]->~T();\n\t\t\t\t\t\t\t\tif (block->ConcurrentQueue::Block::template set_empty<implicit_context>(index)) {\n\t\t\t\t\t\t\t\t\tentry->value.store(nullptr, std::memory_order_relaxed);\n\t\t\t\t\t\t\t\t\tparent->add_block_to_free_list(block);\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t} guard = { block, index, entry, this->parent };\n\n\t\t\t\t\t\telement = std::move(el); // NOLINT\n\t\t\t\t\t}\n\t\t\t\t\telse {\n\t\t\t\t\t\telement = std::move(el); // NOLINT\n\t\t\t\t\t\tel.~T(); // NOLINT\n\n\t\t\t\t\t\tif (block->ConcurrentQueue::Block::template set_empty<implicit_context>(index)) {\n\t\t\t\t\t\t\t{\n#ifdef MCDBGQ_NOLOCKFREE_IMPLICITPRODBLOCKINDEX\n\t\t\t\t\t\t\t\tdebug::DebugLock lock(mutex);\n#endif\n\t\t\t\t\t\t\t\t// Add the block back into the global free pool (and remove from block index)\n\t\t\t\t\t\t\t\tentry->value.store(nullptr, std::memory_order_relaxed);\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\tthis->parent->add_block_to_free_list(block);\t\t// releases the above store\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t\t\n\t\t\t\t\treturn true;\n\t\t\t\t}\n\t\t\t\telse 
{\n\t\t\t\t\tthis->dequeueOvercommit.fetch_add(1, std::memory_order_release);\n\t\t\t\t}\n\t\t\t}\n\t\t\n\t\t\treturn false;\n\t\t}\n\t\t\n#ifdef _MSC_VER\n#pragma warning(push)\n#pragma warning(disable: 4706)  // assignment within conditional expression\n#endif\n\t\ttemplate<AllocationMode allocMode, typename It>\n\t\tbool enqueue_bulk(It itemFirst, size_t count)\n\t\t{\n\t\t\t// First, we need to make sure we have enough room to enqueue all of the elements;\n\t\t\t// this means pre-allocating blocks and putting them in the block index (but only if\n\t\t\t// all the allocations succeeded).\n\t\t\t\n\t\t\t// Note that the tailBlock we start off with may not be owned by us any more;\n\t\t\t// this happens if it was filled up exactly to the top (setting tailIndex to\n\t\t\t// the first index of the next block which is not yet allocated), then dequeued\n\t\t\t// completely (putting it on the free list) before we enqueue again.\n\t\t\t\n\t\t\tindex_t startTailIndex = this->tailIndex.load(std::memory_order_relaxed);\n\t\t\tauto startBlock = this->tailBlock;\n\t\t\tBlock* firstAllocatedBlock = nullptr;\n\t\t\tauto endBlock = this->tailBlock;\n\t\t\t\n\t\t\t// Figure out how many blocks we'll need to allocate, and do so\n\t\t\tsize_t blockBaseDiff = ((startTailIndex + count - 1) & ~static_cast<index_t>(BLOCK_SIZE - 1)) - ((startTailIndex - 1) & ~static_cast<index_t>(BLOCK_SIZE - 1));\n\t\t\tindex_t currentTailIndex = (startTailIndex - 1) & ~static_cast<index_t>(BLOCK_SIZE - 1);\n\t\t\tif (blockBaseDiff > 0) {\n#ifdef MCDBGQ_NOLOCKFREE_IMPLICITPRODBLOCKINDEX\n\t\t\t\tdebug::DebugLock lock(mutex);\n#endif\n\t\t\t\tdo {\n\t\t\t\t\tblockBaseDiff -= static_cast<index_t>(BLOCK_SIZE);\n\t\t\t\t\tcurrentTailIndex += static_cast<index_t>(BLOCK_SIZE);\n\t\t\t\t\t\n\t\t\t\t\t// Find out where we'll be inserting this block in the block index\n\t\t\t\t\tBlockIndexEntry* idxEntry = nullptr;  // initialization here unnecessary but compiler can't always tell\n\t\t\t\t\tBlock* 
newBlock;\n\t\t\t\t\tbool indexInserted = false;\n\t\t\t\t\tauto head = this->headIndex.load(std::memory_order_relaxed);\n\t\t\t\t\tassert(!details::circular_less_than<index_t>(currentTailIndex, head));\n\t\t\t\t\tbool full = !details::circular_less_than<index_t>(head, currentTailIndex + BLOCK_SIZE) || (MAX_SUBQUEUE_SIZE != details::const_numeric_max<size_t>::value && (MAX_SUBQUEUE_SIZE == 0 || MAX_SUBQUEUE_SIZE - BLOCK_SIZE < currentTailIndex - head));\n\n\t\t\t\t\tif (full || !(indexInserted = insert_block_index_entry<allocMode>(idxEntry, currentTailIndex)) || (newBlock = this->parent->ConcurrentQueue::template requisition_block<allocMode>()) == nullptr) {\n\t\t\t\t\t\t// Index allocation or block allocation failed; revert any other allocations\n\t\t\t\t\t\t// and index insertions done so far for this operation\n\t\t\t\t\t\tif (indexInserted) {\n\t\t\t\t\t\t\trewind_block_index_tail();\n\t\t\t\t\t\t\tidxEntry->value.store(nullptr, std::memory_order_relaxed);\n\t\t\t\t\t\t}\n\t\t\t\t\t\tcurrentTailIndex = (startTailIndex - 1) & ~static_cast<index_t>(BLOCK_SIZE - 1);\n\t\t\t\t\t\tfor (auto block = firstAllocatedBlock; block != nullptr; block = block->next) {\n\t\t\t\t\t\t\tcurrentTailIndex += static_cast<index_t>(BLOCK_SIZE);\n\t\t\t\t\t\t\tidxEntry = get_block_index_entry_for_index(currentTailIndex);\n\t\t\t\t\t\t\tidxEntry->value.store(nullptr, std::memory_order_relaxed);\n\t\t\t\t\t\t\trewind_block_index_tail();\n\t\t\t\t\t\t}\n\t\t\t\t\t\tthis->parent->add_blocks_to_free_list(firstAllocatedBlock);\n\t\t\t\t\t\tthis->tailBlock = startBlock;\n\t\t\t\t\t\t\n\t\t\t\t\t\treturn false;\n\t\t\t\t\t}\n\t\t\t\t\t\n#ifdef MCDBGQ_TRACKMEM\n\t\t\t\t\tnewBlock->owner = this;\n#endif\n\t\t\t\t\tnewBlock->ConcurrentQueue::Block::template reset_empty<implicit_context>();\n\t\t\t\t\tnewBlock->next = nullptr;\n\t\t\t\t\t\n\t\t\t\t\t// Insert the new block into the index\n\t\t\t\t\tidxEntry->value.store(newBlock, std::memory_order_relaxed);\n\t\t\t\t\t\n\t\t\t\t\t// Store the 
chain of blocks so that we can undo if later allocations fail,\n\t\t\t\t\t// and so that we can find the blocks when we do the actual enqueueing\n\t\t\t\t\tif ((startTailIndex & static_cast<index_t>(BLOCK_SIZE - 1)) != 0 || firstAllocatedBlock != nullptr) {\n\t\t\t\t\t\tassert(this->tailBlock != nullptr);\n\t\t\t\t\t\tthis->tailBlock->next = newBlock;\n\t\t\t\t\t}\n\t\t\t\t\tthis->tailBlock = newBlock;\n\t\t\t\t\tendBlock = newBlock;\n\t\t\t\t\tfirstAllocatedBlock = firstAllocatedBlock == nullptr ? newBlock : firstAllocatedBlock;\n\t\t\t\t} while (blockBaseDiff > 0);\n\t\t\t}\n\t\t\t\n\t\t\t// Enqueue, one block at a time\n\t\t\tindex_t newTailIndex = startTailIndex + static_cast<index_t>(count);\n\t\t\tcurrentTailIndex = startTailIndex;\n\t\t\tthis->tailBlock = startBlock;\n\t\t\tassert((startTailIndex & static_cast<index_t>(BLOCK_SIZE - 1)) != 0 || firstAllocatedBlock != nullptr || count == 0);\n\t\t\tif ((startTailIndex & static_cast<index_t>(BLOCK_SIZE - 1)) == 0 && firstAllocatedBlock != nullptr) {\n\t\t\t\tthis->tailBlock = firstAllocatedBlock;\n\t\t\t}\n\t\t\twhile (true) {\n\t\t\t\tindex_t stopIndex = (currentTailIndex & ~static_cast<index_t>(BLOCK_SIZE - 1)) + static_cast<index_t>(BLOCK_SIZE);\n\t\t\t\tif (details::circular_less_than<index_t>(newTailIndex, stopIndex)) {\n\t\t\t\t\tstopIndex = newTailIndex;\n\t\t\t\t}\n\t\t\t\tMOODYCAMEL_CONSTEXPR_IF (MOODYCAMEL_NOEXCEPT_CTOR(T, decltype(*itemFirst), new (static_cast<T*>(nullptr)) T(details::deref_noexcept(itemFirst)))) {\n\t\t\t\t\twhile (currentTailIndex != stopIndex) {\n\t\t\t\t\t\tnew ((*this->tailBlock)[currentTailIndex++]) T(*itemFirst++);\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\telse {\n\t\t\t\t\tMOODYCAMEL_TRY {\n\t\t\t\t\t\twhile (currentTailIndex != stopIndex) {\n\t\t\t\t\t\t\tnew ((*this->tailBlock)[currentTailIndex]) T(details::nomove_if<!MOODYCAMEL_NOEXCEPT_CTOR(T, decltype(*itemFirst), new (static_cast<T*>(nullptr)) 
T(details::deref_noexcept(itemFirst)))>::eval(*itemFirst));\n\t\t\t\t\t\t\t++currentTailIndex;\n\t\t\t\t\t\t\t++itemFirst;\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t\tMOODYCAMEL_CATCH (...) {\n\t\t\t\t\t\tauto constructedStopIndex = currentTailIndex;\n\t\t\t\t\t\tauto lastBlockEnqueued = this->tailBlock;\n\t\t\t\t\t\t\n\t\t\t\t\t\tif (!details::is_trivially_destructible<T>::value) {\n\t\t\t\t\t\t\tauto block = startBlock;\n\t\t\t\t\t\t\tif ((startTailIndex & static_cast<index_t>(BLOCK_SIZE - 1)) == 0) {\n\t\t\t\t\t\t\t\tblock = firstAllocatedBlock;\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\tcurrentTailIndex = startTailIndex;\n\t\t\t\t\t\t\twhile (true) {\n\t\t\t\t\t\t\t\tstopIndex = (currentTailIndex & ~static_cast<index_t>(BLOCK_SIZE - 1)) + static_cast<index_t>(BLOCK_SIZE);\n\t\t\t\t\t\t\t\tif (details::circular_less_than<index_t>(constructedStopIndex, stopIndex)) {\n\t\t\t\t\t\t\t\t\tstopIndex = constructedStopIndex;\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t\twhile (currentTailIndex != stopIndex) {\n\t\t\t\t\t\t\t\t\t(*block)[currentTailIndex++]->~T();\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t\tif (block == lastBlockEnqueued) {\n\t\t\t\t\t\t\t\t\tbreak;\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t\tblock = block->next;\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t\t\n\t\t\t\t\t\tcurrentTailIndex = (startTailIndex - 1) & ~static_cast<index_t>(BLOCK_SIZE - 1);\n\t\t\t\t\t\tfor (auto block = firstAllocatedBlock; block != nullptr; block = block->next) {\n\t\t\t\t\t\t\tcurrentTailIndex += static_cast<index_t>(BLOCK_SIZE);\n\t\t\t\t\t\t\tauto idxEntry = get_block_index_entry_for_index(currentTailIndex);\n\t\t\t\t\t\t\tidxEntry->value.store(nullptr, std::memory_order_relaxed);\n\t\t\t\t\t\t\trewind_block_index_tail();\n\t\t\t\t\t\t}\n\t\t\t\t\t\tthis->parent->add_blocks_to_free_list(firstAllocatedBlock);\n\t\t\t\t\t\tthis->tailBlock = startBlock;\n\t\t\t\t\t\tMOODYCAMEL_RETHROW;\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\t\n\t\t\t\tif (this->tailBlock == endBlock) {\n\t\t\t\t\tassert(currentTailIndex == 
newTailIndex);\n\t\t\t\t\tbreak;\n\t\t\t\t}\n\t\t\t\tthis->tailBlock = this->tailBlock->next;\n\t\t\t}\n\t\t\tthis->tailIndex.store(newTailIndex, std::memory_order_release);\n\t\t\treturn true;\n\t\t}\n#ifdef _MSC_VER\n#pragma warning(pop)\n#endif\n\t\t\n\t\ttemplate<typename It>\n\t\tsize_t dequeue_bulk(It& itemFirst, size_t max)\n\t\t{\n\t\t\tauto tail = this->tailIndex.load(std::memory_order_relaxed);\n\t\t\tauto overcommit = this->dequeueOvercommit.load(std::memory_order_relaxed);\n\t\t\tauto desiredCount = static_cast<size_t>(tail - (this->dequeueOptimisticCount.load(std::memory_order_relaxed) - overcommit));\n\t\t\tif (details::circular_less_than<size_t>(0, desiredCount)) {\n\t\t\t\tdesiredCount = desiredCount < max ? desiredCount : max;\n\t\t\t\tstd::atomic_thread_fence(std::memory_order_acquire);\n\t\t\t\t\n\t\t\t\tauto myDequeueCount = this->dequeueOptimisticCount.fetch_add(desiredCount, std::memory_order_relaxed);\n\t\t\t\t\n\t\t\t\ttail = this->tailIndex.load(std::memory_order_acquire);\n\t\t\t\tauto actualCount = static_cast<size_t>(tail - (myDequeueCount - overcommit));\n\t\t\t\tif (details::circular_less_than<size_t>(0, actualCount)) {\n\t\t\t\t\tactualCount = desiredCount < actualCount ? desiredCount : actualCount;\n\t\t\t\t\tif (actualCount < desiredCount) {\n\t\t\t\t\t\tthis->dequeueOvercommit.fetch_add(desiredCount - actualCount, std::memory_order_release);\n\t\t\t\t\t}\n\t\t\t\t\t\n\t\t\t\t\t// Get the first index. 
Note that since there's guaranteed to be at least actualCount elements, this\n\t\t\t\t\t// will never exceed tail.\n\t\t\t\t\tauto firstIndex = this->headIndex.fetch_add(actualCount, std::memory_order_acq_rel);\n\t\t\t\t\t\n\t\t\t\t\t// Iterate the blocks and dequeue\n\t\t\t\t\tauto index = firstIndex;\n\t\t\t\t\tBlockIndexHeader* localBlockIndex;\n\t\t\t\t\tauto indexIndex = get_block_index_index_for_index(index, localBlockIndex);\n\t\t\t\t\tdo {\n\t\t\t\t\t\tauto blockStartIndex = index;\n\t\t\t\t\t\tindex_t endIndex = (index & ~static_cast<index_t>(BLOCK_SIZE - 1)) + static_cast<index_t>(BLOCK_SIZE);\n\t\t\t\t\t\tendIndex = details::circular_less_than<index_t>(firstIndex + static_cast<index_t>(actualCount), endIndex) ? firstIndex + static_cast<index_t>(actualCount) : endIndex;\n\t\t\t\t\t\t\n\t\t\t\t\t\tauto entry = localBlockIndex->index[indexIndex];\n\t\t\t\t\t\tauto block = entry->value.load(std::memory_order_relaxed);\n\t\t\t\t\t\tif (MOODYCAMEL_NOEXCEPT_ASSIGN(T, T&&, details::deref_noexcept(itemFirst) = std::move((*(*block)[index])))) {\n\t\t\t\t\t\t\twhile (index != endIndex) {\n\t\t\t\t\t\t\t\tauto& el = *((*block)[index]);\n\t\t\t\t\t\t\t\t*itemFirst++ = std::move(el);\n\t\t\t\t\t\t\t\tel.~T();\n\t\t\t\t\t\t\t\t++index;\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t\telse {\n\t\t\t\t\t\t\tMOODYCAMEL_TRY {\n\t\t\t\t\t\t\t\twhile (index != endIndex) {\n\t\t\t\t\t\t\t\t\tauto& el = *((*block)[index]);\n\t\t\t\t\t\t\t\t\t*itemFirst = std::move(el);\n\t\t\t\t\t\t\t\t\t++itemFirst;\n\t\t\t\t\t\t\t\t\tel.~T();\n\t\t\t\t\t\t\t\t\t++index;\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\tMOODYCAMEL_CATCH (...) 
{\n\t\t\t\t\t\t\t\tdo {\n\t\t\t\t\t\t\t\t\tentry = localBlockIndex->index[indexIndex];\n\t\t\t\t\t\t\t\t\tblock = entry->value.load(std::memory_order_relaxed);\n\t\t\t\t\t\t\t\t\twhile (index != endIndex) {\n\t\t\t\t\t\t\t\t\t\t(*block)[index++]->~T();\n\t\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\t\tif (block->ConcurrentQueue::Block::template set_many_empty<implicit_context>(blockStartIndex, static_cast<size_t>(endIndex - blockStartIndex))) {\n#ifdef MCDBGQ_NOLOCKFREE_IMPLICITPRODBLOCKINDEX\n\t\t\t\t\t\t\t\t\t\tdebug::DebugLock lock(mutex);\n#endif\n\t\t\t\t\t\t\t\t\t\tentry->value.store(nullptr, std::memory_order_relaxed);\n\t\t\t\t\t\t\t\t\t\tthis->parent->add_block_to_free_list(block);\n\t\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t\t\tindexIndex = (indexIndex + 1) & (localBlockIndex->capacity - 1);\n\t\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\t\tblockStartIndex = index;\n\t\t\t\t\t\t\t\t\tendIndex = (index & ~static_cast<index_t>(BLOCK_SIZE - 1)) + static_cast<index_t>(BLOCK_SIZE);\n\t\t\t\t\t\t\t\t\tendIndex = details::circular_less_than<index_t>(firstIndex + static_cast<index_t>(actualCount), endIndex) ? firstIndex + static_cast<index_t>(actualCount) : endIndex;\n\t\t\t\t\t\t\t\t} while (index != firstIndex + actualCount);\n\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\tMOODYCAMEL_RETHROW;\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t\tif (block->ConcurrentQueue::Block::template set_many_empty<implicit_context>(blockStartIndex, static_cast<size_t>(endIndex - blockStartIndex))) {\n\t\t\t\t\t\t\t{\n#ifdef MCDBGQ_NOLOCKFREE_IMPLICITPRODBLOCKINDEX\n\t\t\t\t\t\t\t\tdebug::DebugLock lock(mutex);\n#endif\n\t\t\t\t\t\t\t\t// Note that the set_many_empty above did a release, meaning that anybody who acquires the block\n\t\t\t\t\t\t\t\t// we're about to free can use it safely since our writes (and reads!) 
will have happened-before then.\n\t\t\t\t\t\t\t\tentry->value.store(nullptr, std::memory_order_relaxed);\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\tthis->parent->add_block_to_free_list(block);\t\t// releases the above store\n\t\t\t\t\t\t}\n\t\t\t\t\t\tindexIndex = (indexIndex + 1) & (localBlockIndex->capacity - 1);\n\t\t\t\t\t} while (index != firstIndex + actualCount);\n\t\t\t\t\t\n\t\t\t\t\treturn actualCount;\n\t\t\t\t}\n\t\t\t\telse {\n\t\t\t\t\tthis->dequeueOvercommit.fetch_add(desiredCount, std::memory_order_release);\n\t\t\t\t}\n\t\t\t}\n\t\t\t\n\t\t\treturn 0;\n\t\t}\n\t\t\n\tprivate:\n\t\t// The block size must be > 1, so any number with the low bit set is an invalid block base index\n\t\tstatic const index_t INVALID_BLOCK_BASE = 1;\n\t\t\n\t\tstruct BlockIndexEntry\n\t\t{\n\t\t\tstd::atomic<index_t> key;\n\t\t\tstd::atomic<Block*> value;\n\t\t};\n\t\t\n\t\tstruct BlockIndexHeader\n\t\t{\n\t\t\tsize_t capacity;\n\t\t\tstd::atomic<size_t> tail;\n\t\t\tBlockIndexEntry* entries;\n\t\t\tBlockIndexEntry** index;\n\t\t\tBlockIndexHeader* prev;\n\t\t};\n\t\t\n\t\ttemplate<AllocationMode allocMode>\n\t\tinline bool insert_block_index_entry(BlockIndexEntry*& idxEntry, index_t blockStartIndex)\n\t\t{\n\t\t\tauto localBlockIndex = blockIndex.load(std::memory_order_relaxed);\t\t// We're the only writer thread, relaxed is OK\n\t\t\tif (localBlockIndex == nullptr) {\n\t\t\t\treturn false;  // this can happen if new_block_index failed in the constructor\n\t\t\t}\n\t\t\tsize_t newTail = (localBlockIndex->tail.load(std::memory_order_relaxed) + 1) & (localBlockIndex->capacity - 1);\n\t\t\tidxEntry = localBlockIndex->index[newTail];\n\t\t\tif (idxEntry->key.load(std::memory_order_relaxed) == INVALID_BLOCK_BASE ||\n\t\t\t\tidxEntry->value.load(std::memory_order_relaxed) == nullptr) {\n\t\t\t\t\n\t\t\t\tidxEntry->key.store(blockStartIndex, std::memory_order_relaxed);\n\t\t\t\tlocalBlockIndex->tail.store(newTail, std::memory_order_release);\n\t\t\t\treturn true;\n\t\t\t}\n\t\t\t\n\t\t\t// 
No room in the old block index, try to allocate another one!\n\t\t\tMOODYCAMEL_CONSTEXPR_IF (allocMode == CannotAlloc) {\n\t\t\t\treturn false;\n\t\t\t}\n\t\t\telse if (!new_block_index()) {\n\t\t\t\treturn false;\n\t\t\t}\n\t\t\tlocalBlockIndex = blockIndex.load(std::memory_order_relaxed);\n\t\t\tnewTail = (localBlockIndex->tail.load(std::memory_order_relaxed) + 1) & (localBlockIndex->capacity - 1);\n\t\t\tidxEntry = localBlockIndex->index[newTail];\n\t\t\tassert(idxEntry->key.load(std::memory_order_relaxed) == INVALID_BLOCK_BASE);\n\t\t\tidxEntry->key.store(blockStartIndex, std::memory_order_relaxed);\n\t\t\tlocalBlockIndex->tail.store(newTail, std::memory_order_release);\n\t\t\treturn true;\n\t\t}\n\t\t\n\t\tinline void rewind_block_index_tail()\n\t\t{\n\t\t\tauto localBlockIndex = blockIndex.load(std::memory_order_relaxed);\n\t\t\tlocalBlockIndex->tail.store((localBlockIndex->tail.load(std::memory_order_relaxed) - 1) & (localBlockIndex->capacity - 1), std::memory_order_relaxed);\n\t\t}\n\t\t\n\t\tinline BlockIndexEntry* get_block_index_entry_for_index(index_t index) const\n\t\t{\n\t\t\tBlockIndexHeader* localBlockIndex;\n\t\t\tauto idx = get_block_index_index_for_index(index, localBlockIndex);\n\t\t\treturn localBlockIndex->index[idx];\n\t\t}\n\t\t\n\t\tinline size_t get_block_index_index_for_index(index_t index, BlockIndexHeader*& localBlockIndex) const\n\t\t{\n#ifdef MCDBGQ_NOLOCKFREE_IMPLICITPRODBLOCKINDEX\n\t\t\tdebug::DebugLock lock(mutex);\n#endif\n\t\t\tindex &= ~static_cast<index_t>(BLOCK_SIZE - 1);\n\t\t\tlocalBlockIndex = blockIndex.load(std::memory_order_acquire);\n\t\t\tauto tail = localBlockIndex->tail.load(std::memory_order_acquire);\n\t\t\tauto tailBase = localBlockIndex->index[tail]->key.load(std::memory_order_relaxed);\n\t\t\tassert(tailBase != INVALID_BLOCK_BASE);\n\t\t\t// Note: Must use division instead of shift because the index may wrap around, causing a negative\n\t\t\t// offset, whose negativity we want to preserve\n\t\t\tauto offset = 
static_cast<size_t>(static_cast<typename std::make_signed<index_t>::type>(index - tailBase) / BLOCK_SIZE);\n\t\t\tsize_t idx = (tail + offset) & (localBlockIndex->capacity - 1);\n\t\t\tassert(localBlockIndex->index[idx]->key.load(std::memory_order_relaxed) == index && localBlockIndex->index[idx]->value.load(std::memory_order_relaxed) != nullptr);\n\t\t\treturn idx;\n\t\t}\n\t\t\n\t\tbool new_block_index()\n\t\t{\n\t\t\tauto prev = blockIndex.load(std::memory_order_relaxed);\n\t\t\tsize_t prevCapacity = prev == nullptr ? 0 : prev->capacity;\n\t\t\tauto entryCount = prev == nullptr ? nextBlockIndexCapacity : prevCapacity;\n\t\t\tauto raw = static_cast<char*>((Traits::malloc)(\n\t\t\t\tsizeof(BlockIndexHeader) +\n\t\t\t\tstd::alignment_of<BlockIndexEntry>::value - 1 + sizeof(BlockIndexEntry) * entryCount +\n\t\t\t\tstd::alignment_of<BlockIndexEntry*>::value - 1 + sizeof(BlockIndexEntry*) * nextBlockIndexCapacity));\n\t\t\tif (raw == nullptr) {\n\t\t\t\treturn false;\n\t\t\t}\n\t\t\t\n\t\t\tauto header = new (raw) BlockIndexHeader;\n\t\t\tauto entries = reinterpret_cast<BlockIndexEntry*>(details::align_for<BlockIndexEntry>(raw + sizeof(BlockIndexHeader)));\n\t\t\tauto index = reinterpret_cast<BlockIndexEntry**>(details::align_for<BlockIndexEntry*>(reinterpret_cast<char*>(entries) + sizeof(BlockIndexEntry) * entryCount));\n\t\t\tif (prev != nullptr) {\n\t\t\t\tauto prevTail = prev->tail.load(std::memory_order_relaxed);\n\t\t\t\tauto prevPos = prevTail;\n\t\t\t\tsize_t i = 0;\n\t\t\t\tdo {\n\t\t\t\t\tprevPos = (prevPos + 1) & (prev->capacity - 1);\n\t\t\t\t\tindex[i++] = prev->index[prevPos];\n\t\t\t\t} while (prevPos != prevTail);\n\t\t\t\tassert(i == prevCapacity);\n\t\t\t}\n\t\t\tfor (size_t i = 0; i != entryCount; ++i) {\n\t\t\t\tnew (entries + i) BlockIndexEntry;\n\t\t\t\tentries[i].key.store(INVALID_BLOCK_BASE, std::memory_order_relaxed);\n\t\t\t\tindex[prevCapacity + i] = entries + i;\n\t\t\t}\n\t\t\theader->prev = prev;\n\t\t\theader->entries = 
entries;\n\t\t\theader->index = index;\n\t\t\theader->capacity = nextBlockIndexCapacity;\n\t\t\theader->tail.store((prevCapacity - 1) & (nextBlockIndexCapacity - 1), std::memory_order_relaxed);\n\t\t\t\n\t\t\tblockIndex.store(header, std::memory_order_release);\n\t\t\t\n\t\t\tnextBlockIndexCapacity <<= 1;\n\t\t\t\n\t\t\treturn true;\n\t\t}\n\t\t\n\tprivate:\n\t\tsize_t nextBlockIndexCapacity;\n\t\tstd::atomic<BlockIndexHeader*> blockIndex;\n\n#ifdef MOODYCAMEL_CPP11_THREAD_LOCAL_SUPPORTED\n\tpublic:\n\t\tdetails::ThreadExitListener threadExitListener;\n\tprivate:\n#endif\n\t\t\n#ifdef MOODYCAMEL_QUEUE_INTERNAL_DEBUG\n\tpublic:\n\t\tImplicitProducer* nextImplicitProducer;\n\tprivate:\n#endif\n\n#ifdef MCDBGQ_NOLOCKFREE_IMPLICITPRODBLOCKINDEX\n\t\tmutable debug::DebugMutex mutex;\n#endif\n#ifdef MCDBGQ_TRACKMEM\n\t\tfriend struct MemStats;\n#endif\n\t};\n\t\n\t\n\t//////////////////////////////////\n\t// Block pool manipulation\n\t//////////////////////////////////\n\t\n\tvoid populate_initial_block_list(size_t blockCount)\n\t{\n\t\tinitialBlockPoolSize = blockCount;\n\t\tif (initialBlockPoolSize == 0) {\n\t\t\tinitialBlockPool = nullptr;\n\t\t\treturn;\n\t\t}\n\t\t\n\t\tinitialBlockPool = create_array<Block>(blockCount);\n\t\tif (initialBlockPool == nullptr) {\n\t\t\tinitialBlockPoolSize = 0;\n\t\t}\n\t\tfor (size_t i = 0; i < initialBlockPoolSize; ++i) {\n\t\t\tinitialBlockPool[i].dynamicallyAllocated = false;\n\t\t}\n\t}\n\t\n\tinline Block* try_get_block_from_initial_pool()\n\t{\n\t\tif (initialBlockPoolIndex.load(std::memory_order_relaxed) >= initialBlockPoolSize) {\n\t\t\treturn nullptr;\n\t\t}\n\t\t\n\t\tauto index = initialBlockPoolIndex.fetch_add(1, std::memory_order_relaxed);\n\t\t\n\t\treturn index < initialBlockPoolSize ? 
(initialBlockPool + index) : nullptr;\n\t}\n\t\n\tinline void add_block_to_free_list(Block* block)\n\t{\n#ifdef MCDBGQ_TRACKMEM\n\t\tblock->owner = nullptr;\n#endif\n\t\tfreeList.add(block);\n\t}\n\t\n\tinline void add_blocks_to_free_list(Block* block)\n\t{\n\t\twhile (block != nullptr) {\n\t\t\tauto next = block->next;\n\t\t\tadd_block_to_free_list(block);\n\t\t\tblock = next;\n\t\t}\n\t}\n\t\n\tinline Block* try_get_block_from_free_list()\n\t{\n\t\treturn freeList.try_get();\n\t}\n\t\n\t// Gets a free block from one of the memory pools, or allocates a new one (if applicable)\n\ttemplate<AllocationMode canAlloc>\n\tBlock* requisition_block()\n\t{\n\t\tauto block = try_get_block_from_initial_pool();\n\t\tif (block != nullptr) {\n\t\t\treturn block;\n\t\t}\n\t\t\n\t\tblock = try_get_block_from_free_list();\n\t\tif (block != nullptr) {\n\t\t\treturn block;\n\t\t}\n\t\t\n\t\tMOODYCAMEL_CONSTEXPR_IF (canAlloc == CanAlloc) {\n\t\t\treturn create<Block>();\n\t\t}\n\t\telse {\n\t\t\treturn nullptr;\n\t\t}\n\t}\n\t\n\n#ifdef MCDBGQ_TRACKMEM\n\tpublic:\n\t\tstruct MemStats {\n\t\t\tsize_t allocatedBlocks;\n\t\t\tsize_t usedBlocks;\n\t\t\tsize_t freeBlocks;\n\t\t\tsize_t ownedBlocksExplicit;\n\t\t\tsize_t ownedBlocksImplicit;\n\t\t\tsize_t implicitProducers;\n\t\t\tsize_t explicitProducers;\n\t\t\tsize_t elementsEnqueued;\n\t\t\tsize_t blockClassBytes;\n\t\t\tsize_t queueClassBytes;\n\t\t\tsize_t implicitBlockIndexBytes;\n\t\t\tsize_t explicitBlockIndexBytes;\n\t\t\t\n\t\t\tfriend class ConcurrentQueue;\n\t\t\t\n\t\tprivate:\n\t\t\tstatic MemStats getFor(ConcurrentQueue* q)\n\t\t\t{\n\t\t\t\tMemStats stats = { 0 };\n\t\t\t\t\n\t\t\t\tstats.elementsEnqueued = q->size_approx();\n\t\t\t\n\t\t\t\tauto block = q->freeList.head_unsafe();\n\t\t\t\twhile (block != nullptr) {\n\t\t\t\t\t++stats.allocatedBlocks;\n\t\t\t\t\t++stats.freeBlocks;\n\t\t\t\t\tblock = block->freeListNext.load(std::memory_order_relaxed);\n\t\t\t\t}\n\t\t\t\t\n\t\t\t\tfor (auto ptr = 
q->producerListTail.load(std::memory_order_acquire); ptr != nullptr; ptr = ptr->next_prod()) {\n\t\t\t\t\tbool implicit = dynamic_cast<ImplicitProducer*>(ptr) != nullptr;\n\t\t\t\t\tstats.implicitProducers += implicit ? 1 : 0;\n\t\t\t\t\tstats.explicitProducers += implicit ? 0 : 1;\n\t\t\t\t\t\n\t\t\t\t\tif (implicit) {\n\t\t\t\t\t\tauto prod = static_cast<ImplicitProducer*>(ptr);\n\t\t\t\t\t\tstats.queueClassBytes += sizeof(ImplicitProducer);\n\t\t\t\t\t\tauto head = prod->headIndex.load(std::memory_order_relaxed);\n\t\t\t\t\t\tauto tail = prod->tailIndex.load(std::memory_order_relaxed);\n\t\t\t\t\t\tauto hash = prod->blockIndex.load(std::memory_order_relaxed);\n\t\t\t\t\t\tif (hash != nullptr) {\n\t\t\t\t\t\t\tfor (size_t i = 0; i != hash->capacity; ++i) {\n\t\t\t\t\t\t\t\tif (hash->index[i]->key.load(std::memory_order_relaxed) != ImplicitProducer::INVALID_BLOCK_BASE && hash->index[i]->value.load(std::memory_order_relaxed) != nullptr) {\n\t\t\t\t\t\t\t\t\t++stats.allocatedBlocks;\n\t\t\t\t\t\t\t\t\t++stats.ownedBlocksImplicit;\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\tstats.implicitBlockIndexBytes += hash->capacity * sizeof(typename ImplicitProducer::BlockIndexEntry);\n\t\t\t\t\t\t\tfor (; hash != nullptr; hash = hash->prev) {\n\t\t\t\t\t\t\t\tstats.implicitBlockIndexBytes += sizeof(typename ImplicitProducer::BlockIndexHeader) + hash->capacity * sizeof(typename ImplicitProducer::BlockIndexEntry*);\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t\tfor (; details::circular_less_than<index_t>(head, tail); head += BLOCK_SIZE) {\n\t\t\t\t\t\t\t//auto block = prod->get_block_index_entry_for_index(head);\n\t\t\t\t\t\t\t++stats.usedBlocks;\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t\telse {\n\t\t\t\t\t\tauto prod = static_cast<ExplicitProducer*>(ptr);\n\t\t\t\t\t\tstats.queueClassBytes += sizeof(ExplicitProducer);\n\t\t\t\t\t\tauto tailBlock = prod->tailBlock;\n\t\t\t\t\t\tbool wasNonEmpty = false;\n\t\t\t\t\t\tif (tailBlock != nullptr) {\n\t\t\t\t\t\t\tauto block = 
tailBlock;\n\t\t\t\t\t\t\tdo {\n\t\t\t\t\t\t\t\t++stats.allocatedBlocks;\n\t\t\t\t\t\t\t\tif (!block->ConcurrentQueue::Block::template is_empty<explicit_context>() || wasNonEmpty) {\n\t\t\t\t\t\t\t\t\t++stats.usedBlocks;\n\t\t\t\t\t\t\t\t\twasNonEmpty = wasNonEmpty || block != tailBlock;\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t\t++stats.ownedBlocksExplicit;\n\t\t\t\t\t\t\t\tblock = block->next;\n\t\t\t\t\t\t\t} while (block != tailBlock);\n\t\t\t\t\t\t}\n\t\t\t\t\t\tauto index = prod->blockIndex.load(std::memory_order_relaxed);\n\t\t\t\t\t\twhile (index != nullptr) {\n\t\t\t\t\t\t\tstats.explicitBlockIndexBytes += sizeof(typename ExplicitProducer::BlockIndexHeader) + index->size * sizeof(typename ExplicitProducer::BlockIndexEntry);\n\t\t\t\t\t\t\tindex = static_cast<typename ExplicitProducer::BlockIndexHeader*>(index->prev);\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\t\n\t\t\t\tauto freeOnInitialPool = q->initialBlockPoolIndex.load(std::memory_order_relaxed) >= q->initialBlockPoolSize ? 0 : q->initialBlockPoolSize - q->initialBlockPoolIndex.load(std::memory_order_relaxed);\n\t\t\t\tstats.allocatedBlocks += freeOnInitialPool;\n\t\t\t\tstats.freeBlocks += freeOnInitialPool;\n\t\t\t\t\n\t\t\t\tstats.blockClassBytes = sizeof(Block) * stats.allocatedBlocks;\n\t\t\t\tstats.queueClassBytes += sizeof(ConcurrentQueue);\n\t\t\t\t\n\t\t\t\treturn stats;\n\t\t\t}\n\t\t};\n\t\t\n\t\t// For debugging only. 
Not thread-safe.\n\t\tMemStats getMemStats()\n\t\t{\n\t\t\treturn MemStats::getFor(this);\n\t\t}\n\tprivate:\n\t\tfriend struct MemStats;\n#endif\n\t\n\t\n\t//////////////////////////////////\n\t// Producer list manipulation\n\t//////////////////////////////////\t\n\t\n\tProducerBase* recycle_or_create_producer(bool isExplicit)\n\t{\n\t\tbool recycled;\n\t\treturn recycle_or_create_producer(isExplicit, recycled);\n\t}\n\t\n\tProducerBase* recycle_or_create_producer(bool isExplicit, bool& recycled)\n\t{\n#ifdef MCDBGQ_NOLOCKFREE_IMPLICITPRODHASH\n\t\tdebug::DebugLock lock(implicitProdMutex);\n#endif\n\t\t// Try to re-use one first\n\t\tfor (auto ptr = producerListTail.load(std::memory_order_acquire); ptr != nullptr; ptr = ptr->next_prod()) {\n\t\t\tif (ptr->inactive.load(std::memory_order_relaxed) && ptr->isExplicit == isExplicit) {\n\t\t\t\tbool expected = true;\n\t\t\t\tif (ptr->inactive.compare_exchange_strong(expected, /* desired */ false, std::memory_order_acquire, std::memory_order_relaxed)) {\n\t\t\t\t\t// We caught one! It's been marked as activated, the caller can have it\n\t\t\t\t\trecycled = true;\n\t\t\t\t\treturn ptr;\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\t\n\t\trecycled = false;\n\t\treturn add_producer(isExplicit ? 
static_cast<ProducerBase*>(create<ExplicitProducer>(this)) : create<ImplicitProducer>(this));\n\t}\n\t\n\tProducerBase* add_producer(ProducerBase* producer)\n\t{\n\t\t// Handle failed memory allocation\n\t\tif (producer == nullptr) {\n\t\t\treturn nullptr;\n\t\t}\n\t\t\n\t\tproducerCount.fetch_add(1, std::memory_order_relaxed);\n\t\t\n\t\t// Add it to the lock-free list\n\t\tauto prevTail = producerListTail.load(std::memory_order_relaxed);\n\t\tdo {\n\t\t\tproducer->next = prevTail;\n\t\t} while (!producerListTail.compare_exchange_weak(prevTail, producer, std::memory_order_release, std::memory_order_relaxed));\n\t\t\n#ifdef MOODYCAMEL_QUEUE_INTERNAL_DEBUG\n\t\tif (producer->isExplicit) {\n\t\t\tauto prevTailExplicit = explicitProducers.load(std::memory_order_relaxed);\n\t\t\tdo {\n\t\t\t\tstatic_cast<ExplicitProducer*>(producer)->nextExplicitProducer = prevTailExplicit;\n\t\t\t} while (!explicitProducers.compare_exchange_weak(prevTailExplicit, static_cast<ExplicitProducer*>(producer), std::memory_order_release, std::memory_order_relaxed));\n\t\t}\n\t\telse {\n\t\t\tauto prevTailImplicit = implicitProducers.load(std::memory_order_relaxed);\n\t\t\tdo {\n\t\t\t\tstatic_cast<ImplicitProducer*>(producer)->nextImplicitProducer = prevTailImplicit;\n\t\t\t} while (!implicitProducers.compare_exchange_weak(prevTailImplicit, static_cast<ImplicitProducer*>(producer), std::memory_order_release, std::memory_order_relaxed));\n\t\t}\n#endif\n\t\t\n\t\treturn producer;\n\t}\n\t\n\tvoid reown_producers()\n\t{\n\t\t// After another instance is moved-into/swapped-with this one, all the\n\t\t// producers we stole still think their parents are the other queue.\n\t\t// So fix them up!\n\t\tfor (auto ptr = producerListTail.load(std::memory_order_relaxed); ptr != nullptr; ptr = ptr->next_prod()) {\n\t\t\tptr->parent = this;\n\t\t}\n\t}\n\t\n\t\n\t//////////////////////////////////\n\t// Implicit producer hash\n\t//////////////////////////////////\n\t\n\tstruct 
ImplicitProducerKVP\n\t{\n\t\tstd::atomic<details::thread_id_t> key;\n\t\tImplicitProducer* value;\t\t// No need for atomicity since it's only read by the thread that sets it in the first place\n\t\t\n\t\tImplicitProducerKVP() : value(nullptr) { }\n\t\t\n\t\tImplicitProducerKVP(ImplicitProducerKVP&& other) MOODYCAMEL_NOEXCEPT\n\t\t{\n\t\t\tkey.store(other.key.load(std::memory_order_relaxed), std::memory_order_relaxed);\n\t\t\tvalue = other.value;\n\t\t}\n\t\t\n\t\tinline ImplicitProducerKVP& operator=(ImplicitProducerKVP&& other) MOODYCAMEL_NOEXCEPT\n\t\t{\n\t\t\tswap(other);\n\t\t\treturn *this;\n\t\t}\n\t\t\n\t\tinline void swap(ImplicitProducerKVP& other) MOODYCAMEL_NOEXCEPT\n\t\t{\n\t\t\tif (this != &other) {\n\t\t\t\tdetails::swap_relaxed(key, other.key);\n\t\t\t\tstd::swap(value, other.value);\n\t\t\t}\n\t\t}\n\t};\n\t\n\ttemplate<typename XT, typename XTraits>\n\tfriend void moodycamel::swap(typename ConcurrentQueue<XT, XTraits>::ImplicitProducerKVP&, typename ConcurrentQueue<XT, XTraits>::ImplicitProducerKVP&) MOODYCAMEL_NOEXCEPT;\n\t\n\tstruct ImplicitProducerHash\n\t{\n\t\tsize_t capacity;\n\t\tImplicitProducerKVP* entries;\n\t\tImplicitProducerHash* prev;\n\t};\n\t\n\tinline void populate_initial_implicit_producer_hash()\n\t{\n\t\tMOODYCAMEL_CONSTEXPR_IF (INITIAL_IMPLICIT_PRODUCER_HASH_SIZE == 0) {\n\t\t\treturn;\n\t\t}\n\t\telse {\n\t\t\timplicitProducerHashCount.store(0, std::memory_order_relaxed);\n\t\t\tauto hash = &initialImplicitProducerHash;\n\t\t\thash->capacity = INITIAL_IMPLICIT_PRODUCER_HASH_SIZE;\n\t\t\thash->entries = &initialImplicitProducerHashEntries[0];\n\t\t\tfor (size_t i = 0; i != INITIAL_IMPLICIT_PRODUCER_HASH_SIZE; ++i) {\n\t\t\t\tinitialImplicitProducerHashEntries[i].key.store(details::invalid_thread_id, std::memory_order_relaxed);\n\t\t\t}\n\t\t\thash->prev = nullptr;\n\t\t\timplicitProducerHash.store(hash, std::memory_order_relaxed);\n\t\t}\n\t}\n\t\n\tvoid swap_implicit_producer_hashes(ConcurrentQueue& 
other)\n\t{\n\t\tMOODYCAMEL_CONSTEXPR_IF (INITIAL_IMPLICIT_PRODUCER_HASH_SIZE == 0) {\n\t\t\treturn;\n\t\t}\n\t\telse {\n\t\t\t// Swap (assumes our implicit producer hash is initialized)\n\t\t\tinitialImplicitProducerHashEntries.swap(other.initialImplicitProducerHashEntries);\n\t\t\tinitialImplicitProducerHash.entries = &initialImplicitProducerHashEntries[0];\n\t\t\tother.initialImplicitProducerHash.entries = &other.initialImplicitProducerHashEntries[0];\n\t\t\t\n\t\t\tdetails::swap_relaxed(implicitProducerHashCount, other.implicitProducerHashCount);\n\t\t\t\n\t\t\tdetails::swap_relaxed(implicitProducerHash, other.implicitProducerHash);\n\t\t\tif (implicitProducerHash.load(std::memory_order_relaxed) == &other.initialImplicitProducerHash) {\n\t\t\t\timplicitProducerHash.store(&initialImplicitProducerHash, std::memory_order_relaxed);\n\t\t\t}\n\t\t\telse {\n\t\t\t\tImplicitProducerHash* hash;\n\t\t\t\tfor (hash = implicitProducerHash.load(std::memory_order_relaxed); hash->prev != &other.initialImplicitProducerHash; hash = hash->prev) {\n\t\t\t\t\tcontinue;\n\t\t\t\t}\n\t\t\t\thash->prev = &initialImplicitProducerHash;\n\t\t\t}\n\t\t\tif (other.implicitProducerHash.load(std::memory_order_relaxed) == &initialImplicitProducerHash) {\n\t\t\t\tother.implicitProducerHash.store(&other.initialImplicitProducerHash, std::memory_order_relaxed);\n\t\t\t}\n\t\t\telse {\n\t\t\t\tImplicitProducerHash* hash;\n\t\t\t\tfor (hash = other.implicitProducerHash.load(std::memory_order_relaxed); hash->prev != &initialImplicitProducerHash; hash = hash->prev) {\n\t\t\t\t\tcontinue;\n\t\t\t\t}\n\t\t\t\thash->prev = &other.initialImplicitProducerHash;\n\t\t\t}\n\t\t}\n\t}\n\t\n\t// Only fails (returns nullptr) if memory allocation fails\n\tImplicitProducer* get_or_add_implicit_producer()\n\t{\n\t\t// Note that since the data is essentially thread-local (key is thread ID),\n\t\t// there's a reduced need for fences (memory ordering is already consistent\n\t\t// for any individual thread), except 
for the current table itself.\n\t\t\n\t\t// Start by looking for the thread ID in the current and all previous hash tables.\n\t\t// If it's not found, it must not be in there yet, since this same thread would\n\t\t// have added it previously to one of the tables that we traversed.\n\t\t\n\t\t// Code and algorithm adapted from http://preshing.com/20130605/the-worlds-simplest-lock-free-hash-table\n\t\t\n#ifdef MCDBGQ_NOLOCKFREE_IMPLICITPRODHASH\n\t\tdebug::DebugLock lock(implicitProdMutex);\n#endif\n\t\t\n\t\tauto id = details::thread_id();\n\t\tauto hashedId = details::hash_thread_id(id);\n\t\t\n\t\tauto mainHash = implicitProducerHash.load(std::memory_order_acquire);\n\t\tassert(mainHash != nullptr);  // silence clang-tidy and MSVC warnings (hash cannot be null)\n\t\tfor (auto hash = mainHash; hash != nullptr; hash = hash->prev) {\n\t\t\t// Look for the id in this hash\n\t\t\tauto index = hashedId;\n\t\t\twhile (true) {\t\t// Not an infinite loop because at least one slot is free in the hash table\n\t\t\t\tindex &= hash->capacity - 1;\n\t\t\t\t\n\t\t\t\tauto probedKey = hash->entries[index].key.load(std::memory_order_relaxed);\n\t\t\t\tif (probedKey == id) {\n\t\t\t\t\t// Found it! 
If we had to search several hashes deep, though, we should lazily add it\n\t\t\t\t\t// to the current main hash table to avoid the extended search next time.\n\t\t\t\t\t// Note there's guaranteed to be room in the current hash table since every subsequent\n\t\t\t\t\t// table implicitly reserves space for all previous tables (there's only one\n\t\t\t\t\t// implicitProducerHashCount).\n\t\t\t\t\tauto value = hash->entries[index].value;\n\t\t\t\t\tif (hash != mainHash) {\n\t\t\t\t\t\tindex = hashedId;\n\t\t\t\t\t\twhile (true) {\n\t\t\t\t\t\t\tindex &= mainHash->capacity - 1;\n\t\t\t\t\t\t\tprobedKey = mainHash->entries[index].key.load(std::memory_order_relaxed);\n\t\t\t\t\t\t\tauto empty = details::invalid_thread_id;\n#ifdef MOODYCAMEL_CPP11_THREAD_LOCAL_SUPPORTED\n\t\t\t\t\t\t\tauto reusable = details::invalid_thread_id2;\n\t\t\t\t\t\t\tif ((probedKey == empty    && mainHash->entries[index].key.compare_exchange_strong(empty,    id, std::memory_order_relaxed, std::memory_order_relaxed)) ||\n\t\t\t\t\t\t\t\t(probedKey == reusable && mainHash->entries[index].key.compare_exchange_strong(reusable, id, std::memory_order_acquire, std::memory_order_acquire))) {\n#else\n\t\t\t\t\t\t\tif ((probedKey == empty    && mainHash->entries[index].key.compare_exchange_strong(empty,    id, std::memory_order_relaxed, std::memory_order_relaxed))) {\n#endif\n\t\t\t\t\t\t\t\tmainHash->entries[index].value = value;\n\t\t\t\t\t\t\t\tbreak;\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t++index;\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t\t\n\t\t\t\t\treturn value;\n\t\t\t\t}\n\t\t\t\tif (probedKey == details::invalid_thread_id) {\n\t\t\t\t\tbreak;\t\t// Not in this hash table\n\t\t\t\t}\n\t\t\t\t++index;\n\t\t\t}\n\t\t}\n\t\t\n\t\t// Insert!\n\t\tauto newCount = 1 + implicitProducerHashCount.fetch_add(1, std::memory_order_relaxed);\n\t\twhile (true) {\n\t\t\t// NOLINTNEXTLINE(clang-analyzer-core.NullDereference)\n\t\t\tif (newCount >= (mainHash->capacity >> 1) && 
!implicitProducerHashResizeInProgress.test_and_set(std::memory_order_acquire)) {\n\t\t\t\t// We've acquired the resize lock, try to allocate a bigger hash table.\n\t\t\t\t// Note the acquire fence synchronizes with the release fence at the end of this block, and hence when\n\t\t\t\t// we reload implicitProducerHash it must be the most recent version (it only gets changed within this\n\t\t\t\t// locked block).\n\t\t\t\tmainHash = implicitProducerHash.load(std::memory_order_acquire);\n\t\t\t\tif (newCount >= (mainHash->capacity >> 1)) {\n\t\t\t\t\tauto newCapacity = mainHash->capacity << 1;\n\t\t\t\t\twhile (newCount >= (newCapacity >> 1)) {\n\t\t\t\t\t\tnewCapacity <<= 1;\n\t\t\t\t\t}\n\t\t\t\t\tauto raw = static_cast<char*>((Traits::malloc)(sizeof(ImplicitProducerHash) + std::alignment_of<ImplicitProducerKVP>::value - 1 + sizeof(ImplicitProducerKVP) * newCapacity));\n\t\t\t\t\tif (raw == nullptr) {\n\t\t\t\t\t\t// Allocation failed\n\t\t\t\t\t\timplicitProducerHashCount.fetch_sub(1, std::memory_order_relaxed);\n\t\t\t\t\t\timplicitProducerHashResizeInProgress.clear(std::memory_order_relaxed);\n\t\t\t\t\t\treturn nullptr;\n\t\t\t\t\t}\n\t\t\t\t\t\n\t\t\t\t\tauto newHash = new (raw) ImplicitProducerHash;\n\t\t\t\t\tnewHash->capacity = static_cast<size_t>(newCapacity);\n\t\t\t\t\tnewHash->entries = reinterpret_cast<ImplicitProducerKVP*>(details::align_for<ImplicitProducerKVP>(raw + sizeof(ImplicitProducerHash)));\n\t\t\t\t\tfor (size_t i = 0; i != newCapacity; ++i) {\n\t\t\t\t\t\tnew (newHash->entries + i) ImplicitProducerKVP;\n\t\t\t\t\t\tnewHash->entries[i].key.store(details::invalid_thread_id, std::memory_order_relaxed);\n\t\t\t\t\t}\n\t\t\t\t\tnewHash->prev = mainHash;\n\t\t\t\t\timplicitProducerHash.store(newHash, std::memory_order_release);\n\t\t\t\t\timplicitProducerHashResizeInProgress.clear(std::memory_order_release);\n\t\t\t\t\tmainHash = newHash;\n\t\t\t\t}\n\t\t\t\telse 
{\n\t\t\t\t\timplicitProducerHashResizeInProgress.clear(std::memory_order_release);\n\t\t\t\t}\n\t\t\t}\n\t\t\t\n\t\t\t// If it's < three-quarters full, add to the old one anyway so that we don't have to wait for the next table\n\t\t\t// to finish being allocated by another thread (and if we just finished allocating above, the condition will\n\t\t\t// always be true)\n\t\t\tif (newCount < (mainHash->capacity >> 1) + (mainHash->capacity >> 2)) {\n\t\t\t\tbool recycled;\n\t\t\t\tauto producer = static_cast<ImplicitProducer*>(recycle_or_create_producer(false, recycled));\n\t\t\t\tif (producer == nullptr) {\n\t\t\t\t\timplicitProducerHashCount.fetch_sub(1, std::memory_order_relaxed);\n\t\t\t\t\treturn nullptr;\n\t\t\t\t}\n\t\t\t\tif (recycled) {\n\t\t\t\t\timplicitProducerHashCount.fetch_sub(1, std::memory_order_relaxed);\n\t\t\t\t}\n\t\t\t\t\n#ifdef MOODYCAMEL_CPP11_THREAD_LOCAL_SUPPORTED\n\t\t\t\tproducer->threadExitListener.callback = &ConcurrentQueue::implicit_producer_thread_exited_callback;\n\t\t\t\tproducer->threadExitListener.userData = producer;\n\t\t\t\tdetails::ThreadExitNotifier::subscribe(&producer->threadExitListener);\n#endif\n\t\t\t\t\n\t\t\t\tauto index = hashedId;\n\t\t\t\twhile (true) {\n\t\t\t\t\tindex &= mainHash->capacity - 1;\n\t\t\t\t\tauto probedKey = mainHash->entries[index].key.load(std::memory_order_relaxed);\n\t\t\t\t\t\n\t\t\t\t\tauto empty = details::invalid_thread_id;\n#ifdef MOODYCAMEL_CPP11_THREAD_LOCAL_SUPPORTED\n\t\t\t\t\tauto reusable = details::invalid_thread_id2;\n\t\t\t\t\tif ((probedKey == empty    && mainHash->entries[index].key.compare_exchange_strong(empty,    id, std::memory_order_relaxed, std::memory_order_relaxed)) ||\n\t\t\t\t\t\t(probedKey == reusable && mainHash->entries[index].key.compare_exchange_strong(reusable, id, std::memory_order_acquire, std::memory_order_acquire))) {\n#else\n\t\t\t\t\tif ((probedKey == empty    && mainHash->entries[index].key.compare_exchange_strong(empty,    id, std::memory_order_relaxed, 
std::memory_order_relaxed))) {\n#endif\n\t\t\t\t\t\tmainHash->entries[index].value = producer;\n\t\t\t\t\t\tbreak;\n\t\t\t\t\t}\n\t\t\t\t\t++index;\n\t\t\t\t}\n\t\t\t\treturn producer;\n\t\t\t}\n\t\t\t\n\t\t\t// Hmm, the old hash is quite full and somebody else is busy allocating a new one.\n\t\t\t// We need to wait for the allocating thread to finish (if it succeeds, we add, if not,\n\t\t\t// we try to allocate ourselves).\n\t\t\tmainHash = implicitProducerHash.load(std::memory_order_acquire);\n\t\t}\n\t}\n\t\n#ifdef MOODYCAMEL_CPP11_THREAD_LOCAL_SUPPORTED\n\tvoid implicit_producer_thread_exited(ImplicitProducer* producer)\n\t{\n\t\t// Remove from thread exit listeners\n\t\tdetails::ThreadExitNotifier::unsubscribe(&producer->threadExitListener);\n\t\t\n\t\t// Remove from hash\n#ifdef MCDBGQ_NOLOCKFREE_IMPLICITPRODHASH\n\t\tdebug::DebugLock lock(implicitProdMutex);\n#endif\n\t\tauto hash = implicitProducerHash.load(std::memory_order_acquire);\n\t\tassert(hash != nullptr);\t\t// The thread exit listener is only registered if we were added to a hash in the first place\n\t\tauto id = details::thread_id();\n\t\tauto hashedId = details::hash_thread_id(id);\n\t\tdetails::thread_id_t probedKey;\n\t\t\n\t\t// We need to traverse all the hashes just in case other threads aren't on the current one yet and are\n\t\t// trying to add an entry thinking there's a free slot (because they reused a producer)\n\t\tfor (; hash != nullptr; hash = hash->prev) {\n\t\t\tauto index = hashedId;\n\t\t\tdo {\n\t\t\t\tindex &= hash->capacity - 1;\n\t\t\t\tprobedKey = hash->entries[index].key.load(std::memory_order_relaxed);\n\t\t\t\tif (probedKey == id) {\n\t\t\t\t\thash->entries[index].key.store(details::invalid_thread_id2, std::memory_order_release);\n\t\t\t\t\tbreak;\n\t\t\t\t}\n\t\t\t\t++index;\n\t\t\t} while (probedKey != details::invalid_thread_id);\t\t// Can happen if the hash has changed but we weren't put back in it yet, or if we weren't added to this hash in the first 
place\n\t\t}\n\t\t\n\t\t// Mark the queue as being recyclable\n\t\tproducer->inactive.store(true, std::memory_order_release);\n\t}\n\t\n\tstatic void implicit_producer_thread_exited_callback(void* userData)\n\t{\n\t\tauto producer = static_cast<ImplicitProducer*>(userData);\n\t\tauto queue = producer->parent;\n\t\tqueue->implicit_producer_thread_exited(producer);\n\t}\n#endif\n\t\n\t//////////////////////////////////\n\t// Utility functions\n\t//////////////////////////////////\n\n\ttemplate<typename TAlign>\n\tstatic inline void* aligned_malloc(size_t size)\n\t{\n\t\tMOODYCAMEL_CONSTEXPR_IF (std::alignment_of<TAlign>::value <= std::alignment_of<details::max_align_t>::value)\n\t\t\treturn (Traits::malloc)(size);\n\t\telse {\n\t\t\tsize_t alignment = std::alignment_of<TAlign>::value;\n\t\t\tvoid* raw = (Traits::malloc)(size + alignment - 1 + sizeof(void*));\n\t\t\tif (!raw)\n\t\t\t\treturn nullptr;\n\t\t\tchar* ptr = details::align_for<TAlign>(reinterpret_cast<char*>(raw) + sizeof(void*));\n\t\t\t*(reinterpret_cast<void**>(ptr) - 1) = raw;\n\t\t\treturn ptr;\n\t\t}\n\t}\n\n\ttemplate<typename TAlign>\n\tstatic inline void aligned_free(void* ptr)\n\t{\n\t\tMOODYCAMEL_CONSTEXPR_IF (std::alignment_of<TAlign>::value <= std::alignment_of<details::max_align_t>::value)\n\t\t\treturn (Traits::free)(ptr);\n\t\telse\n\t\t\t(Traits::free)(ptr ? 
*(reinterpret_cast<void**>(ptr) - 1) : nullptr);\n\t}\n\n\ttemplate<typename U>\n\tstatic inline U* create_array(size_t count)\n\t{\n\t\tassert(count > 0);\n\t\tU* p = static_cast<U*>(aligned_malloc<U>(sizeof(U) * count));\n\t\tif (p == nullptr)\n\t\t\treturn nullptr;\n\n\t\tfor (size_t i = 0; i != count; ++i)\n\t\t\tnew (p + i) U();\n\t\treturn p;\n\t}\n\n\ttemplate<typename U>\n\tstatic inline void destroy_array(U* p, size_t count)\n\t{\n\t\tif (p != nullptr) {\n\t\t\tassert(count > 0);\n\t\t\tfor (size_t i = count; i != 0; )\n\t\t\t\t(p + --i)->~U();\n\t\t}\n\t\taligned_free<U>(p);\n\t}\n\n\ttemplate<typename U>\n\tstatic inline U* create()\n\t{\n\t\tvoid* p = aligned_malloc<U>(sizeof(U));\n\t\treturn p != nullptr ? new (p) U : nullptr;\n\t}\n\n\ttemplate<typename U, typename A1>\n\tstatic inline U* create(A1&& a1)\n\t{\n\t\tvoid* p = aligned_malloc<U>(sizeof(U));\n\t\treturn p != nullptr ? new (p) U(std::forward<A1>(a1)) : nullptr;\n\t}\n\n\ttemplate<typename U>\n\tstatic inline void destroy(U* p)\n\t{\n\t\tif (p != nullptr)\n\t\t\tp->~U();\n\t\taligned_free<U>(p);\n\t}\n\nprivate:\n\tstd::atomic<ProducerBase*> producerListTail;\n\tstd::atomic<std::uint32_t> producerCount;\n\t\n\tstd::atomic<size_t> initialBlockPoolIndex;\n\tBlock* initialBlockPool;\n\tsize_t initialBlockPoolSize;\n\t\n#ifndef MCDBGQ_USEDEBUGFREELIST\n\tFreeList<Block> freeList;\n#else\n\tdebug::DebugFreeList<Block> freeList;\n#endif\n\t\n\tstd::atomic<ImplicitProducerHash*> implicitProducerHash;\n\tstd::atomic<size_t> implicitProducerHashCount;\t\t// Number of slots logically used\n\tImplicitProducerHash initialImplicitProducerHash;\n\tstd::array<ImplicitProducerKVP, INITIAL_IMPLICIT_PRODUCER_HASH_SIZE> initialImplicitProducerHashEntries;\n\tstd::atomic_flag implicitProducerHashResizeInProgress;\n\t\n\tstd::atomic<std::uint32_t> nextExplicitConsumerId;\n\tstd::atomic<std::uint32_t> globalExplicitConsumerOffset;\n\t\n#ifdef MCDBGQ_NOLOCKFREE_IMPLICITPRODHASH\n\tdebug::DebugMutex 
implicitProdMutex;\n#endif\n\t\n#ifdef MOODYCAMEL_QUEUE_INTERNAL_DEBUG\n\tstd::atomic<ExplicitProducer*> explicitProducers;\n\tstd::atomic<ImplicitProducer*> implicitProducers;\n#endif\n};\n\n\ntemplate<typename T, typename Traits>\nProducerToken::ProducerToken(ConcurrentQueue<T, Traits>& queue)\n\t: producer(queue.recycle_or_create_producer(true))\n{\n\tif (producer != nullptr) {\n\t\tproducer->token = this;\n\t}\n}\n\ntemplate<typename T, typename Traits>\nProducerToken::ProducerToken(BlockingConcurrentQueue<T, Traits>& queue)\n\t: producer(reinterpret_cast<ConcurrentQueue<T, Traits>*>(&queue)->recycle_or_create_producer(true))\n{\n\tif (producer != nullptr) {\n\t\tproducer->token = this;\n\t}\n}\n\ntemplate<typename T, typename Traits>\nConsumerToken::ConsumerToken(ConcurrentQueue<T, Traits>& queue)\n\t: itemsConsumedFromCurrent(0), currentProducer(nullptr), desiredProducer(nullptr)\n{\n\tinitialOffset = queue.nextExplicitConsumerId.fetch_add(1, std::memory_order_release);\n\tlastKnownGlobalOffset = static_cast<std::uint32_t>(-1);\n}\n\ntemplate<typename T, typename Traits>\nConsumerToken::ConsumerToken(BlockingConcurrentQueue<T, Traits>& queue)\n\t: itemsConsumedFromCurrent(0), currentProducer(nullptr), desiredProducer(nullptr)\n{\n\tinitialOffset = reinterpret_cast<ConcurrentQueue<T, Traits>*>(&queue)->nextExplicitConsumerId.fetch_add(1, std::memory_order_release);\n\tlastKnownGlobalOffset = static_cast<std::uint32_t>(-1);\n}\n\ntemplate<typename T, typename Traits>\ninline void swap(ConcurrentQueue<T, Traits>& a, ConcurrentQueue<T, Traits>& b) MOODYCAMEL_NOEXCEPT\n{\n\ta.swap(b);\n}\n\ninline void swap(ProducerToken& a, ProducerToken& b) MOODYCAMEL_NOEXCEPT\n{\n\ta.swap(b);\n}\n\ninline void swap(ConsumerToken& a, ConsumerToken& b) MOODYCAMEL_NOEXCEPT\n{\n\ta.swap(b);\n}\n\ntemplate<typename T, typename Traits>\ninline void swap(typename ConcurrentQueue<T, Traits>::ImplicitProducerKVP& a, typename ConcurrentQueue<T, Traits>::ImplicitProducerKVP& b) 
MOODYCAMEL_NOEXCEPT\n{\n\ta.swap(b);\n}\n\n}\n\n#if defined(_MSC_VER) && (!defined(_HAS_CXX17) || !_HAS_CXX17)\n#pragma warning(pop)\n#endif\n\n#if defined(__GNUC__)\n#pragma GCC diagnostic pop\n#endif\n"
  },
  {
    "path": "solvers/wheels/lockfreehash/cuckoo/Makefile",
    "content": "all:\n\tg++ main.cpp lockfree_hash_table.cpp -pthread -std=c++11\nclean:\n\trm -rf a.out\n"
  },
  {
    "path": "solvers/wheels/lockfreehash/cuckoo/benchmark_lockfree_ht.h",
    "content": "#ifndef BENCHMARK_LOCKFREE_HT\n#define BENCHMARK_LOCKFREE_HT\n\n#include <unordered_map>\n#include <iostream>\n#include <random>\n#include <algorithm>\n#include <pthread.h>\n#include <array>\n#include <unordered_map>\n\n#include \"cycle_timer.h\"\n#include \"lockfree_hash_table.h\"\n#include \"thread_service.h\"\n\n#define NUM_ITERS   3\n#define MAX_THREADS 24\n\n#define C_NUM_ELEMS 500\n\nclass BenchmarkLockFreeHT\n{\n  public:\n    BenchmarkLockFreeHT(int op_count, int capacity, \n                        int rweight, int idweight,\n                        int thread_count,\n                        double load_factor);\n\n    void benchmark_correctness();\n    void benchmark_hp();\n    void benchmark_all();\n    void run();\n\n  private:\n    int    m_rweight;\n    int    m_idweight;\n\n    int    m_thread_count;\n    int    m_op_count;\n    int    m_capacity;\n    double m_load_factor;\n};\n\nBenchmarkLockFreeHT::BenchmarkLockFreeHT(int op_count, int capacity, \n                                         int rweight, int idweight,\n                                         int thread_count, double load_factor)\n{\n  std::cout << \"*** BENCHMARKING LockFreeHT ***\" << std::endl;\n  m_op_count     = op_count;\n  m_load_factor  = load_factor; \n  m_capacity     = capacity;\n  m_thread_count = thread_count;\n\n  m_rweight      = rweight;\n  m_idweight     = idweight;\n}\n\nvoid BenchmarkLockFreeHT::benchmark_correctness()\n{\n  bool correct = true;\n\n  Lockfree_hash_table ht(2 * C_NUM_ELEMS, m_thread_count);\n  std::unordered_map<int, int> map;\n  map.reserve(2 * C_NUM_ELEMS);\n  \n  std::random_device                 rd;\n  std::mt19937                       mt(rd());\n  std::uniform_int_distribution<int> rng;\n\n  int elems[C_NUM_ELEMS];\n  for (int i = 0; i < C_NUM_ELEMS; i++)\n  {\n    int k = rng(mt);\n    elems[i] = k;\n    map[k] = k;\n  }\n  \n  pthread_t  workers[MAX_THREADS];\n  WorkerArgs args[MAX_THREADS];\n\n  for (int i = 0; i < 2; i++)\n  
{\n    args[i].num_elems = C_NUM_ELEMS / 2;\n    args[i].ht_p      = (void*)&ht;\n    args[i].elems     = elems;\n    args[i].start     = i * (C_NUM_ELEMS / 2);\n    args[i].tid       = i;\n\n    pthread_create(&workers[i], NULL, thread_insert<Lockfree_hash_table>, (void*)&args[i]);\n  }\n\n  for (int i = 0; i < 2; i++)\n  {\n    pthread_join(workers[i], NULL);\n  }\n\n  int count = 0;\n  for (std::pair<int, int> e : map)\n  {\n    std::pair<int, bool> r = ht.search(e.first, 0);\n    if (!r.second || e.second != r.first)\n    {\n\n      std::cout << \"\\t\" << \"Expected value, Received value, Received result = \" << e.second << \" \" << r.second << \" \"<< r.first << std::endl;\n      correct = false;\n      count++;\n    }\n  }\n\n  std::cout << \"\\t\" << count << \"/\" << C_NUM_ELEMS << \" errors\" << std::endl;\n\n  if (correct)\n    std::cout << \"\\t\" << \"Correctness test passed\" << std::endl;\n  else\n    std::cout << \"\\t\" << \"Correctness test failed\" << std::endl;\n\n}\n\nvoid BenchmarkLockFreeHT::benchmark_hp()\n{\n  Lockfree_hash_table ht(400000, m_thread_count);\n\n  std::random_device                 rd;\n  std::mt19937                       mt(rd());\n  std::uniform_int_distribution<int> rng;\n\n  std::array<int, 3> weights;\n  weights[0] = m_rweight;\n  weights[1] = m_idweight;\n  weights[2] = m_idweight;\n\n  std::default_random_engine         g;\n  std::discrete_distribution<int>    drng(weights.begin(), weights.end());\n\n  int insert[200000];\n  for (int i = 0; i < 200000; i++)\n  {\n    int k = rng(mt);\n    int v = rng(mt);\n    insert[i] = k;\n    ht.insert(k, v, 0);\n  }\n  \n  pthread_t  workers[MAX_THREADS];\n  WorkerArgs args[MAX_THREADS];\n\n  int num_elems = 200000 / m_thread_count;\n  for (int i = 0; i < m_thread_count; i++)\n  {\n    args[i].num_elems = num_elems;\n    args[i].ht_p      = (void*)&ht;\n    args[i].elems     = insert;\n    args[i].start     = i * num_elems;\n    args[i].tid       = i;\n    args[i].remove    = i < 
(m_thread_count / 4);\n\n    pthread_create(&workers[i], NULL, thread_remove<Lockfree_hash_table>, (void*)&args[i]);\n  }\n  \n  for (int i = 0; i < m_thread_count; i++)\n  {\n    pthread_join(workers[i], NULL);\n  }\n   \n  std::cout << \"\\t\" << \"Hazard Pointer test passed\" << std::endl;\n\n}\n\nvoid BenchmarkLockFreeHT::benchmark_all()\n{\n    Lockfree_hash_table ht(m_capacity, m_thread_count);\n\n    std::random_device                 rd;\n    std::mt19937                       mt(rd());\n    std::uniform_int_distribution<int> rng;\n\n    std::array<int, 3> weights;\n    weights[0] = m_rweight;\n    weights[1] = m_idweight;\n    weights[2] = m_idweight;\n\n    std::default_random_engine         g;\n    std::discrete_distribution<int>    drng(weights.begin(), weights.end());\n\n    // Warm-up table to load factor\n    int num_warmup = static_cast<int>(static_cast<double>(m_capacity) * m_load_factor);\n    for (int i = 0; i < num_warmup; i++)\n    {\n      int k = rng(mt); \n      int v = rng(mt);\n\n      ht.insert(k, v, 0);\n    }\n\n    // Run benchmark\n    std::vector<double> results;\n    for (int iter = 0; iter < NUM_ITERS; iter++)\n    {\n      int num_elems = m_op_count / m_thread_count;\n      pthread_t  workers[MAX_THREADS];\n      WorkerArgs args[MAX_THREADS];\n\n      double start = CycleTimer::currentSeconds();\n      for (int i = 0; i < m_thread_count; i++)\n      {\n        args[i].num_elems = num_elems;\n        args[i].rweight   = m_rweight;\n        args[i].iweight   = m_idweight / 2;\n        args[i].dweight   = m_idweight / 2;\n        args[i].ht_p      = (void*)&ht;\n        args[i].tid       = i;\n        pthread_create(&workers[i], NULL, thread_service<Lockfree_hash_table>, (void*)&args[i]);\n      }\n\n      for (int i = 0; i < m_thread_count; i++)\n      {\n        pthread_join(workers[i], NULL);\n      }\n      double time  = CycleTimer::currentSeconds() - start;\n      results.push_back(time);\n    }\n\n    // Publish Results\n    
double best_time = *std::min_element(results.begin(), results.end());\n    double avg_time  = std::accumulate(results.begin(), results.end(), 0.0) / static_cast<double>(results.size());\n    std::cout << \"\\t\" << \"Max Throughput: \" << m_op_count / best_time / 1000.0 << \" ops/ms\" << std::endl;\n    std::cout << \"\\t\" << \"Avg Throughput: \" << m_op_count / avg_time  / 1000.0 << \" ops/ms\" << std::endl;\n\n    results.clear();\n\n    int* keys = new int[m_op_count];\n\n    for (int iter = 0; iter < NUM_ITERS; iter++)\n    {\n      int num_elems = m_op_count / m_thread_count;\n      pthread_t  workers[MAX_THREADS];\n      WorkerArgs args[MAX_THREADS];\n\n      double start = CycleTimer::currentSeconds();\n      for (int i = 0; i < m_thread_count; i++)\n      {\n        args[i].num_elems = num_elems;\n        args[i].rweight   = m_rweight;\n        args[i].iweight   = m_idweight / 2;\n        args[i].dweight   = m_idweight / 2;\n        args[i].ht_p      = (void*)&ht;\n        args[i].tid       = i;\n        args[i].elems     = keys;\n        args[i].start     = i * num_elems;\n        pthread_create(&workers[i], NULL, thread_service_low_contention<Lockfree_hash_table>, (void*)&args[i]);\n      }\n\n      for (int i = 0; i < m_thread_count; i++)\n      {\n        pthread_join(workers[i], NULL);\n      }\n      double time  = CycleTimer::currentSeconds() - start;\n      results.push_back(time);\n    }\n\n    // Publish Results\n    best_time = *std::min_element(results.begin(), results.end());\n    avg_time  = std::accumulate(results.begin(), results.end(), 0.0) / static_cast<double>(results.size());\n    std::cout << \"\\t\" << \"Max Throughput (Low): \" << m_op_count / best_time / 1000.0 << \" ops/ms\" << std::endl;\n    std::cout << \"\\t\" << \"Avg Throughput (Low): \" << m_op_count / avg_time  / 1000.0 << \" ops/ms\" << std::endl;\n\n    results.clear();\n\n    for (int iter = 0; iter < NUM_ITERS; iter++)\n    {\n      int num_elems = m_op_count / 
m_thread_count;\n      pthread_t  workers[MAX_THREADS];\n      WorkerArgs args[MAX_THREADS];\n\n      double start = CycleTimer::currentSeconds();\n      for (int i = 0; i < m_thread_count; i++)\n      {\n        args[i].num_elems = num_elems;\n        args[i].rweight   = m_rweight;\n        args[i].iweight   = m_idweight / 2;\n        args[i].dweight   = m_idweight / 2;\n        args[i].ht_p      = (void*)&ht;\n        args[i].tid       = i;\n        ht.insert(0, 0, 0);\n        pthread_create(&workers[i], NULL, thread_service_high_contention<Lockfree_hash_table>, (void*)&args[i]);\n      }\n\n      for (int i = 0; i < m_thread_count; i++)\n      {\n        pthread_join(workers[i], NULL);\n      }\n      double time  = CycleTimer::currentSeconds() - start;\n      results.push_back(time);\n    }\n\n    // Publish Results\n    best_time = *std::min_element(results.begin(), results.end());\n    avg_time  = std::accumulate(results.begin(), results.end(), 0.0) / static_cast<double>(results.size());\n    std::cout << \"\\t\" << \"Max Throughput (High): \" << m_op_count / best_time / 1000.0 << \" ops/ms\" << std::endl;\n    std::cout << \"\\t\" << \"Avg Throughput (High): \" << m_op_count / avg_time  / 1000.0 << \" ops/ms\" << std::endl;\n\n\n}\n\nvoid BenchmarkLockFreeHT::run()\n{\n  benchmark_correctness();\n  benchmark_hp();\n  benchmark_all();\n}\n\n#endif\n"
  },
  {
    "path": "solvers/wheels/lockfreehash/cuckoo/benchmark_unordered_map.h",
    "content": "#ifndef BENCHMARK_UNORDERED_MAP\n#define BENCHMARK_UNORDERED_MAP\n\n#include <unordered_map>\n#include <iostream>\n#include <random>\n#include <algorithm>\n#include <array>\n\n#include \"cycle_timer.h\"\n\n#define NUM_ITERS 3\n\nclass BenchmarkUnorderedMap\n{\n  public:\n    BenchmarkUnorderedMap(int op_count, int capacity, \n                          int rweight, int idweight, \n                          double load_factor);\n\n    void benchmark_all();\n    void run();\n  private:\n    int    m_rweight;\n    int    m_idweight;\n\n    int    m_op_count;\n    int    m_capacity;\n    double m_load_factor;\n};\n\nBenchmarkUnorderedMap::BenchmarkUnorderedMap(int op_count, int capacity, \n                                             int rweight, int idweight,\n                                             double load_factor)\n{\n  std::cout << \"*** BENCHMARKING UnorderedMap ***\" << std::endl;\n  m_op_count    = op_count;\n  m_load_factor = load_factor; \n  m_capacity    = capacity;\n\n  m_rweight     = rweight;\n  m_idweight    = idweight;\n}\n\nvoid BenchmarkUnorderedMap::benchmark_all()\n{\n    std::unordered_map<int, int> map;\n    map.reserve(m_capacity);\n\n    std::random_device                 rd;\n    std::mt19937                       mt(rd());\n    std::uniform_int_distribution<int> rng;\n\n    std::array<int, 3> weights;\n    weights[0] = m_rweight;\n    weights[1] = m_idweight;\n    weights[2] = m_idweight;\n\n    std::default_random_engine         g;\n    std::discrete_distribution<int>    drng(weights.begin(), weights.end());\n\n    // Warm-up table to load factor\n    int num_warmup = static_cast<int>(static_cast<double>(m_capacity) * m_load_factor);\n    for (int i = 0; i < num_warmup; i++)\n    {\n      int k = rng(mt); \n      int v = rng(mt);\n      map[k] = v;\n    }\n\n    // Run benchmark (single-threaded)\n    std::vector<double> results;\n    for (int iter = 0; iter < NUM_ITERS; iter++)\n    {\n      double start = 
CycleTimer::currentSeconds();\n      for (int i = 0; i < m_op_count; i++)\n      {\n        int k = rng(mt);\n        int v = rng(mt);\n        int a = drng(g);\n\n        if (a == 0)\n          map.find(k);\n        else if (a == 1)\n          map[k] = v;\n        else\n          map.erase(k);\n      }\n      double time  = CycleTimer::currentSeconds() - start;\n      results.push_back(time);\n    }\n\n    // Publish Results\n    double best_time = *std::min_element(results.begin(), results.end());\n    double avg_time  = std::accumulate(results.begin(), results.end(), 0.0) / static_cast<double>(results.size());\n    std::cout << \"\\t\" << \"Max Throughput: \" << static_cast<double>(m_op_count) / best_time / 1000.0 << \" ops/ms\" << std::endl;\n    std::cout << \"\\t\" << \"Avg Throughput: \" << static_cast<double>(m_op_count) / avg_time  / 1000.0 << \" ops/ms\" << std::endl;\n\n    results.clear();\n    int *keys = new int[m_op_count];\n    int s = 0;\n    int e = 0;\n    for (int iter = 0; iter < NUM_ITERS; iter++)\n    {\n      double start = CycleTimer::currentSeconds();\n      for (int i = 0; i < m_op_count; i++)\n      {\n        int k = rng(mt);\n        int v = rng(mt);\n        int a = drng(g);\n\n        if (s == e || a == 1) {\n          map[k] = v;\n          keys[e++] = k;\n        } else if (a == 0) {\n          map.find(keys[k % (e - s) + s]);\n        } else {\n          map.erase(keys[s++]);\n        }\n      }\n      double time  = CycleTimer::currentSeconds() - start;\n      results.push_back(time);\n    }\n\n    // Publish Results\n    best_time = *std::min_element(results.begin(), results.end());\n    avg_time  = std::accumulate(results.begin(), results.end(), 0.0) / static_cast<double>(results.size());\n    std::cout << \"\\t\" << \"Max Throughput (Low): \" << static_cast<double>(m_op_count) / best_time / 1000.0 << \" ops/ms\" << std::endl;\n    std::cout << \"\\t\" << \"Avg Throughput (Low): \" << static_cast<double>(m_op_count) / avg_time  
/ 1000.0 << \" ops/ms\" << std::endl;\n\n    results.clear();\n    for (int iter = 0; iter < NUM_ITERS; iter++)\n    {\n      double start = CycleTimer::currentSeconds();\n      map[0] = 0;\n      for (int i = 0; i < m_op_count; i++)\n      {\n        int x = map[0];\n      }\n      double time  = CycleTimer::currentSeconds() - start;\n      results.push_back(time);\n    }\n\n    // Publish Results\n    best_time = *std::min_element(results.begin(), results.end());\n    avg_time  = std::accumulate(results.begin(), results.end(), 0.0) / static_cast<double>(results.size());\n    std::cout << \"\\t\" << \"Max Throughput (High): \" << static_cast<double>(m_op_count) / best_time / 1000.0 << \" ops/ms\" << std::endl;\n    std::cout << \"\\t\" << \"Avg Throughput (High): \" << static_cast<double>(m_op_count) / avg_time  / 1000.0 << \" ops/ms\" << std::endl;\n}\n\nvoid BenchmarkUnorderedMap::run()\n{\n  benchmark_all();\n}\n\n#endif\n"
  },
  {
    "path": "solvers/wheels/lockfreehash/cuckoo/cycle_timer.h",
    "content": "#ifndef _SYRAH_CYCLE_TIMER_H_\n#define _SYRAH_CYCLE_TIMER_H_\n\n#if defined(__APPLE__)\n  #if defined(__x86_64__)\n    #include <sys/sysctl.h>\n  #else\n    #include <mach/mach.h>\n    #include <mach/mach_time.h>\n  #endif // __x86_64__ or not\n\n  #include <stdio.h>  // fprintf\n  #include <stdlib.h> // exit\n\n#elif _WIN32\n#  include <windows.h>\n#  include <time.h>\n#else\n#  include <stdio.h>\n#  include <stdlib.h>\n#  include <string.h>\n#  include <sys/time.h>\n#endif\n\n\n  // This uses the cycle counter of the processor.  Different\n  // processors in the system will have different values for this.  If\n  // you process moves across processors, then the delta time you\n  // measure will likely be incorrect.  This is mostly for fine\n  // grained measurements where the process is likely to be on the\n  // same processor.  For more global things you should use the\n  // Time interface.\n\n  // Also note that if you processors' speeds change (i.e. processors\n  // scaling) or if you are in a heterogenous environment, you will\n  // likely get spurious results.\n  class CycleTimer {\n  public:\n    typedef unsigned long long SysClock;\n\n    //////////\n    // Return the current CPU time, in terms of clock ticks.\n    // Time zero is at some arbitrary point in the past.\n    static SysClock currentTicks() {\n#if defined(__APPLE__) && !defined(__x86_64__)\n      return mach_absolute_time();\n#elif defined(_WIN32)\n      LARGE_INTEGER qwTime;\n      QueryPerformanceCounter(&qwTime);\n      return qwTime.QuadPart;\n#elif defined(__x86_64__)\n      unsigned int a, d;\n      asm volatile(\"rdtsc\" : \"=a\" (a), \"=d\" (d));\n      return static_cast<unsigned long long>(a) |\n        (static_cast<unsigned long long>(d) << 32);\n#elif defined(__ARM_NEON__) && 0 // mrc requires superuser.\n      unsigned int val;\n      asm volatile(\"mrc p15, 0, %0, c9, c13, 0\" : \"=r\"(val));\n      return val;\n#else\n      timespec spec;\n      
clock_gettime(CLOCK_THREAD_CPUTIME_ID, &spec);\n      return CycleTimer::SysClock(static_cast<float>(spec.tv_sec) * 1e9 + static_cast<float>(spec.tv_nsec));\n#endif\n    }\n\n    //////////\n    // Return the current CPU time, in terms of seconds.\n    // This is slower than currentTicks().  Time zero is at\n    // some arbitrary point in the past.\n    static double currentSeconds() {\n      return currentTicks() * secondsPerTick();\n    }\n\n    //////////\n    // Return the conversion from seconds to ticks.\n    static double ticksPerSecond() {\n      return 1.0/secondsPerTick();\n    }\n\n    static const char* tickUnits() {\n#if defined(__APPLE__) && !defined(__x86_64__)\n      return \"ns\";\n#elif defined(__WIN32__) || defined(__x86_64__)\n      return \"cycles\";\n#else\n      return \"ns\"; // clock_gettime\n#endif\n    }\n\n    //////////\n    // Return the conversion from ticks to seconds.\n    static double secondsPerTick() {\n      static bool initialized = false;\n      static double secondsPerTick_val;\n      if (initialized) return secondsPerTick_val;\n#if defined(__APPLE__)\n  #ifdef __x86_64__\n      int args[] = {CTL_HW, HW_CPU_FREQ};\n      unsigned int Hz;\n      size_t len = sizeof(Hz);\n      if (sysctl(args, 2, &Hz, &len, NULL, 0) != 0) {\n         fprintf(stderr, \"Failed to initialize secondsPerTick_val!\\n\");\n         exit(-1);\n      }\n      secondsPerTick_val = 1.0 / (double) Hz;\n  #else\n      mach_timebase_info_data_t time_info;\n      mach_timebase_info(&time_info);\n\n      // Scales to nanoseconds without 1e-9f\n      secondsPerTick_val = (1e-9*static_cast<double>(time_info.numer))/\n        static_cast<double>(time_info.denom);\n  #endif // x86_64 or not\n#elif defined(_WIN32)\n      LARGE_INTEGER qwTicksPerSec;\n      QueryPerformanceFrequency(&qwTicksPerSec);\n      secondsPerTick_val = 1.0/static_cast<double>(qwTicksPerSec.QuadPart);\n#else\n      FILE *fp = fopen(\"/proc/cpuinfo\",\"r\");\n      char input[1024];\n      if 
(!fp) {\n         fprintf(stderr, \"CycleTimer::resetScale failed: couldn't find /proc/cpuinfo.\");\n         exit(-1);\n      }\n      // In case we don't find it, e.g. on the N900\n      secondsPerTick_val = 1e-9;\n      while (!feof(fp) && fgets(input, 1024, fp)) {\n        // NOTE(boulos): Because reading cpuinfo depends on dynamic\n        // frequency scaling it's better to read the @ sign first\n        float GHz, MHz;\n        if (strstr(input, \"model name\")) {\n          char* at_sign = strstr(input, \"@\");\n          if (at_sign) {\n            char* after_at = at_sign + 1;\n            char* GHz_str = strstr(after_at, \"GHz\");\n            char* MHz_str = strstr(after_at, \"MHz\");\n            if (GHz_str) {\n              *GHz_str = '\\0';\n              if (1 == sscanf(after_at, \"%f\", &GHz)) {\n                //printf(\"GHz = %f\\n\", GHz);\n                secondsPerTick_val = 1e-9f / GHz;\n                break;\n              }\n            } else if (MHz_str) {\n              *MHz_str = '\\0';\n              if (1 == sscanf(after_at, \"%f\", &MHz)) {\n                //printf(\"MHz = %f\\n\", MHz);\n                secondsPerTick_val = 1e-6f / MHz;\n                break;\n              }\n            }\n          }\n        } else if (1 == sscanf(input, \"cpu MHz : %f\", &MHz)) {\n          //printf(\"MHz = %f\\n\", MHz);\n          secondsPerTick_val = 1e-6f / MHz;\n          break;\n        }\n      }\n      fclose(fp);\n#endif\n\n      initialized = true;\n      return secondsPerTick_val;\n    }\n\n    //////////\n    // Return the conversion from ticks to milliseconds.\n    static double msPerTick() {\n      return secondsPerTick() * 1000.0;\n    }\n\n  private:\n    CycleTimer();\n  };\n\n#endif // #ifndef _SYRAH_CYCLE_TIMER_H_\n"
  },
  {
    "path": "solvers/wheels/lockfreehash/cuckoo/hash_table.h",
    "content": "#ifndef HASH_TABLE\n#define HASH_TABLE\n\n#include <utility>\n\nstruct Hash_table {\n  virtual std::pair<int, bool> search(int key) = 0; \n  virtual void                 insert(int key, int val) = 0;\n  virtual void                 remove(int key) = 0;\n};\n\n#endif\n"
  },
  {
    "path": "solvers/wheels/lockfreehash/cuckoo/lockfree_hash_table.cpp",
    "content": "#include \"lockfree_hash_table.h\"\n#include <cstdint>\n#include <iostream>\n#include <algorithm>\n\n#define THRESHOLD   50\n#define R           25\n#define MAX_BUFSIZE 128\n#define HP_COUNT    2\n\n// Inline bit twiddling functions\ninline Count_ptr make_pointer(Hash_entry* e, uint16_t count) {\n  return (Count_ptr)((((uint64_t)count) << 48) | ((uint64_t)e & 0xFFFFFFFFFFFF));\n}\ninline Hash_entry* get_pointer(Count_ptr ptr) {\n  return (Hash_entry*)((uint64_t)ptr & 0xFFFFFFFFFFFE);\n}\n\ninline uint16_t get_counter(Count_ptr ptr) { \n  return (uint16_t)(((uint64_t)ptr >> 48) & 0xFFFF);\n}\n\ninline bool get_marked(Hash_entry *ent) {\n  return ((uint64_t)ent & 1) == 1;\n}\n\ninline Hash_entry *set_marked(Hash_entry *ent, bool marked) {\n  return marked ? (Hash_entry*)((uint64_t)ent | 1) \n                : (Hash_entry*)((uint64_t)ent & (~1));\n}\n\nLockfree_hash_table::Lockfree_hash_table(int capacity, int thread_count) {\n  size1 = capacity / 2;\n  size2 = capacity - size1;\n\n  table[0] = new Count_ptr[size1]();\n  table[1] = new Count_ptr[size2]();\n\n  hp_rec.reserve(thread_count);\n  rlist.reserve(thread_count);\n  rcount.reserve(thread_count);\n\n  for (int i = 0; i < thread_count; i++)\n  {\n    hp_rec[i][0] = NULL;\n    hp_rec[i][1] = NULL;\n    rcount[i]    = 0;\n  }\n\n}\n\nLockfree_hash_table::~Lockfree_hash_table() {\n\n  for (int i = 0; i < size1; i++)\n  {\n    Hash_entry* node = get_pointer(table[0][i]);\n    if (node != NULL)\n      delete node;\n  }\n  \n  for (int i = 0; i < size2; i++)\n  {\n    Hash_entry* node = get_pointer(table[1][i]);\n    if (node != NULL)\n      delete node;\n  }\n  \n  delete table[0];\n  delete table[1];\n}\n\nvoid rehash() {\n  return;\n}\n// HP functions\nvoid Lockfree_hash_table::retire_node(Hash_entry* node, int tid) {\n  rlist[tid][rcount[tid]] = node;\n  rcount[tid]++;\n\n  if (rcount[tid] > R)\n    scan(tid);\n}\n\nvoid Lockfree_hash_table::scan(int tid) {\n  // Stage 1\n  int size = 0;\n  
std::vector<Hash_entry*> plist;\n  for (int i = 0; i < hp_rec.size(); i++)\n  {\n    for (int j = 0; j < hp_rec[i].size(); j++)\n    {\n      Hash_entry* hptr = hp_rec[i][j];\n      if (hptr != NULL)\n      {\n        plist.push_back(hptr);\n      }\n    }\n  }\n\n  // Stage 2\n  int n = rcount[tid];\n  rcount[tid] = 0;\n  for (int i = 0; i < n; i++)\n  {\n    if (std::find(plist.begin(), plist.end(), rlist[tid][i]) != plist.end())\n    {\n      rlist[tid][rcount[tid]] = rlist[tid][i];\n      rcount[tid]++;\n    }\n    else\n    {\n      //printf(\"freed %p\\n\", rlist[tid][i]);\n      delete rlist[tid][i];\n    }\n  }\n}\n\n// Private\nint Lockfree_hash_table::hash1(int key) {\n  int c2=0x27d4eb2d; // a prime or an odd constant\n  key = (key ^ 61) ^ (key >> 16);\n  key = key + (key << 3);\n  key = key ^ (key >> 4);\n  key = key * c2;\n  key = key ^ (key >> 15);\n  return key % size1;\n}\n\nint Lockfree_hash_table::hash2(int key) {\n  key = ((key >> 16) ^ key) * 0x45d9f3b;\n  key = ((key >> 16) ^ key) * 0x45d9f3b;\n  key = (key >> 16) ^ key;\n  return key % size2;\n}\n\nbool Lockfree_hash_table::check_counter(int ts1, int ts2, int ts1x, int ts2x) {\n  return (ts1x >= ts1 + 2) && (ts2x >= ts2 + 2) && (ts2x >= ts1 + 3);\n}\n\nFind_result Lockfree_hash_table::find(int key, Count_ptr &ptr1, Count_ptr &ptr2, int tid) {\n  int h1 = hash1(key);\n  int h2 = hash2(key);\n\n  Find_result result = (Find_result)-1;\n\n  while (true) {\n    //std::cout << \"Find inf loop\" << std::endl;\n    ptr1 = table[0][h1];\n    int ts1 = get_counter(ptr1);\n    \n    hp_rec[tid][0] = get_pointer(ptr1);\n    if (get_pointer(ptr1) != get_pointer(table[0][h1]))\n      continue;\n\n    if (get_pointer(ptr1)) {\n      if (get_marked(ptr1)) {\n        help_relocate(0, h1, false, tid);\n        continue; \n      }\n\n      if (get_pointer(ptr1)->key == key) \n        result = FIRST; \n    }\n\n    ptr2 = table[1][h2];\n    int ts2 = get_counter(ptr2);\n\n    hp_rec[tid][1] = get_pointer(ptr2);\n 
   if (get_pointer(ptr2) != get_pointer(table[1][h2]))\n      continue;\n\n    if (get_pointer(ptr2)) {\n      if (get_marked(ptr2)) {\n        help_relocate(1, h2, false, tid);\n        continue; \n      }\n\n      if (get_pointer(ptr2)->key == key) {\n        if (result == FIRST) {\n          del_dup(h1, ptr1, h2, ptr2, tid);\n        } else {\n          result = SECOND;\n        }\n      }\n    }\n\n    if (result == FIRST || result == SECOND) {\n      return result;\n    }\n\n    ptr1 = table[0][h1];\n    ptr2 = table[1][h2];\n\n    if (check_counter(ts1, ts2, get_counter(ptr1), get_counter(ptr2))) {\n      continue;\n    } else {\n      return NIL;\n    }\n  }\n}\n\nbool Lockfree_hash_table::relocate(int which, int index, int tid) {\ntry_again:\n  int  route[THRESHOLD];\n  Count_ptr pptr   = NULL;\n  int  pre_idx     = 0;\n  int  start_level = 0;\n  int  tbl         = which;\n  int  idx         = index;\n\n\npath_discovery:\n  bool found = false;\n  int depth = start_level;\n  do\n  {\n    Count_ptr ptr1 = table[tbl][idx];\n    \n    while (get_marked(ptr1))\n    {\n      help_relocate(tbl, idx, false, tid);\n      ptr1 = table[tbl][idx];\n    }\n\n    Hash_entry* e1 = get_pointer(ptr1);\n    Hash_entry* p1 = get_pointer(pptr);\n    hp_rec[tid][0] = e1;\n    if (e1 != get_pointer(table[tbl][idx]))\n      goto try_again;\n    /*\n    if (p1 && e1 && e1->key == p1->key)\n    {\n      if (tbl == 0)\n        del_dup(idx, ptr1, pre_idx, pptr, tid);\n      else\n        del_dup(pre_idx, pptr, idx, ptr1, tid);\n    }\n    */\n    if (e1 != nullptr)\n    {\n      route[depth] = idx;\n      int key = e1->key; \n      pptr    = ptr1;\n      pre_idx = idx;\n      tbl     = 1 - tbl;\n      idx     = (tbl == 0) ? 
hash1(key) : hash2(key); \n    }\n    else\n    {\n      found = true;\n    }\n  } while (!found && ++depth < THRESHOLD);\n\n  if (found)\n  {\n    tbl = 1 - tbl;\n    for (int i = depth-1; i >= 0; i--, tbl = 1 - tbl)\n    {\n      idx = route[i];\n      Count_ptr ptr1 = table[tbl][idx];\n      /*\n      hp_rec[tid][0] = get_pointer(ptr1);\n      if (get_pointer(ptr1) != get_pointer(table[tbl][idx]))\n        goto try_again;\n      */\n      if (get_marked(ptr1))\n      {\n        help_relocate(tbl, idx, false, tid);\n        hp_rec[tid][0] = table[tbl][idx];\n        ptr1 = hp_rec[tid][0];\n        /*\n        if (get_pointer(ptr1) != get_pointer(table[tbl][idx]))\n          goto try_again;\n         */\n      }\n\n      Hash_entry* e1 = get_pointer(ptr1);\n      if (e1 == nullptr)\n        continue;\n\n      int dest_idx = (tbl == 0) ? hash2(e1->key) : hash1(e1->key);\n      Count_ptr ptr2 = table[1-tbl][dest_idx];\n      Hash_entry* e2 = get_pointer(ptr2);\n\n      if (e2 != nullptr)\n      {\n        start_level = i + 1;\n        idx = dest_idx;\n        tbl = 1 - tbl;\n        goto path_discovery;\n      }\n      help_relocate(tbl, idx, true, tid);\n    }\n  }\n\n  return found;\n}\n\nvoid Lockfree_hash_table::help_relocate(int which, int index, bool initiator, int tid) {\n  while (1)\n  {\n    //std::cout << \"help_relocate inf loop\" << std::endl;\n    Count_ptr ptr1 = table[which][index];\n    Hash_entry* src = get_pointer(ptr1);\n    hp_rec[tid][0] = src;\n    if (ptr1 != table[which][index])\n      continue;\n\n    while (initiator && !get_marked(ptr1))\n    {\n      //std::cout << \"help_relocate mark inf loop\" << std::endl;\n      if (src == nullptr)\n        return;\n\n      __sync_bool_compare_and_swap(&table[which][index], ptr1, \n                                   set_marked(ptr1, 1));\n      ptr1 = table[which][index];\n      hp_rec[tid][0] = ptr1;\n      if (ptr1 != table[which][index])\n        continue;\n      src  = get_pointer(ptr1);\n    
}\n\n    if (!get_marked(ptr1))\n      return;\n\n    int hd = ((1 - which) == 0) ? hash1(src->key) : hash2(src->key);\n    Count_ptr ptr2 = table[1-which][hd];\n    Hash_entry* dst = get_pointer(ptr2);\n    hp_rec[tid][1] = dst;\n    if (ptr2 != table[1-which][hd])\n      continue;\n\n    uint16_t ts1 = get_counter(ptr1);\n    uint16_t ts2 = get_counter(ptr2);\n\n    if (dst == nullptr)\n    {\n      int nCnt = ts1 > ts2 ? ts1 + 1 : ts2 + 1;\n      \n      if (ptr1 != table[which][index])\n        continue;\n      \n      if (__sync_bool_compare_and_swap(&table[1-which][hd], ptr2, \n                                       make_pointer(src, nCnt)))\n      {\n        __sync_bool_compare_and_swap(&table[which][index], ptr1, \n                                     make_pointer(nullptr, ts1+1));\n        return;\n      }\n    }\n\n    if (src == dst)\n    {\n      __sync_bool_compare_and_swap(&table[which][index], ptr1, \n                                   make_pointer(nullptr, ts1+1));\n      return;\n    }\n\n    __sync_bool_compare_and_swap(&table[which][index], ptr1, \n                                 make_pointer(set_marked(src, 0), ts1+1));\n    return;\n    \n  }\n}\n\nvoid Lockfree_hash_table::del_dup(int idx1, Count_ptr ptr1, int idx2, Count_ptr ptr2, int tid) {\n  hp_rec[tid][0] = ptr1;\n  hp_rec[tid][1] = ptr2;\n  if (ptr1 != table[0][idx1] && ptr2 != table[1][idx2])\n    return;\n  if (get_pointer(ptr1)->key != get_pointer(ptr2)->key)\n    return;\n\n  __sync_bool_compare_and_swap(&table[1][idx2], ptr2, \n                               make_pointer(nullptr, get_counter(ptr2)));\n}\n  \n// Public\nstd::pair<int, bool> Lockfree_hash_table::search(int key, int tid) {\n  int h1 = hash1(key);\n  int h2 = hash2(key);\n\n  while (true) {\n    //std::cout << \"search inf loop \" << key << std::endl;\n    Count_ptr ptr1 = table[0][h1]; \n    Hash_entry *e1 = get_pointer(ptr1);\n    \n    hp_rec[tid][0] = e1;\n    if (ptr1 != table[0][h1])\n      continue;\n\n    int 
ts1 = get_counter(ptr1);\n\n    if (e1 && e1->key == key)\n      return std::make_pair(e1->val, true);\n\n    Count_ptr ptr2 = table[1][h2];\n    Hash_entry *e2 = get_pointer(ptr2);\n\n    hp_rec[tid][0] = e2;\n    if (ptr2 != table[1][h2])\n      continue;\n\n    int ts2 = get_counter(ptr2);\n\n    if (e2 && e2->key == key)\n      return std::make_pair(e2->val, true);\n\n    int ts1x = get_counter(table[0][h1]);\n    int ts2x = get_counter(table[1][h2]);\n\n    if (check_counter(ts1, ts2, ts1x, ts2x))\n      continue;\n    else\n      return std::make_pair(0, false);\n  }\n\n  return std::make_pair(0, false);\n}\n\nvoid Lockfree_hash_table::insert(int key, int val, int tid) {\n  Count_ptr ptr1, ptr2;\n\n  Hash_entry *new_node = new Hash_entry();\n  new_node->key = key;\n  new_node->val = val;\n\n  int h1 = hash1(key);\n  int h2 = hash2(key);\n\n\n  while (true) {\n    //std::cout << \"Inserting \" << key << std::endl;\n    Find_result result = find(key, ptr1, ptr2, tid);\n\n    if (result == FIRST) {\n      get_pointer(ptr1)->val = val; \n      return;\n    }\n\n    if (result == SECOND) {\n      get_pointer(ptr2)->val = val;\n      return;\n    }\n\n    if (!get_pointer(ptr1)) { \n      if (!__sync_bool_compare_and_swap(\n            &table[0][h1], ptr1, make_pointer(new_node, get_counter(ptr1)))) {\n        continue; \n      }\n      return;\n    }\n\n    if (!get_pointer(ptr2)) { \n      if (!__sync_bool_compare_and_swap(\n            &table[1][h2], ptr2, make_pointer(new_node, get_counter(ptr2)))) {\n        continue; \n      }\n      return;\n    }\n\n    if (relocate(0, h1, tid)) {\n      continue;\n    } else {\n      rehash();\n      return;\n    }\n  }\n}\n\nvoid Lockfree_hash_table::remove(int key, int tid) {\n  int h1 = hash1(key);\n  int h2 = hash2(key);\n\n  Count_ptr e1;\n  Count_ptr e2;\n\n  while (true) {\n    //std::cout << \"remove inf loop\" << std::endl;\n    Find_result ret = find(key, e1, e2, tid);\n\n    if (ret == NIL) return;\n\n    if 
(ret == FIRST) {\n      if (__sync_bool_compare_and_swap(\n            &table[0][h1], e1, make_pointer(nullptr, get_counter(e1)))) {\n        retire_node(get_pointer(e1), tid);\n        return;\n      }\n    } else if (ret == SECOND) {\n      if (table[0][h1] != e1) \n        continue;\n      if (__sync_bool_compare_and_swap(\n            &table[1][h2], e2, make_pointer(nullptr, get_counter(e2)))) {\n        retire_node(get_pointer(e2), tid);\n        return;\n      }\n    }\n  }\n}\n"
  },
  {
    "path": "solvers/wheels/lockfreehash/cuckoo/lockfree_hash_table.h",
    "content": "#ifndef LOCKFREE_HASH_TABLE\n#define LOCKFREE_HASH_TABLE\n\n#define MAX_BUF 256\n\n#include \"hash_table.h\"\n#include <vector>\n#include <array>\n\nstruct Hash_entry {\n  int key;\n  int val;\n};\n\n// Alternate count_ptr definition using unused bits\ntypedef Hash_entry* Count_ptr;\n\nenum Find_result { FIRST, SECOND, NIL };\n\nstruct Lockfree_hash_table {\n  Lockfree_hash_table(int capacity, int thread_count);\n  ~Lockfree_hash_table();\n  \n  std::pair<int, bool> search(int key, int tid);\n  void                 insert(int key, int val, int tid);\n  void                 remove(int key, int tid);\n\nprivate:\n  Count_ptr *table[2];  \n  int size1;\n  int size2;\n\n  std::vector<std::array<Hash_entry*, MAX_BUF>>   rlist;\n  std::vector<int>                                rcount;\n  std::vector<std::array<Hash_entry*, 2>>         hp_rec;\n\n  int hash1(int key);\n  int hash2(int key);\n  bool check_counter(int ts1, int ts2, int ts1x, int ts2x);\n  Find_result find(int key, Count_ptr &ptr1, Count_ptr &ptr2, int tid);\n  bool relocate(int which, int index, int tid);\n  void help_relocate(int which, int index, bool initiator, int tid);\n  void del_dup(int idx1, Count_ptr ptr1, int idx2, Count_ptr ptr2, int tid);\n\n  void retire_node(Hash_entry* node, int tid);\n  void scan(int tid);\n};\n#endif\n"
  },
  {
    "path": "solvers/wheels/lockfreehash/cuckoo/main.cpp",
    "content": "#include \"benchmark_unordered_map.h\"\n#include \"benchmark_lockfree_ht.h\"\n//#include \"benchmark_tbb.h\"\n\n#include \"thread_service.h\"\n#include \"cycle_timer.h\"\n\n#include <iostream>\n\n#include <ctype.h>\n#include <stdio.h>\n#include <stdlib.h>\n#include <unistd.h>\n\n#define DEFAULT_OP_COUNT     2000000\n#define DEFAULT_THREAD_COUNT 24\n#define DEFAULT_READ_PERCENT 90\n#define DEFAULT_LOAD_FACTOR  40\n#define CAPACITY             8000016\n\nint main(int argc, char *argv[])\n{\n  char c;\n  int  op_count     = DEFAULT_OP_COUNT; \n  int  num_threads  = DEFAULT_THREAD_COUNT;\n  int  read_percent = DEFAULT_READ_PERCENT;\n  int  load_factor  = DEFAULT_LOAD_FACTOR;\n\n  char *out_file   = NULL;\n\n  // Parse cmd args\n  while ((c = getopt(argc, argv, \"n:t:or:hl:\")) != -1)\n  {\n    switch (c)\n    {\n      case 'n':\n        op_count = atoi(optarg);\n        break;\n      case 't':\n        printf(\"Here\");\n        num_threads = atoi(optarg);\n        break;\n      case 'o':\n        out_file = optarg;\n        break;\n      case 'r':\n        read_percent = atoi(optarg);\n        break;\n      case 'l':\n        load_factor = atoi(optarg);\n        break;\n      case 'h':\n        printf(\"Options: \\n\"\n               \"-n num_elements \\n\"\n               \"-t num_threads \\n\"\n               \"-l load_factor \\n\"\n               \"-r read_percent \\n\"\n               \"-o output_file \\n\");\n        break;\n      default:\n        break;\n    }\n  }\n\n  int    rweight  = read_percent;\n  int    idweight = 100 - read_percent;\n  double lfactor  = load_factor / 100.0;\n\n  printf(\"%d\", num_threads);\n\n  // Run tests\n  std::cout << \"*** STARTING Benchmark ***\" << std::endl;\n  std::cout << \"Parameters: \" << std::endl;\n  std::cout << \"\\t\" << \"op_count     : \" << op_count << std::endl;\n  std::cout << \"\\t\" << \"num_threads  : \" << num_threads << std::endl;\n  std::cout << \"\\t\" << \"load_factor  : \" << 
load_factor << \"%\" << std::endl;\n  std::cout << \"\\t\" << \"read_percent : \" << read_percent << \"%\" << std::endl;\n\n  BenchmarkUnorderedMap benchmark_unordered_map(op_count, CAPACITY, rweight, idweight, lfactor);\n  benchmark_unordered_map.run();\n\n//  BenchmarkTBB benchmark_tbb(op_count, CAPACITY, rweight, idweight, num_threads, lfactor);\n//  benchmark_tbb.run();\n\n  BenchmarkLockFreeHT benchmark_lockfree_ht(op_count, CAPACITY, rweight, idweight, num_threads, lfactor);\n  benchmark_lockfree_ht.run();\n\n}\n"
  },
  {
    "path": "solvers/wheels/lockfreehash/cuckoo/thread_service.h",
    "content": "#ifndef THREAD_SERVICE\n#define THREAD_SERVICE\n\n#include <random>\n#include <array>\n\nstruct WorkerArgs \n{\n  int    num_elems;\n  // R/I/D weights, normalized to 100\n  int    rweight;\n  int    iweight;\n  int    dweight; \n  void*  ht_p;\n\n  bool   remove;\n  int    tid;\n  int    start;\n  int*   elems;\n};\n\ntemplate<typename T>\nvoid* thread_service(void* threadArgs)\n{\n  WorkerArgs* args = static_cast<WorkerArgs*>(threadArgs);\n\n  std::random_device                 rd;\n  std::mt19937                       mt(rd());\n  std::uniform_int_distribution<int> rng;\n\n  std::array<int, 3> weights;\n  weights[0] = args->rweight;\n  weights[1] = args->iweight;\n  weights[2] = args->dweight;\n\n  std::default_random_engine         g;\n  std::discrete_distribution<int>    drng(weights.begin(), weights.end());\n\n  int tid       = args->tid;\n  int num_elems = args->num_elems;\n  T* ht_p = static_cast<T*>(args->ht_p);\n\n  for (int i = 0; i < num_elems; i++)\n  {\n    // Key, Value pair\n    int k = rng(mt);\n    int v = rng(mt);\n    // Action : 0 -> Search, 1 -> Insert, 2 -> Remove\n    int a = drng(g);\n\n    if (a == 0)\n      ht_p->search(k, tid);\n    else if (a == 1)\n      ht_p->insert(k, v, tid);\n    else\n      ht_p->remove(k, tid);\n  }\n}\n\ntemplate<typename T>\nvoid* thread_service_low_contention(void* threadArgs)\n{\n  WorkerArgs* args = static_cast<WorkerArgs*>(threadArgs);\n\n  std::random_device                 rd;\n  std::mt19937                       mt(rd());\n  std::uniform_int_distribution<int> rng;\n\n  std::array<int, 3> weights;\n  weights[0] = args->rweight;\n  weights[1] = args->iweight;\n  weights[2] = args->dweight;\n\n  std::default_random_engine         g;\n  std::discrete_distribution<int>    drng(weights.begin(), weights.end());\n\n  int tid       = args->tid;\n  int num_elems = args->num_elems;\n  T* ht_p = static_cast<T*>(args->ht_p);\n\n  int *keys = (args->elems + args->start);\n\n  int start = 0;\n  int end 
= 0;\n  for (int i = 0; i < num_elems; i++)\n  {\n    // Action : 0 -> Search, 1 -> Insert, 2 -> Remove\n    int a = drng(g);\n\n    if (start == end || a == 1) \n    {\n      int k = rng(mt) % num_elems + tid * num_elems; \n      keys[end++] = k;\n      ht_p->insert(k, k, tid);\n    }\n    else if (a == 0)\n    {\n      int k = rng(mt) % (end - start) + start;\n      ht_p->search(k, tid);\n    }\n    else\n    {\n      int k = keys[start++];\n      ht_p->remove(k, tid);\n    }\n  }\n}\n\ntemplate<typename T>\nvoid* thread_service_high_contention(void* threadArgs)\n{\n  WorkerArgs* args = static_cast<WorkerArgs*>(threadArgs);\n\n  std::random_device                 rd;\n  std::mt19937                       mt(rd());\n  std::uniform_int_distribution<int> rng;\n\n  std::array<int, 3> weights;\n  weights[0] = args->rweight;\n  weights[1] = args->iweight;\n  weights[2] = args->dweight;\n\n  std::default_random_engine         g;\n  std::discrete_distribution<int>    drng(weights.begin(), weights.end());\n\n  int tid       = args->tid;\n  int num_elems = args->num_elems;\n  T* ht_p = static_cast<T*>(args->ht_p);\n\n  for (int i = 0; i < num_elems; i++)\n  {\n    ht_p->search(0, tid);\n  }\n}\n\ntemplate<typename T>\nvoid* thread_insert(void* threadArgs)\n{\n  WorkerArgs* args = static_cast<WorkerArgs*>(threadArgs);\n  int* elems = args->elems;\n  T*   ht_p  = static_cast<T*>(args->ht_p);\n  int  start     = args->start;\n  int  num_elems = args->num_elems;\n  int  tid       = args->tid;\n\n  for (int i = start; i < start + num_elems; i++)\n  {\n    ht_p->insert(elems[i], elems[i], tid);\n  }\n  \n}\n\ntemplate<typename T>\nvoid* thread_remove(void* threadArgs)\n{\n  WorkerArgs* args = static_cast<WorkerArgs*>(threadArgs);\n  int* elems = args->elems;\n  T*   ht_p  = static_cast<T*>(args->ht_p);\n  int  start     = args->start;\n  int  num_elems = args->num_elems;\n  int  tid       = args->tid;\n  bool remove    = args->remove;\n  \n  std::random_device                 
rd;\n  std::mt19937                       mt(rd());\n  std::uniform_int_distribution<int> rng(0, 200000 - 1);\n\n  for (int i = start; i < start + num_elems; i++)\n  {\n    if (remove)\n      ht_p->remove(elems[i], tid);\n    else\n      ht_p->search(elems[rng(mt)], tid);\n  }\n\n}\n\n#endif\n"
  },
  {
    "path": "solvers/wheels/lockfreehash/lprobe/Makefile",
    "content": "all:\n\tg++ main.cc -std=c++14 -mcx16 -march=native -pthread\nclean:\n\trm -rf a.out\n"
  },
  {
    "path": "solvers/wheels/lockfreehash/lprobe/alloc.h",
    "content": "#pragma once\n\nnamespace pbbs {\n  void* my_alloc(size_t);\n  void my_free(void*);\n}\n\n#include <atomic>\n#include <vector>\n#include <new>\n#include \"utilities.h\"\n#include \"concurrent_stack.h\"\n#include \"utilities.h\"\n#include \"block_allocator.h\"\n#include \"memory_size.h\"\n#include \"get_time.h\"\n\nnamespace pbbs {\n\n#if defined(__APPLE__) // a little behind the times\n  void* aligned_alloc(size_t, size_t n) {return malloc(n);}\n#endif\n\n\n  // ****************************************\n  //    pool_allocator\n  // ****************************************\n\n  // Allocates headerless blocks from pools of different sizes.\n  // A vector of pool sizes is given to the constructor.\n  // Sizes must be at least 8, and must increase.\n  // For pools of small blocks (below large_threshold) each thread keeps a\n  //   thread local list of elements from each pool using the\n  //   block_allocator.\n  // For pools of large blocks there is only one shared pool for each.\n  struct pool_allocator {\n\n  private:\n    static const size_t large_align = 64;\n    static const size_t large_threshold = (1 << 20);\n    size_t num_buckets;\n    size_t num_small;\n    size_t max_small;\n    size_t max_size;\n    std::atomic<long> large_allocated{0};\n  \n    concurrent_stack<void*>* large_buckets;\n    struct block_allocator *small_allocators;\n    std::vector<size_t> sizes;\n\n    void* allocate_large(size_t n) {\n\n      size_t bucket = num_small;\n      size_t alloc_size;\n\n      if (n <= max_size) {\n\twhile (n > sizes[bucket]) bucket++;\n\tmaybe<void*> r = large_buckets[bucket-num_small].pop();\n\tif (r) return *r;\n\talloc_size = sizes[bucket];\n      } else alloc_size = n;\n\n      void* a = (void*) aligned_alloc(large_align, alloc_size);\n      if (a == NULL) throw std::bad_alloc();\n      \n      large_allocated += n;\n      return a;\n    }\n\n    void deallocate_large(void* ptr, size_t n) {\n      if (n > max_size) { 
\n\tfree(ptr);\n\tlarge_allocated -= n;\n      } else {\n\tsize_t bucket = num_small;\n\twhile (n > sizes[bucket]) bucket++;\n\tlarge_buckets[bucket-num_small].push(ptr);\n      }\n    }\n\n    const size_t small_alloc_block_size = (1 << 20);\n\n  public:\n    ~pool_allocator() {\n      for (size_t i=0; i < num_small; i++)\n\tsmall_allocators[i].~block_allocator();\n      free(small_allocators);\n      clear();\n      delete[] large_buckets;\n    }\n\n    pool_allocator() {}\n  \n    pool_allocator(std::vector<size_t> const &sizes) : sizes(sizes) {\n      timer t;\n      num_buckets = sizes.size();\n      max_size = sizes[num_buckets-1];\n      num_small = 0;\n      while (sizes[num_small] < large_threshold && num_small < num_buckets)\n\tnum_small++;\n      max_small = (num_small > 0) ? sizes[num_small - 1] : 0;\n\n      large_buckets = new concurrent_stack<void*>[num_buckets-num_small];\n\n      small_allocators = (struct block_allocator*)\n\tmalloc(num_buckets * sizeof(struct block_allocator));\n      size_t prev_bucket_size = 0;\n    \n      for (size_t i = 0; i < num_small; i++) {\n\tsize_t bucket_size = sizes[i];\n\tif (bucket_size < 8)\n\t  throw std::invalid_argument(\"for small_allocator, bucket sizes must be at least 8\");\n\tif (!(bucket_size > prev_bucket_size))\n\t  throw std::invalid_argument(\"for small_allocator, bucket sizes must increase\");\n\tprev_bucket_size = bucket_size;\n\tnew (static_cast<void*>(std::addressof(small_allocators[i]))) \n\t  block_allocator(bucket_size, 0, small_alloc_block_size - 64); \n      }\n    }\n\n    void* allocate(size_t n) {\n      if (n > max_small) return allocate_large(n);\n      size_t bucket = 0;\n      while (n > sizes[bucket]) bucket++;\n      return small_allocators[bucket].alloc();\n    }\n\n    void deallocate(void* ptr, size_t n) {\n      if (n > max_small) deallocate_large(ptr, n);\n      else {\n\tsize_t bucket = 0;\n\twhile (n > sizes[bucket]) bucket++;\n\tsmall_allocators[bucket].free(ptr);\n      }\n  
  }\n\n    // allocate, touch, and free to make sure space for small blocks is paged in\n    void reserve(size_t bytes) {\n      size_t bc = bytes/small_alloc_block_size;\n      std::vector<void*> h(bc);\n      parallel_for(0, bc, [&] (size_t i) {\n\t  h[i] = allocate(small_alloc_block_size);\n\t}, 1);\n      parallel_for(0, bc, [&] (size_t i) {\n\t  for (size_t j=0; j < small_alloc_block_size; j += (1 << 12))\n\t    ((char*) h[i])[j] = 0;\n\t}, 1);\n      for (size_t i=0; i < bc; i++)\n      \tdeallocate(h[i], small_alloc_block_size);\n    }\n\n    void print_stats() {\n      size_t total_a = 0;\n      size_t total_u = 0;\n      for (size_t i = 0; i < num_small; i++) {\n\tsize_t bucket_size = sizes[i];\n\tsize_t allocated = small_allocators[i].num_allocated_blocks();\n\tsize_t used = small_allocators[i].num_used_blocks();\n\ttotal_a += allocated * bucket_size;\n\ttotal_u += used * bucket_size;\n\tcout << \"size = \" << bucket_size << \", allocated = \" << allocated\n\t     << \", used = \" << used << endl;\n      }\n      cout << \"Large allocated = \" << large_allocated << endl;\n      cout << \"Total bytes allocated = \" << total_a + large_allocated << endl;\n      cout << \"Total bytes used = \" << total_u << endl;\n    }\n\n    void clear() {\n      for (size_t i = num_small; i < num_buckets; i++) {\n\tmaybe<void*> r = large_buckets[i-num_small].pop();\n\twhile (r) {\n\t  large_allocated -= sizes[i];\n\t  free(*r);\n\t  r = large_buckets[i-num_small].pop();\n\t}\n      }\n    }\n  };\n\n  // ****************************************\n  //    default_allocator (uses powers of two as pool sizes)\n  // ****************************************\n\n  // these are bucket sizes used by the default allocator.\n  std::vector<size_t> default_sizes() {\n    size_t log_min_size = 4;\n    size_t log_max_size = pbbs::log2_up(getMemorySize()/64);\n\n    std::vector<size_t> sizes;\n    for (size_t i = log_min_size; i <= log_max_size; i++)\n      sizes.push_back(1 << i);\n    
return sizes;\n  }\n\n  pool_allocator default_allocator(default_sizes());\n\n  // ****************************************\n  // Following Matches the c++ Allocator specification (minimally)\n  // https://en.cppreference.com/w/cpp/named_req/Allocator\n  // Can therefore be used for containers, e.g.:\n  //    std::vector<int, pbbs::allocator<int>>\n  // ****************************************\n\n  template <typename T>\n  struct allocator {\n    using value_type = T;\n    T* allocate(size_t n) {\n      return (T*) default_allocator.allocate(n * sizeof(T));\n    }\n    void deallocate(T* ptr, size_t n) {\n      default_allocator.deallocate((void*) ptr, n * sizeof(T));\n    }\n\n    allocator() = default;\n    template <class U> constexpr allocator(const allocator<U>&) {}\n  };\n\n  template <class T, class U>\n  bool operator==(const allocator<T>&, const allocator<U>&) { return true; }\n  template <class T, class U>\n  bool operator!=(const allocator<T>&, const allocator<U>&) { return false; }\n\n  // ****************************************\n  // Static allocator for single items of a given type, e.g.\n  //   using long_allocator = type_allocator<long>;\n  //   long* foo = long_allocator::alloc();\n  //   *foo = (long) 23;\n  //   long_allocator::free(foo);\n  // Uses block allocator, and is headerless  \n  // ****************************************\n\n  template <typename T>\n  class type_allocator {\n  public:\n    static constexpr size_t default_alloc_size = 0;\n    static block_allocator allocator;\n    static const bool initialized{true};\n    static T* alloc() { return (T*) allocator.alloc();}\n    static void free(T* ptr) {allocator.free((void*) ptr);}\n\n    // for backward compatibility\n    //static void init(size_t _alloc_size = 0, size_t _list_size=0) {};\n    static void init(size_t, size_t) {};\n    static void init() {};\n    static void reserve(size_t n = default_alloc_size) {\n      allocator.reserve(n);\n    }\n    static void finish() 
{allocator.clear();\n    }\n    static size_t block_size () {return allocator.block_size();}\n    static size_t num_allocated_blocks() {return allocator.num_allocated_blocks();}\n    static size_t num_used_blocks() {return allocator.num_used_blocks();}\n    static size_t num_used_bytes() {return num_used_blocks() * block_size();}\n    static void print_stats() {allocator.print_stats();}\n  };\n\n  template<typename T>\n  block_allocator type_allocator<T>::allocator = block_allocator(sizeof(T));\n  \n  // ****************************************\n  //    my_alloc and my_free (add size tags)\n  // ****************************************\n  //    ifdefed to either use malloc or the pbbs allocator\n  // ****************************************\n\n#ifdef USEMALLOC\n\n#include <malloc.h>\n\n  struct __mallopt {\n    __mallopt() {\n      mallopt(M_MMAP_MAX,0);\n      mallopt(M_TRIM_THRESHOLD,-1);\n    }\n  };\n\n  __mallopt __mallopt_var;\n  \n  inline void* my_alloc(size_t i) {return malloc(i);}\n  inline void my_free(void* p) {free(p);}\n  void allocator_clear() {}\n  void allocator_reserve(size_t bytes) {}\n\n#else\n\n  constexpr size_t size_offset = 1; // in size_t sized words\n\n  // needs to be at least size_offset * size_offset(size_t)\n  inline size_t header_size(size_t n) { // in bytes\n    return (n >= 1024) ? 64 : (n & 15) ? 8 : (n & 63) ? 
16 : 64;\n  }\n\n  // allocates and tags with a header (8, 16 or 64 bytes) that contains the size\n  void* my_alloc(size_t n) {\n    size_t hsize = header_size(n);\n    void* ptr;\n    ptr = default_allocator.allocate(n + hsize);\n    void* r = (void*) (((char*) ptr) + hsize);\n    *(((size_t*) r)-size_offset) = n; // puts size in header\n    return r;\n  }\n\n  // reads the size, offsets the header and frees\n  void my_free(void *ptr) {\n    size_t n = *(((size_t*) ptr)-size_offset);\n    size_t hsize = header_size(n);\n    if (hsize > (1ul << 48)) {\n      cout << \"corrupted header in my_free\" << endl;\n      throw std::bad_alloc(); \n    }\n    default_allocator.deallocate((void*) (((char*) ptr) - hsize), n + hsize);\n  }\n\n  void allocator_clear() {\n    default_allocator.clear();\n  }\n\n  void allocator_reserve(size_t bytes) {\n    default_allocator.reserve(bytes);\n  }\n#endif\n\n  // ****************************************\n  //    common across allocators (key routines used by sequences)\n  // ****************************************\n\n  // Does not initialize the array\n  template<typename E>\n  E* new_array_no_init(size_t n) {\n    return (E*) my_alloc(n * sizeof(E));\n  }\n\n  // Initializes in parallel\n  template<typename E>\n  E* new_array(size_t n) {\n    E* r = new_array_no_init<E>(n);\n    if (!std::is_trivially_default_constructible<E>::value) \n      parallel_for(0, n, [&] (size_t i) {\n        new ((void*) (r+i)) E;});\n    return r;\n  }\n\n  inline void free_array(void* a) {\n    my_free(a);\n  }\n\n  // Destructs in parallel\n  template<typename E>\n  void delete_array(E* A, size_t n) {\n    // C++14 -- supported by gnu C++11\n    if (!std::is_trivially_destructible<E>::value)\n      parallel_for(0, n, [&] (size_t i) {\n        A[i].~E();});\n    else if (std::is_pointer<E>::value)\n      parallel_for(0, n, [&] (size_t i) {\n        if (A[i] != nullptr) delete A[i];});\n    my_free(A);\n  }\n}\n"
  },
  {
    "path": "solvers/wheels/lockfreehash/lprobe/benchmark_lprobe.h",
    "content": "#ifndef BENCHMARK_LOCKFREE_HT\n#define BENCHMARK_LOCKFREE_HT\n\n#include <unordered_map>\n#include <iostream>\n#include <random>\n#include <algorithm>\n#include <pthread.h>\n#include <array>\n#include <unordered_map>\n#include <assert.h>\n\n#include \"cycle_timer.h\"\n#include \"hash_table.h\"\n#include \"thread_service.h\"\n\n#define NUM_ITERS   3\n#define MAX_THREADS 24\n\n#define C_NUM_ELEMS\t76800*24\n#include \"data.h\"\nusing namespace pbbs;\n\n\n\nclass BenchmarkLockFreeHT\n{\n  public:\n    BenchmarkLockFreeHT(int op_count, int capacity, \n                        int rweight, int idweight,\n                        int thread_count,\n                        double load_factor);\n\n    void benchmark_correctness();\n    void benchmark_hp();\n    void benchmark_all();\n    void run();\n\n  private:\n    int    m_rweight;\n    int    m_idweight;\n\n    int    m_thread_count;\n    int    m_op_count;\n    int    m_capacity;\n    double m_load_factor;\n};\n\nBenchmarkLockFreeHT::BenchmarkLockFreeHT(int op_count, int capacity, \n                                         int rweight, int idweight,\n                                         int thread_count, double load_factor)\n{\n  std::cout << \"*** BENCHMARKING LockFreeHT ***\" << std::endl;\n  m_op_count     = op_count;\n  m_load_factor  = load_factor; \n  m_capacity     = capacity;\n  m_thread_count = thread_count;\n\n  m_rweight      = rweight;\n  m_idweight     = idweight;\n}\n\nvoid BenchmarkLockFreeHT::benchmark_correctness()\n{\n  bool correct = true;\n\n  //Lockfree_hash_table ht(2 * C_NUM_ELEMS, m_thread_count);\n\tTable<hashKV> ht(2*C_NUM_ELEMS, hashKV(), 1.3);\n  std::unordered_map<int, int> map;\n  map.reserve(2 * C_NUM_ELEMS);\n  \n  std::random_device                 rd;\n  std::mt19937                       mt(rd());\n  std::uniform_int_distribution<int> rng;\n\n  int elems[C_NUM_ELEMS];\n  for (int i = 0; i < C_NUM_ELEMS; i++)\n  {\n    //int k = rng(mt);\n\t\tint k = 100;\n    
elems[i] = k;\n    map[k] = k;\n  }\n\t//adding err\n\t//elems[5*C_NUM_ELEMS/24 + 34]  = elems[6*C_NUM_ELEMS/24 + 49];\n\t//elems[22*C_NUM_ELEMS/24 + 199]  = elems[9*C_NUM_ELEMS/24 + 347];\n\t//elems[21*C_NUM_ELEMS/24 + 199]  = elems[9*C_NUM_ELEMS/24 + 347];\n\t//elems[19*C_NUM_ELEMS/24 + 199]  = elems[9*C_NUM_ELEMS/24 + 347];\n\tfor (int i=0;i<23;i++)\n\t\tfor (int j=0;j<20;j++)\n\t\telems[i*C_NUM_ELEMS/24 + 34+j]  = 101+i*20+j;\n  \n  pthread_t  workers[MAX_THREADS];\n  WorkerArgs args[MAX_THREADS];\n\n  for (int i = 0; i < 24; i++)\n  {\n    args[i].num_elems = C_NUM_ELEMS / 24;\n    args[i].ht_p      = (void*)&ht;\n    args[i].elems     = elems;\n    args[i].start     = i * (C_NUM_ELEMS / 24);\n    args[i].tid       = i;\n\n    pthread_create(&workers[i], NULL, thread_checkmiss<Table<hashKV>>, (void*)&args[i]);\n  }\n\n  for (int i = 0; i < 24; i++)\n  {\n    pthread_join(workers[i], NULL);\n  }\n\n\tstd::cout << \"hash table count is \" << ht.count() << std::endl;\n\tstd::cout << \"miss is \" << miss << std::endl;\n\tassert(miss==461);\n\n  int count = 0;\n  for (std::pair<int, int> e : map)\n  {\n    //std::pair<int, bool> r = ht.search(e.first, 0);\n\t\tstruct KV res  = ht.find(e.first);\n\t\tstd::pair<int,bool> r;\n\t\tif (res.k == -1)\n\t\t\tr = {-1,false};\n\t\telse\n\t\t\tr = {res.v,true};\n    if (!r.second || e.second != r.first)\n    {\n\n      std::cout << \"\\t\" << \"Expected value, Received value, Received result = \" << e.second << \" \" << r.second << \" \"<< r.first << std::endl;\n      correct = false;\n      count++;\n    }\n  }\n\n  std::cout << \"\\t\" << count << \"/\" << C_NUM_ELEMS << \" errors\" << std::endl;\n\n  if (correct)\n    std::cout << \"\\t\" << \"Correctness test passed\" << std::endl;\n  else\n    std::cout << \"\\t\" << \"Correctness test failed\" << std::endl;\n\n}\n\nvoid BenchmarkLockFreeHT::benchmark_hp()\n{\n  //Lockfree_hash_table ht(400000, m_thread_count);\n\tTable<hashKV> ht(400000, hashKV(), 1.3);\n\n  
std::random_device                 rd;\n  std::mt19937                       mt(rd());\n  std::uniform_int_distribution<int> rng;\n\n  std::array<int, 3> weights;\n  weights[0] = m_rweight;\n  weights[1] = m_idweight;\n  weights[2] = m_idweight;\n\n  std::default_random_engine         g;\n  std::discrete_distribution<int>    drng(weights.begin(), weights.end());\n\n  int insert[200000];\n  for (int i = 0; i < 200000; i++)\n  {\n    int k = rng(mt);\n    int v = rng(mt);\n    insert[i] = k;\n    //ht.insert(k, v, 0);\n\t\tht.insert({k,v});\n  }\n  \n  pthread_t  workers[MAX_THREADS];\n  WorkerArgs args[MAX_THREADS];\n\n  int num_elems = 200000 / m_thread_count;\n  for (int i = 0; i < m_thread_count; i++)\n  {\n    args[i].num_elems = num_elems;\n    args[i].ht_p      = (void*)&ht;\n    args[i].elems     = insert;\n    args[i].start     = i * num_elems;\n    args[i].tid       = i;\n    args[i].remove    = i < (m_thread_count / 4);\n\n    pthread_create(&workers[i], NULL, thread_remove<Table<hashKV>>, (void*)&args[i]);\n  }\n  \n  for (int i = 0; i < m_thread_count; i++)\n  {\n    pthread_join(workers[i], NULL);\n  }\n   \n  std::cout << \"\\t\" << \"Hazard Pointer test passed\" << std::endl;\n\n}\n\nvoid BenchmarkLockFreeHT::benchmark_all()\n{\n   // Lockfree_hash_table ht(m_capacity, m_thread_count);\n\t\tTable<hashKV> ht(m_capacity, hashKV(), 1.3);\n\n    std::random_device                 rd;\n    std::mt19937                       mt(rd());\n    std::uniform_int_distribution<int> rng;\n\n    std::array<int, 3> weights;\n    weights[0] = m_rweight;\n    weights[1] = m_idweight;\n    weights[2] = m_idweight;\n\n    std::default_random_engine         g;\n    std::discrete_distribution<int>    drng(weights.begin(), weights.end());\n\n    // Warm-up table to load factor\n    int num_warmup = static_cast<int>(static_cast<double>(m_capacity) * m_load_factor);\n    for (int i = 0; i < num_warmup; i++)\n    {\n      int k = rng(mt); \n      int v = rng(mt);\n\n      
//ht.insert(k, v, 0);\n      ht.insert({k,v});\n    }\n\n    // Run benchmark\n    std::vector<double> results;\n    for (int iter = 0; iter < NUM_ITERS; iter++)\n    {\n      int num_elems = m_op_count / m_thread_count;\n      pthread_t  workers[MAX_THREADS];\n      WorkerArgs args[MAX_THREADS];\n\n      double start = CycleTimer::currentSeconds();\n      for (int i = 0; i < m_thread_count; i++)\n      {\n        args[i].num_elems = num_elems;\n        args[i].rweight   = m_rweight;\n        args[i].iweight   = m_idweight / 2;\n        args[i].dweight   = m_idweight / 2;\n        args[i].ht_p      = (void*)&ht;\n        args[i].tid       = i;\n        pthread_create(&workers[i], NULL, thread_service<Table<hashKV>>, (void*)&args[i]);\n      }\n\n      for (int i = 0; i < m_thread_count; i++)\n      {\n        pthread_join(workers[i], NULL);\n      }\n      double time  = CycleTimer::currentSeconds() - start;\n      results.push_back(time);\n    }\n\n    // Publish Results\n    double best_time = *std::min_element(results.begin(), results.end());\n    double avg_time  = std::accumulate(results.begin(), results.end(), 0.0) / static_cast<double>(results.size());\n    std::cout << \"\\t\" << \"Max Throughput: \" << m_op_count / best_time / 1000.0 << \" ops/ms\" << std::endl;\n    std::cout << \"\\t\" << \"Avg Throughput: \" << m_op_count / avg_time  / 1000.0 << \" ops/ms\" << std::endl;\n\n    results.clear();\n\n    int* keys = new int[m_op_count];\n\n    for (int iter = 0; iter < NUM_ITERS; iter++)\n    {\n      int num_elems = m_op_count / m_thread_count;\n      pthread_t  workers[MAX_THREADS];\n      WorkerArgs args[MAX_THREADS];\n\n      double start = CycleTimer::currentSeconds();\n      for (int i = 0; i < m_thread_count; i++)\n      {\n        args[i].num_elems = num_elems;\n        args[i].rweight   = m_rweight;\n        args[i].iweight   = m_idweight / 2;\n        args[i].dweight   = m_idweight / 2;\n        args[i].ht_p      = (void*)&ht;\n        
args[i].tid       = i;\n        args[i].elems     = keys;\n        args[i].start     = i * num_elems;\n        pthread_create(&workers[i], NULL, thread_service_low_contention<Table<hashKV>>, (void*)&args[i]);\n      }\n\n      for (int i = 0; i < m_thread_count; i++)\n      {\n        pthread_join(workers[i], NULL);\n      }\n      double time  = CycleTimer::currentSeconds() - start;\n      results.push_back(time);\n    }\n\n    // Publish Results\n    best_time = *std::min_element(results.begin(), results.end());\n    avg_time  = std::accumulate(results.begin(), results.end(), 0.0) / static_cast<double>(results.size());\n    std::cout << \"\\t\" << \"Max Throughput (Low): \" << m_op_count / best_time / 1000.0 << \" ops/ms\" << std::endl;\n    std::cout << \"\\t\" << \"Avg Throughput (Low): \" << m_op_count / avg_time  / 1000.0 << \" ops/ms\" << std::endl;\n\n    results.clear();\n\n    for (int iter = 0; iter < NUM_ITERS; iter++)\n    {\n      int num_elems = m_op_count / m_thread_count;\n      pthread_t  workers[MAX_THREADS];\n      WorkerArgs args[MAX_THREADS];\n\n      double start = CycleTimer::currentSeconds();\n      for (int i = 0; i < m_thread_count; i++)\n      {\n        args[i].num_elems = num_elems;\n        args[i].rweight   = m_rweight;\n        args[i].iweight   = m_idweight / 2;\n        args[i].dweight   = m_idweight / 2;\n        args[i].ht_p      = (void*)&ht;\n        args[i].tid       = i;\n        //ht.insert(0, 0, 0);\n        ht.insert({0,0});\n        pthread_create(&workers[i], NULL, thread_service_high_contention<Table<hashKV>>, (void*)&args[i]);\n      }\n\n      for (int i = 0; i < m_thread_count; i++)\n      {\n        pthread_join(workers[i], NULL);\n      }\n      double time  = CycleTimer::currentSeconds() - start;\n      results.push_back(time);\n    }\n\n    // Publish Results\n    best_time = *std::min_element(results.begin(), results.end());\n    avg_time  = std::accumulate(results.begin(), results.end(), 0.0) / 
static_cast<double>(results.size());\n    std::cout << \"\\t\" << \"Max Throughput (High): \" << m_op_count / best_time / 1000.0 << \" ops/ms\" << std::endl;\n    std::cout << \"\\t\" << \"Avg Throughput (High): \" << m_op_count / avg_time  / 1000.0 << \" ops/ms\" << std::endl;\n\n\n}\n\nvoid BenchmarkLockFreeHT::run()\n{\n  benchmark_correctness();\n//  benchmark_hp();\n//  benchmark_all();\n}\n\n#endif\n"
  },
  {
    "path": "solvers/wheels/lockfreehash/lprobe/benchmark_lprobe_ptr.h",
    "content": "#ifndef BENCHMARK_LOCKFREE_HT\n#define BENCHMARK_LOCKFREE_HT\n\n#include <unordered_map>\n#include <iostream>\n#include <random>\n#include <algorithm>\n#include <pthread.h>\n#include <array>\n#include <unordered_map>\n#include \"assert.h\"\n\n#include \"cycle_timer.h\"\n#include \"hash_table.h\"\n#include \"thread_service_ptr.h\"\n\n#define NUM_ITERS   3\n#define MAX_THREADS 24\n\n#define C_NUM_ELEMS\t76800*24\n#include \"data_ptr.h\"\nusing namespace pbbs;\n\n\n\nclass BenchmarkLockFreeHT\n{\n  public:\n    BenchmarkLockFreeHT(int op_count, int capacity, \n                        int rweight, int idweight,\n                        int thread_count,\n                        double load_factor);\n\n    void benchmark_correctness();\n    void benchmark_hp();\n    void benchmark_all();\n    void run();\n\n  private:\n    int    m_rweight;\n    int    m_idweight;\n\n    int    m_thread_count;\n    int    m_op_count;\n    int    m_capacity;\n    double m_load_factor;\n};\n\nBenchmarkLockFreeHT::BenchmarkLockFreeHT(int op_count, int capacity, \n                                         int rweight, int idweight,\n                                         int thread_count, double load_factor)\n{\n  std::cout << \"*** BENCHMARKING LockFreeHT ***\" << std::endl;\n  m_op_count     = op_count;\n  m_load_factor  = load_factor; \n  m_capacity     = capacity;\n  m_thread_count = thread_count;\n\n  m_rweight      = rweight;\n  m_idweight     = idweight;\n}\n\nvoid BenchmarkLockFreeHT::benchmark_correctness()\n{\n  bool correct = true;\n\n  //Lockfree_hash_table ht(2 * C_NUM_ELEMS, m_thread_count);\n\tTable<hashKV> ht(2*C_NUM_ELEMS, hashKV(), 1.3);\n  std::unordered_map<int, int> map;\n  map.reserve(2 * C_NUM_ELEMS);\n  \n  std::random_device                 rd;\n  std::mt19937                       mt(rd());\n  std::uniform_int_distribution<int> rng;\n\n  int elems[C_NUM_ELEMS];\n  for (int i = 0; i < C_NUM_ELEMS; i++)\n  {\n    //int k = rng(mt);\n\t\tint k = 
100;\n    elems[i] = k;\n    map[k] = k;\n  }\n\t//adding err\n\t//elems[5*C_NUM_ELEMS/24 + 34]  = elems[6*C_NUM_ELEMS/24 + 49];\n\t//elems[22*C_NUM_ELEMS/24 + 199]  = elems[9*C_NUM_ELEMS/24 + 347];\n\t//elems[21*C_NUM_ELEMS/24 + 199]  = elems[9*C_NUM_ELEMS/24 + 347];\n\t//elems[19*C_NUM_ELEMS/24 + 199]  = elems[9*C_NUM_ELEMS/24 + 347];\n\tfor (int i=0;i<23;i++)\n\t\tfor (int j=0;j<20;j++)\n\t\telems[i*C_NUM_ELEMS/24 + 34+j]  = 101+i*20+j;\n  \n  pthread_t  workers[MAX_THREADS];\n  WorkerArgs args[MAX_THREADS];\n\n  for (int i = 0; i < 24; i++)\n  {\n    args[i].num_elems = C_NUM_ELEMS / 24;\n    args[i].ht_p      = (void*)&ht;\n    args[i].elems     = elems;\n    args[i].start     = i * (C_NUM_ELEMS / 24);\n    args[i].tid       = i;\n\n    pthread_create(&workers[i], NULL, thread_checkmiss<Table<hashKV>>, (void*)&args[i]);\n  }\n\n  for (int i = 0; i < 24; i++)\n  {\n    pthread_join(workers[i], NULL);\n  }\n\n\n\tstd::cout << \"hash table count is \" << ht.count() << std::endl;\n\tstd::cout << \"miss is \" << miss << std::endl;\n\tassert(miss==461);\n  int count = 0;\n  for (std::pair<int, int> e : map)\n  {\n    //std::pair<int, bool> r = ht.search(e.first, 0);\n\t\tstruct KV *res  = ht.find(e.first);\n\t\tstd::pair<int,bool> r;\n\t\tif (res==nullptr || res->k == -1)\n\t\t\tr = {-1,false};\n\t\telse\n\t\t\tr = {res->v,true};\n    if (!r.second || e.second != r.first)\n    {\n\n      std::cout << \"\\t\" << \"Expected value, Received value, Received result = \" << e.second << \" \" << r.second << \" \"<< r.first << std::endl;\n      correct = false;\n      count++;\n    }\n  }\n\n  std::cout << \"\\t\" << count << \"/\" << C_NUM_ELEMS << \" errors\" << std::endl;\n\n  if (correct)\n    std::cout << \"\\t\" << \"Correctness test passed\" << std::endl;\n  else\n    std::cout << \"\\t\" << \"Correctness test failed\" << std::endl;\n\n}\n\nvoid BenchmarkLockFreeHT::benchmark_hp()\n{\n  //Lockfree_hash_table ht(400000, m_thread_count);\n\tTable<hashKV> ht(400000, 
hashKV(), 1.3);\n\n  std::random_device                 rd;\n  std::mt19937                       mt(rd());\n  std::uniform_int_distribution<int> rng;\n\n  std::array<int, 3> weights;\n  weights[0] = m_rweight;\n  weights[1] = m_idweight;\n  weights[2] = m_idweight;\n\n  std::default_random_engine         g;\n  std::discrete_distribution<int>    drng(weights.begin(), weights.end());\n\n  int insert[200000];\n  for (int i = 0; i < 200000; i++)\n  {\n    int k = rng(mt);\n    int v = rng(mt);\n    insert[i] = k;\n    //ht.insert(k, v, 0);\n\t\tht.insert(new struct KV(k,v));\n  }\n  \n  pthread_t  workers[MAX_THREADS];\n  WorkerArgs args[MAX_THREADS];\n\n  int num_elems = 200000 / m_thread_count;\n  for (int i = 0; i < m_thread_count; i++)\n  {\n    args[i].num_elems = num_elems;\n    args[i].ht_p      = (void*)&ht;\n    args[i].elems     = insert;\n    args[i].start     = i * num_elems;\n    args[i].tid       = i;\n    args[i].remove    = i < (m_thread_count / 4);\n\n    pthread_create(&workers[i], NULL, thread_remove<Table<hashKV>>, (void*)&args[i]);\n  }\n  \n  for (int i = 0; i < m_thread_count; i++)\n  {\n    pthread_join(workers[i], NULL);\n  }\n   \n  std::cout << \"\\t\" << \"Hazard Pointer test passed\" << std::endl;\n\n}\n\nvoid BenchmarkLockFreeHT::benchmark_all()\n{\n   // Lockfree_hash_table ht(m_capacity, m_thread_count);\n\t\tTable<hashKV> ht(m_capacity, hashKV(), 1.3);\n\n    std::random_device                 rd;\n    std::mt19937                       mt(rd());\n    std::uniform_int_distribution<int> rng;\n\n    std::array<int, 3> weights;\n    weights[0] = m_rweight;\n    weights[1] = m_idweight;\n    weights[2] = m_idweight;\n\n    std::default_random_engine         g;\n    std::discrete_distribution<int>    drng(weights.begin(), weights.end());\n\n    // Warm-up table to load factor\n    int num_warmup = static_cast<int>(static_cast<double>(m_capacity) * m_load_factor);\n    for (int i = 0; i < num_warmup; i++)\n    {\n      int k = rng(mt); \n    
  int v = rng(mt);\n\n      //ht.insert(k, v, 0);\n      ht.insert(new struct KV(k,v));\n    }\n\n    // Run benchmark\n    std::vector<double> results;\n    for (int iter = 0; iter < NUM_ITERS; iter++)\n    {\n      int num_elems = m_op_count / m_thread_count;\n      pthread_t  workers[MAX_THREADS];\n      WorkerArgs args[MAX_THREADS];\n\n      double start = CycleTimer::currentSeconds();\n      for (int i = 0; i < m_thread_count; i++)\n      {\n        args[i].num_elems = num_elems;\n        args[i].rweight   = m_rweight;\n        args[i].iweight   = m_idweight / 2;\n        args[i].dweight   = m_idweight / 2;\n        args[i].ht_p      = (void*)&ht;\n        args[i].tid       = i;\n        pthread_create(&workers[i], NULL, thread_service<Table<hashKV>>, (void*)&args[i]);\n      }\n\n      for (int i = 0; i < m_thread_count; i++)\n      {\n        pthread_join(workers[i], NULL);\n      }\n      double time  = CycleTimer::currentSeconds() - start;\n      results.push_back(time);\n    }\n\n    // Publish Results\n    double best_time = *std::min_element(results.begin(), results.end());\n    double avg_time  = std::accumulate(results.begin(), results.end(), 0.0) / static_cast<double>(results.size());\n    std::cout << \"\\t\" << \"Max Throughput: \" << m_op_count / best_time / 1000.0 << \" ops/ms\" << std::endl;\n    std::cout << \"\\t\" << \"Avg Throughput: \" << m_op_count / avg_time  / 1000.0 << \" ops/ms\" << std::endl;\n\n    results.clear();\n\n    int* keys = new int[m_op_count];\n\n    for (int iter = 0; iter < NUM_ITERS; iter++)\n    {\n      int num_elems = m_op_count / m_thread_count;\n      pthread_t  workers[MAX_THREADS];\n      WorkerArgs args[MAX_THREADS];\n\n      double start = CycleTimer::currentSeconds();\n      for (int i = 0; i < m_thread_count; i++)\n      {\n        args[i].num_elems = num_elems;\n        args[i].rweight   = m_rweight;\n        args[i].iweight   = m_idweight / 2;\n        args[i].dweight   = m_idweight / 2;\n        
args[i].ht_p      = (void*)&ht;\n        args[i].tid       = i;\n        args[i].elems     = keys;\n        args[i].start     = i * num_elems;\n        pthread_create(&workers[i], NULL, thread_service_low_contention<Table<hashKV>>, (void*)&args[i]);\n      }\n\n      for (int i = 0; i < m_thread_count; i++)\n      {\n        pthread_join(workers[i], NULL);\n      }\n      double time  = CycleTimer::currentSeconds() - start;\n      results.push_back(time);\n    }\n\n    // Publish Results\n    best_time = *std::min_element(results.begin(), results.end());\n    avg_time  = std::accumulate(results.begin(), results.end(), 0.0) / static_cast<double>(results.size());\n    std::cout << \"\\t\" << \"Max Throughput (Low): \" << m_op_count / best_time / 1000.0 << \" ops/ms\" << std::endl;\n    std::cout << \"\\t\" << \"Avg Throughput (Low): \" << m_op_count / avg_time  / 1000.0 << \" ops/ms\" << std::endl;\n\n    results.clear();\n\n    for (int iter = 0; iter < NUM_ITERS; iter++)\n    {\n      int num_elems = m_op_count / m_thread_count;\n      pthread_t  workers[MAX_THREADS];\n      WorkerArgs args[MAX_THREADS];\n\n      double start = CycleTimer::currentSeconds();\n      for (int i = 0; i < m_thread_count; i++)\n      {\n        args[i].num_elems = num_elems;\n        args[i].rweight   = m_rweight;\n        args[i].iweight   = m_idweight / 2;\n        args[i].dweight   = m_idweight / 2;\n        args[i].ht_p      = (void*)&ht;\n        args[i].tid       = i;\n        //ht.insert(0, 0, 0);\n        ht.insert(new struct KV(0,0));\n        pthread_create(&workers[i], NULL, thread_service_high_contention<Table<hashKV>>, (void*)&args[i]);\n      }\n\n      for (int i = 0; i < m_thread_count; i++)\n      {\n        pthread_join(workers[i], NULL);\n      }\n      double time  = CycleTimer::currentSeconds() - start;\n      results.push_back(time);\n    }\n\n    // Publish Results\n    best_time = *std::min_element(results.begin(), results.end());\n    avg_time  = 
std::accumulate(results.begin(), results.end(), 0.0) / static_cast<double>(results.size());\n    std::cout << \"\\t\" << \"Max Throughput (High): \" << m_op_count / best_time / 1000.0 << \" ops/ms\" << std::endl;\n    std::cout << \"\\t\" << \"Avg Throughput (High): \" << m_op_count / avg_time  / 1000.0 << \" ops/ms\" << std::endl;\n\n\n}\n\nvoid BenchmarkLockFreeHT::run()\n{\n  benchmark_correctness();\n//  benchmark_hp();\n//  benchmark_all();\n}\n\n#endif\n"
  },
  {
    "path": "solvers/wheels/lockfreehash/lprobe/block_allocator.h",
    "content": "// This code is part of the Problem Based Benchmark Suite (PBBS)\n// Copyright (c) 2016 Guy Blelloch, Daniel Ferizovic, and the PBBS team\n//\n// Permission is hereby granted, free of charge, to any person obtaining a\n// copy of this software and associated documentation files (the\n// \"Software\"), to deal in the Software without restriction, including\n// without limitation the rights (to use, copy, modify, merge, publish,\n// distribute, sublicense, and/or sell copies of the Software, and to\n// permit persons to whom the Software is furnished to do so, subject to\n// the following conditions:\n//\n// The above copyright notice and this permission notice shall be included\n// in all copies or substantial portions of the Software.\n//\n// THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS\n// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\n// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND\n// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE\n// LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION\n// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION\n// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\n// A concurrent allocator for any fixed type T\n// Keeps a local pool per processor\n// Grabs list_size elements from a global pool if empty, and\n// Returns list_size elements to the global pool when local pool=2*list_size\n// Keeps track of number of allocated elements.\n// Probably more efficient than a general purpose allocator\n\n#pragma once\n\n#include <stdio.h>\n#include <stdlib.h>\n#include <math.h>\n#include <atomic>\n#include \"concurrent_stack.h\"\n#include \"utilities.h\"\n#include \"memory_size.h\"\n\nstruct block_allocator {\nprivate:\n\n  static const size_t default_list_bytes = (1 << 22) - 64; // in bytes\n  static const size_t pad_size = 256;\n\n  struct block {\n    block* next;\n  };\n\n  using block_p = 
block*;\n\n  struct alignas(64) thread_list {\n    size_t sz;\n    block_p head;\n    block_p mid;\n    char cache_line[pad_size];\n  thread_list() : sz(0), head(NULL) {};\n  };\n\n  bool initialized{false};\n  block_p initialize_list(block_p);\n  block_p get_list();\n  concurrent_stack<char*> pool_roots;\n  concurrent_stack<block_p> global_stack;\n  thread_list* local_lists;\n\n  size_t list_length;\n  size_t max_blocks;\n  size_t block_size_;\n  //std::atomic<size_t> blocks_allocated;\n  size_t blocks_allocated;\n  char* allocate_blocks(size_t num_blocks);\n\npublic:\n  static int thread_count;\n  void* alloc();\n  void free(void*);\n  void reserve(size_t n);\n  void clear();\n  void print_stats();\n  size_t block_size () {return block_size_;}\n  size_t num_allocated_blocks() {return blocks_allocated;}\n  size_t num_used_blocks();\n\n  ~block_allocator();\n  block_allocator(size_t block_size,\n\t\t  size_t reserved_blocks = 0, \n\t\t  size_t list_length_ = 0, \n\t\t  size_t max_blocks_ = 0);\n  block_allocator() {};\n};\n\nint block_allocator::thread_count = num_workers();\n\n// Allocate a new list of list_length elements\n\nauto block_allocator::initialize_list(block_p start) -> block_p {\n  parallel_for (0, list_length - 1, [&] (size_t i) {\n      block_p p =  (block_p) (((char*) start) + i * block_size_);\n      p->next = (block_p) (((char*) p) + block_size_);\n    }, 1000, true);\n  block_p last = (block_p) (((char*) start) + (list_length-1) * block_size_);\n  last->next = NULL;\n  return start;\n}\n\nsize_t block_allocator::num_used_blocks() {\n  size_t free_blocks = global_stack.size()*list_length;\n  for (int i = 0; i < thread_count; ++i) \n    free_blocks += local_lists[i].sz;\n  return blocks_allocated - free_blocks;\n}\n\nauto block_allocator::allocate_blocks(size_t num_blocks) -> char* {\n  //char* start = (char*) aligned_alloc(pad_size,\n  //num_blocks * block_size_+ pad_size);\n  char* start = (char*) pbbs::my_alloc(num_blocks * block_size_);\n  if 
(start == NULL) {\n    fprintf(stderr, \"Cannot allocate space in block_allocator\");\n    exit(1); }\n\n  pbbs::fetch_and_add(&blocks_allocated, num_blocks); // atomic\n\n  if (blocks_allocated > max_blocks) {\n    fprintf(stderr, \"Too many blocks in block_allocator, change max_blocks\");\n    exit(1);  }\n\n  pool_roots.push(start); // keep track so can free later\n  return start;\n}\n\n// Either grab a list from the global pool, or if there is none\n// then allocate a new list\nauto block_allocator::get_list() -> block_p {\n  maybe<block_p> rem = global_stack.pop();\n  if (rem) return *rem;\n  block_p start = (block_p) allocate_blocks(list_length);\n  return initialize_list(start);\n}\n\n// Allocate n elements across however many lists are needed (rounded up)\nvoid block_allocator::reserve(size_t n) {\n  size_t num_lists = thread_count + ceil(n / (double)list_length);\n  char* start = allocate_blocks(list_length*num_lists);\n  parallel_for(0, num_lists, [&] (size_t i) {\n      block_p offset = (block_p) (start + i * list_length * block_size_);\n      global_stack.push(initialize_list(offset));\n    });\n}\n\nvoid block_allocator::print_stats() {\n  size_t used = num_used_blocks();\n  size_t allocated = num_allocated_blocks();\n  size_t size = block_size();\n  std::cout << \"Used: \" << used << \", allocated: \" << allocated\n\t    << \", block size: \" << size\n\t    << \", bytes: \" << size*allocated << std::endl;\n}\n\nblock_allocator::block_allocator(size_t block_size,\n\t\t\t\t size_t reserved_blocks,\n\t\t\t\t size_t list_length_,\n\t\t\t\t size_t max_blocks_) {\n  blocks_allocated = 0;\n  block_size_ = block_size;\n  if (list_length_ == 0)\n    list_length = default_list_bytes / block_size;\n  else list_length = list_length_ / block_size;\n  if  (max_blocks_ == 0)\n    max_blocks = (3*getMemorySize()/block_size)/4;\n  else max_blocks = max_blocks_;\n\n  reserve(reserved_blocks);\n\n  // all local lists start out empty\n  local_lists = new 
thread_list[thread_count];\n  initialized = true;\n}\n\nvoid block_allocator::clear() {\n  if (num_used_blocks() > 0) \n    cout << \"Warning: not clearing memory pool, block_size=\" << block_size()\n\t << \" : allocated blocks remain\" << endl;\n  else {\n    // clear lists\n    for (int i = 0; i < thread_count; ++i) \n      local_lists[i].sz = 0;\n  \n    // throw away all allocated memory\n    maybe<char*> x;\n    while ((x = pool_roots.pop())) pbbs::my_free(*x); //std::free(*x);\n    pool_roots.clear();\n    global_stack.clear();\n    blocks_allocated = 0;\n  }\n}\n\nblock_allocator::~block_allocator() {\n  clear();\n  delete[] local_lists;\n}\n\nvoid block_allocator::free(void* ptr) {\n  block_p new_node = (block_p) ptr;\n  int id = worker_id();\n\n  if (local_lists[id].sz == list_length+1) {\n    local_lists[id].mid = local_lists[id].head;\n  } else if (local_lists[id].sz == 2*list_length) {\n    global_stack.push(local_lists[id].mid->next);\n    local_lists[id].mid->next = NULL;\n    local_lists[id].sz = list_length;\n  }\n  new_node->next = local_lists[id].head;\n  local_lists[id].head = new_node;\n  local_lists[id].sz++;\n}\n\ninline void* block_allocator::alloc() {\n  int id = worker_id();\n\n  if (local_lists[id].sz == 0)  {\n    local_lists[id].head = get_list();\n    local_lists[id].sz = list_length;\n  }\n\n  local_lists[id].sz--;\n  block_p p = local_lists[id].head;\n  local_lists[id].head = local_lists[id].head->next;\n\n  return (void*) p;\n}\n\n"
  },
  {
    "path": "solvers/wheels/lockfreehash/lprobe/concurrent_stack.h",
    "content": "// This code is part of the Problem Based Benchmark Suite (PBBS)\n// Copyright (c) 2016 Guy Blelloch, Daniel Ferizovic, and the PBBS team\n//\n// Permission is hereby granted, free of charge, to any person obtaining a\n// copy of this software and associated documentation files (the\n// \"Software\"), to deal in the Software without restriction, including\n// without limitation the rights (to use, copy, modify, merge, publish,\n// distribute, sublicense, and/or sell copies of the Software, and to\n// permit persons to whom the Software is furnished to do so, subject to\n// the following conditions:\n//\n// The above copyright notice and this permission notice shall be included\n// in all copies or substantial portions of the Software.\n//\n// THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS\n// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\n// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND\n// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE\n// LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION\n// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION\n// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\n// Lock free, linearizable implementation of a concurrent stack\n// supporting:\n//    push\n//    pop\n//    size\n// Works for elements of any type T\n// It requires memory proportional to the largest it has been\n// This can be cleared, but only when noone else is using it.\n// Requires 128-bit-compare-and-swap\n// Counter could overflow \"in theory\", but would require over 500 years even\n// if updated every nanosecond (and must be updated sequentially)\n\n#pragma once\n#include <cstdio>\n#include <cstdint>\n#include <iostream>\n#include \"utilities.h\"\n\ntemplate<typename T>\nclass concurrent_stack {\n\n  struct Node {\n    T value;\n    Node* next;\n    size_t length;\n  };\n\n  class alignas(64) prim_concurrent_stack {\n  
  struct nodeAndCounter {\n      Node* node;\n      uint64_t counter;\n    };\n\n    union CAS_t {\n      __uint128_t x;\n      nodeAndCounter NC;\n    };\n    CAS_t head;\n\n    size_t length(Node* n) {\n      if (n == NULL) return 0;\n      else return n->length;\n    }\n\n  public:\n    prim_concurrent_stack() {\n      head.NC.node = NULL;\n      head.NC.counter = 0;\n      std::atomic_thread_fence(std::memory_order_seq_cst);\n    }\n\n    size_t size() {\n      return length(head.NC.node);}\n\n    void push(Node* newNode){\n      CAS_t oldHead, newHead;\n      do {\n\toldHead = head;\n\tnewNode->next = oldHead.NC.node;\n\tnewNode->length = length(oldHead.NC.node) + 1;\n\t//std::atomic_thread_fence(std::memory_order_release);\n\tstd::atomic_thread_fence(std::memory_order_seq_cst);\n\tnewHead.NC.node = newNode;\n\tnewHead.NC.counter = oldHead.NC.counter + 1;\n      } while (!__sync_bool_compare_and_swap_16(&head.x,oldHead.x, newHead.x));\n    }\n    Node* pop() {\n      Node* result;\n      CAS_t oldHead, newHead;\n      do {\n\toldHead = head;\n\tresult = oldHead.NC.node;\n\tif (result == NULL) return result;\n\tnewHead.NC.node = result->next;\n\tnewHead.NC.counter = oldHead.NC.counter + 1;\n      } while (!__sync_bool_compare_and_swap_16(&head.x,oldHead.x, newHead.x));\n\n      return result;\n    }\n  };// __attribute__((aligned(16)));\n\n  prim_concurrent_stack a;\n  prim_concurrent_stack b;\n\n public:\n\n  size_t size() { return a.size();}\n\n  void push(T v) {\n    Node* x = b.pop();\n    if (!x) x = (Node*) malloc(sizeof(Node));\n    x->value = v;\n    a.push(x);\n  }\n\n  maybe<T> pop() {\n    Node* x = a.pop();\n    if (!x) return maybe<T>();\n    T r = x->value;\n    b.push(x);\n    return maybe<T>(r);\n  }\n\n  // assumes no push or pop in progress\n  void clear() {\n    Node* x;\n    while ((x = a.pop())) free(x);\n    while ((x = b.pop())) free(x);\n  }\n\n  concurrent_stack() {}\n  ~concurrent_stack() { clear();}\n};\n"
  },
  {
    "path": "solvers/wheels/lockfreehash/lprobe/cycle_timer.h",
    "content": "#ifndef _SYRAH_CYCLE_TIMER_H_\n#define _SYRAH_CYCLE_TIMER_H_\n\n#if defined(__APPLE__)\n  #if defined(__x86_64__)\n    #include <sys/sysctl.h>\n  #else\n    #include <mach/mach.h>\n    #include <mach/mach_time.h>\n  #endif // __x86_64__ or not\n\n  #include <stdio.h>  // fprintf\n  #include <stdlib.h> // exit\n\n#elif _WIN32\n#  include <windows.h>\n#  include <time.h>\n#else\n#  include <stdio.h>\n#  include <stdlib.h>\n#  include <string.h>\n#  include <sys/time.h>\n#endif\n\n\n  // This uses the cycle counter of the processor.  Different\n  // processors in the system will have different values for this.  If\n  // you process moves across processors, then the delta time you\n  // measure will likely be incorrect.  This is mostly for fine\n  // grained measurements where the process is likely to be on the\n  // same processor.  For more global things you should use the\n  // Time interface.\n\n  // Also note that if you processors' speeds change (i.e. processors\n  // scaling) or if you are in a heterogenous environment, you will\n  // likely get spurious results.\n  class CycleTimer {\n  public:\n    typedef unsigned long long SysClock;\n\n    //////////\n    // Return the current CPU time, in terms of clock ticks.\n    // Time zero is at some arbitrary point in the past.\n    static SysClock currentTicks() {\n#if defined(__APPLE__) && !defined(__x86_64__)\n      return mach_absolute_time();\n#elif defined(_WIN32)\n      LARGE_INTEGER qwTime;\n      QueryPerformanceCounter(&qwTime);\n      return qwTime.QuadPart;\n#elif defined(__x86_64__)\n      unsigned int a, d;\n      asm volatile(\"rdtsc\" : \"=a\" (a), \"=d\" (d));\n      return static_cast<unsigned long long>(a) |\n        (static_cast<unsigned long long>(d) << 32);\n#elif defined(__ARM_NEON__) && 0 // mrc requires superuser.\n      unsigned int val;\n      asm volatile(\"mrc p15, 0, %0, c9, c13, 0\" : \"=r\"(val));\n      return val;\n#else\n      timespec spec;\n      
clock_gettime(CLOCK_THREAD_CPUTIME_ID, &spec);\n      return CycleTimer::SysClock(static_cast<float>(spec.tv_sec) * 1e9 + static_cast<float>(spec.tv_nsec));\n#endif\n    }\n\n    //////////\n    // Return the current CPU time, in terms of seconds.\n    // This is slower than currentTicks().  Time zero is at\n    // some arbitrary point in the past.\n    static double currentSeconds() {\n      return currentTicks() * secondsPerTick();\n    }\n\n    //////////\n    // Return the conversion from seconds to ticks.\n    static double ticksPerSecond() {\n      return 1.0/secondsPerTick();\n    }\n\n    static const char* tickUnits() {\n#if defined(__APPLE__) && !defined(__x86_64__)\n      return \"ns\";\n#elif defined(__WIN32__) || defined(__x86_64__)\n      return \"cycles\";\n#else\n      return \"ns\"; // clock_gettime\n#endif\n    }\n\n    //////////\n    // Return the conversion from ticks to seconds.\n    static double secondsPerTick() {\n      static bool initialized = false;\n      static double secondsPerTick_val;\n      if (initialized) return secondsPerTick_val;\n#if defined(__APPLE__)\n  #ifdef __x86_64__\n      int args[] = {CTL_HW, HW_CPU_FREQ};\n      unsigned int Hz;\n      size_t len = sizeof(Hz);\n      if (sysctl(args, 2, &Hz, &len, NULL, 0) != 0) {\n         fprintf(stderr, \"Failed to initialize secondsPerTick_val!\\n\");\n         exit(-1);\n      }\n      secondsPerTick_val = 1.0 / (double) Hz;\n  #else\n      mach_timebase_info_data_t time_info;\n      mach_timebase_info(&time_info);\n\n      // Scales to nanoseconds without 1e-9f\n      secondsPerTick_val = (1e-9*static_cast<double>(time_info.numer))/\n        static_cast<double>(time_info.denom);\n  #endif // x86_64 or not\n#elif defined(_WIN32)\n      LARGE_INTEGER qwTicksPerSec;\n      QueryPerformanceFrequency(&qwTicksPerSec);\n      secondsPerTick_val = 1.0/static_cast<double>(qwTicksPerSec.QuadPart);\n#else\n      FILE *fp = fopen(\"/proc/cpuinfo\",\"r\");\n      char input[1024];\n      if 
(!fp) {\n         fprintf(stderr, \"CycleTimer::resetScale failed: couldn't find /proc/cpuinfo.\");\n         exit(-1);\n      }\n      // In case we don't find it, e.g. on the N900\n      secondsPerTick_val = 1e-9;\n      while (!feof(fp) && fgets(input, 1024, fp)) {\n        // NOTE(boulos): Because reading cpuinfo depends on dynamic\n        // frequency scaling it's better to read the @ sign first\n        float GHz, MHz;\n        if (strstr(input, \"model name\")) {\n          char* at_sign = strstr(input, \"@\");\n          if (at_sign) {\n            char* after_at = at_sign + 1;\n            char* GHz_str = strstr(after_at, \"GHz\");\n            char* MHz_str = strstr(after_at, \"MHz\");\n            if (GHz_str) {\n              *GHz_str = '\\0';\n              if (1 == sscanf(after_at, \"%f\", &GHz)) {\n                //printf(\"GHz = %f\\n\", GHz);\n                secondsPerTick_val = 1e-9f / GHz;\n                break;\n              }\n            } else if (MHz_str) {\n              *MHz_str = '\\0';\n              if (1 == sscanf(after_at, \"%f\", &MHz)) {\n                //printf(\"MHz = %f\\n\", MHz);\n                secondsPerTick_val = 1e-6f / MHz;\n                break;\n              }\n            }\n          }\n        } else if (1 == sscanf(input, \"cpu MHz : %f\", &MHz)) {\n          //printf(\"MHz = %f\\n\", MHz);\n          secondsPerTick_val = 1e-6f / MHz;\n          break;\n        }\n      }\n      fclose(fp);\n#endif\n\n      initialized = true;\n      return secondsPerTick_val;\n    }\n\n    //////////\n    // Return the conversion from ticks to milliseconds.\n    static double msPerTick() {\n      return secondsPerTick() * 1000.0;\n    }\n\n  private:\n    CycleTimer();\n  };\n\n#endif // #ifndef _SYRAH_CYCLE_TIMER_H_\n"
  },
  {
    "path": "solvers/wheels/lockfreehash/lprobe/data.h",
    "content": "#ifndef DATA_ELEMENT_\n#define DATA_ELEMENT_\n#include \"utilities.h\"\nusing namespace pbbs;\nstruct KV {\n\tint k;\n\tint v;\n\tbool operator== (struct KV other) { return k == other.k && v == other.v ;}\n\tbool operator!= (struct KV other) { return k != other.k || v != other.v ;}\n\tKV(int ak, int av) {k=ak;v=av;}\n};\n\nstruct hashKV {\n\tusing eType = struct KV;\n\tusing kType = int;\n\teType empty() {return {-1,-1};}\n\tkType getKey(eType v) {return v.k;}\n\t//int hash(kType v) {return v * 999029;} //hash64_2(v);}\n\tint hash(kType v) {return hash64_2(v);}\n\t//int cmp(kType v, kType b) {return (v > b) ? 1 : ((v == b) ? 0 : -1);}\n\tint cmp(kType v, kType b) {return (v == b) ? 0 : -1;}\n\tbool replaceQ(eType, eType) {return 0;}\n\teType update(eType v, eType) {return v;}\n\tbool cas(eType* p, eType o, eType n) {return\n\t\tatomic_compare_and_swap(p, o, n);}\n};\n#endif\n"
  },
  {
    "path": "solvers/wheels/lockfreehash/lprobe/data_ptr.h",
    "content": "#ifndef DATA_ELEMENT_\n#define DATA_ELEMENT_\n#include \"utilities.h\"\nusing namespace pbbs;\nstruct KV {\n\tint k;\n\tint v;\n\t//bool operator== (struct KV other) { return k == other.k && v == other.v ;}\n\t//bool operator!= (struct KV other) { return k != other.k || v != other.v ;}\n\tKV(int ak, int av) {k=ak;v=av;}\n};\n\nstruct hashKV {\n\tusing eType = struct KV*;\n\tusing kType = int;\n\t//eType empty() {return new struct KV(-1,-1);}\n\teType empty() {return nullptr;}\n\tkType getKey(eType v) {return v->k;}\n\tint hash(kType v) {return v * 999029;} //hash64_2(v);}\n\t//int hash(kType v) {return hash64_2(v);}\n\t//int cmp(kType v, kType b) {return (v > b) ? 1 : ((v == b) ? 0 : -1);}\n\tint cmp(kType v, kType b) {return (v == b) ? 0 : -1;}\n\tbool replaceQ(eType, eType) {return 0;}\n\teType update(eType v, eType) {return v;}\n\tbool cas(eType* p, eType o, eType n) {return\n\t\tatomic_compare_and_swap(p, o, n);}\n};\n#endif\n"
  },
  {
    "path": "solvers/wheels/lockfreehash/lprobe/get_time.h",
    "content": "#pragma once\n\n#include <stdlib.h>\n#include <sys/time.h>\n#include <iomanip>\n#include <iostream>\n#include <string>\n\nstruct timer {\n  double total_time;\n  double last_time;\n  bool on;\n  std::string name;\n  struct timezone tzp;\n\n  timer(std::string name = \"PBBS time\", bool _start = true)\n  : total_time(0.0), on(false), name(name), tzp({0,0}) {\n    if (_start) start();\n  }\n\n  double get_time() {\n    timeval now;\n    gettimeofday(&now, &tzp);\n    return ((double) now.tv_sec) + ((double) now.tv_usec)/1000000.;\n  }\n\n  void start () {\n    on = 1;\n    last_time = get_time();\n  }\n\n  double stop () {\n    on = 0;\n    double d = (get_time()-last_time);\n    total_time += d;\n    return d;\n  }\n\n  void reset() {\n     total_time=0.0;\n     on=0;\n  }\n\n  double get_total() {\n    if (on) return total_time + get_time() - last_time;\n    else return total_time;\n  }\n\n  double get_next() {\n    if (!on) return 0.0;\n    double t = get_time();\n    double td = t - last_time;\n    total_time += td;\n    last_time = t;\n    return td;\n  }\n\n  void report(double time, std::string str) {\n    std::ios::fmtflags cout_settings = std::cout.flags();\n    std::cout.precision(4);\n    std::cout << std::fixed;\n    std::cout << name << \": \";\n    if (str.length() > 0)\n      std::cout << str << \": \";\n    std::cout << time << std::endl;\n    std::cout.flags(cout_settings);\n  }\n\n  void total() {\n    report(get_total(),\"total\");\n    total_time = 0.0;\n  }\n\n  void reportTotal(std::string str) {\n    report(get_total(), str);\n  }\n\n  void next(std::string str) {\n    if (on) report(get_next(), str);\n  }\n};\n\nstatic timer _tm;\n#define startTime() _tm.start();\n#define nextTime(_string) _tm.next(_string);\n"
  },
  {
    "path": "solvers/wheels/lockfreehash/lprobe/hash_table.h",
    "content": "// This code is part of the Problem Based Benchmark Suite (PBBS)\n// Copyright (c) 2010 Guy Blelloch and the PBBS team\n//\n// Permission is hereby granted, free of charge, to any person obtaining a\n// copy of this software and associated documentation files (the\n// \"Software\"), to deal in the Software without restriction, including\n// without limitation the rights (to use, copy, modify, merge, publish,\n// distribute, sublicense, and/or sell copies of the Software, and to\n// permit persons to whom the Software is furnished to do so, subject to\n// the following conditions:\n//\n// The above copyright notice and this permission notice shall be included\n// in all copies or substantial portions of the Software.\n//\n// THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS\n// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\n// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND\n// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE\n// LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION\n// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION\n// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n#pragma once\n#include \"utilities.h\"\n#include \"sequence_ops.h\"\n\nnamespace pbbs {\n\n\t// A \"history independent\" hash table that supports insertion, and searching\n\t// It is described in the paper\n\t//   Julian Shun and Guy E. Blelloch\n\t//   Phase-concurrent hash tables for determinism\n\t//   SPAA 2014: 96-107\n\t// Insertions can happen in parallel\n\t// Searches can happen in parallel\n\t// Deletion can happen in parallel\n\t// but insertions cannot happen in parallel with searches or deletions\n\t// and searches cannot happen in parallel with deletions\n\t// i.e. 
each of the three types of operations have to happen in phase\n\ttemplate <class HASH>\n\t\tclass Table {\n\t\t\tprivate:\n\t\t\t\tusing eType = typename HASH::eType;\n\t\t\t\tusing kType = typename HASH::kType;\n\t\t\t\tsize_t m;\n\t\t\t\teType empty;\n\t\t\t\tHASH hashStruct;\n\t\t\t\teType* TA;\n\t\t\t\tusing index = long;\n\n\t\t\t\tstatic void clear(eType* A, size_t n, eType v) {\n\t\t\t\t\tauto f = [&] (size_t i) {\n\t\t\t\t\t\tassign_uninitialized(A[i], v);};\n\t\t\t\t\tparallel_for(0, n, f, granularity(n));\n\t\t\t\t}\n\n\t\t\t\tstruct notEmptyF {\n\t\t\t\t\teType e; notEmptyF(eType _e) : e(_e) {}\n\t\t\t\t\tint operator() (eType a) {return e != a;}};\n\n\t\t\t\tindex hashToRange(index h) {return (int) h % (uint) m;}\n\t\t\t\tindex firstIndex(kType v) {return hashToRange(hashStruct.hash(v));}\n\t\t\t\tindex incrementIndex(index h) {return (h + 1 == (long) m) ? 0 : h+1;}\n\t\t\t\tindex decrementIndex(index h) {return (h == 0) ? m-1 : h-1;}\n\t\t\t\tbool lessIndex(index a, index b) {return (a < b) ? (2*(b-a) < m) : (2*(a-b) > m);}\n\t\t\t\tbool lessEqIndex(index a, index b) {return a==b || lessIndex(a,b);}\n\n\t\t\tpublic:\n\t\t\t\t// Size is the maximum number of values the hash table will hold.\n\t\t\t\t// Overfilling the table could put it into an infinite loop.\n\t\t\t\tTable(size_t size, HASH hashF, float load = 1.5) :\n\t\t\t\t\tm(((size_t) 100.0 + load * size)),\n\t\t\t\t\tempty(hashF.empty()),\n\t\t\t\t\thashStruct(hashF),\n\t\t\t\t\tTA(new_array_no_init<eType>(m)) {\n\t\t\t\t\t\tclear(TA, m, empty); }\n\n\t\t\t\t~Table() { delete_array(TA, m);};\n\n\t\t\t\t// prioritized linear probing\n\t\t\t\t//   a new key will bump an existing key up if it has a higher priority\n\t\t\t\t//   an equal key will replace an old key if replaceQ(new,old) is true\n\t\t\t\t// returns 0 if not inserted (i.e. 
equal and replaceQ false) and 1 otherwise\n\t\t\t\tbool insert(eType v) {\n\t\t\t\t\tindex i = firstIndex(hashStruct.getKey(v));\n\t\t\t\t\twhile (true) {\n\t\t\t\t\t\teType c = TA[i];\n\t\t\t\t\t\tif (c == empty) {\n\t\t\t\t\t\t\tif (hashStruct.cas(&TA[i],c,v)) return true;\n\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\tint cmp = hashStruct.cmp(hashStruct.getKey(v),hashStruct.getKey(c));\n\t\t\t\t\t\t\tif (cmp == 0) {\n\t\t\t\t\t\t\t\tif (!hashStruct.replaceQ(v,c)) return false;\n\t\t\t\t\t\t\t\telse if (hashStruct.cas(&TA[i],c,v)) return true;\n\t\t\t\t\t\t\t} else if (cmp < 0)\n\t\t\t\t\t\t\t\ti = incrementIndex(i);\n\t\t\t\t\t\t\telse if (hashStruct.cas(&TA[i],c,v)) {\n\t\t\t\t\t\t\t\tv = c;\n\t\t\t\t\t\t\t\ti = incrementIndex(i);\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\t// prioritized linear probing\n\t\t\t\t//   a new key will bump an existing key up if it has a higher priority\n\t\t\t\t//   an equal key will replace an old key if replaceQ(new,old) is true\n\t\t\t\t// returns 0 if not inserted (i.e. 
equal and replaceQ false) and 1 otherwise\n\t\t\t\tbool update(eType v) {\n\t\t\t\t\tindex i = firstIndex(hashStruct.getKey(v));\n\t\t\t\t\twhile (true) {\n\t\t\t\t\t\teType c = TA[i];\n\t\t\t\t\t\tif (c == empty) {\n\t\t\t\t\t\t\tif (hashStruct.cas(&TA[i],c,v)) return true;\n\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\tint cmp = hashStruct.cmp(hashStruct.getKey(v),hashStruct.getKey(c));\n\t\t\t\t\t\t\tif (cmp == 0) {\n\t\t\t\t\t\t\t\tif (!hashStruct.replaceQ(v,c)) return false;\n\t\t\t\t\t\t\t\telse {\n\t\t\t\t\t\t\t\t\teType new_val = hashStruct.update(c,v);\n\t\t\t\t\t\t\t\t\tif (hashStruct.cas(&TA[i],c,new_val)) return true;\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t} else if (cmp < 0)\n\t\t\t\t\t\t\t\ti = incrementIndex(i);\n\t\t\t\t\t\t\telse if (hashStruct.cas(&TA[i],c,v)) {\n\t\t\t\t\t\t\t\tv = c;\n\t\t\t\t\t\t\t\ti = incrementIndex(i);\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\tbool deleteVal(kType v) {\n\t\t\t\t\tindex i = firstIndex(v);\n\t\t\t\t\tint cmp;\n\n\t\t\t\t\t// find first element less than or equal to v in priority order\n\t\t\t\t\tindex j = i;\n\t\t\t\t\teType c = TA[j];\n\n\t\t\t\t\tif (c == empty) return true;\n\n\t\t\t\t\t// find first location with priority less or equal to v's priority\n\t\t\t\t\twhile ((cmp = (c==empty) ? 
1 : hashStruct.cmp(v, hashStruct.getKey(c))) < 0) {\n\t\t\t\t\t\tj = incrementIndex(j);\n\t\t\t\t\t\tc = TA[j];\n\t\t\t\t\t}\n\t\t\t\t\twhile (true) {\n\t\t\t\t\t\t// Invariants:\n\t\t\t\t\t\t//   v is the key that needs to be deleted\n\t\t\t\t\t\t//   j is our current index into TA\n\t\t\t\t\t\t//   if v appears in TA, then at least one copy must appear at or before j\n\t\t\t\t\t\t//   c = TA[j] at some previous time (could now be changed)\n\t\t\t\t\t\t//   i = h(v)\n\t\t\t\t\t\t//   cmp = compare v to key of c (positive if greater, 0 equal, negative less)\n\t\t\t\t\t\tif (cmp != 0) {\n\t\t\t\t\t\t\t// v does not match key of c, need to move down one and exit if\n\t\t\t\t\t\t\t// moving before h(v)\n\t\t\t\t\t\t\tif (j == i) return true;\n\t\t\t\t\t\t\tj = decrementIndex(j);\n\t\t\t\t\t\t\tc = TA[j];\n\t\t\t\t\t\t\tcmp = (c == empty) ? 1 : hashStruct.cmp(v, hashStruct.getKey(c));\n\t\t\t\t\t\t} else { // found v at location j (at least at some prior time)\n\n\t\t\t\t\t\t\t// Find next available element to fill location j.\n\t\t\t\t\t\t\t// This is a little tricky since we need to skip over elements for\n\t\t\t\t\t\t\t// which the hash index is greater than j, and need to account for\n\t\t\t\t\t\t\t// things being moved downwards by others as we search.\n\t\t\t\t\t\t\t// Makes use of the fact that values in a cell can only decrease\n\t\t\t\t\t\t\t// during a delete phase as elements are moved from the right to left.\n\t\t\t\t\t\t\tindex jj = incrementIndex(j);\n\t\t\t\t\t\t\teType x = TA[jj];\n\t\t\t\t\t\t\twhile (x != empty && lessIndex(j, firstIndex(hashStruct.getKey(x)))) {\n\t\t\t\t\t\t\t\tjj = incrementIndex(jj);\n\t\t\t\t\t\t\t\tx = TA[jj];\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\tindex jjj = decrementIndex(jj);\n\t\t\t\t\t\t\twhile (jjj != j) {\n\t\t\t\t\t\t\t\teType y = TA[jjj];\n\t\t\t\t\t\t\t\tif (y == empty || !lessIndex(j, firstIndex(hashStruct.getKey(y)))) {\n\t\t\t\t\t\t\t\t\tx = y;\n\t\t\t\t\t\t\t\t\tjj = jjj;\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t\tjjj = 
decrementIndex(jjj);\n\t\t\t\t\t\t\t}\n\n\t\t\t\t\t\t\t// try to copy the replacement element into j\n\t\t\t\t\t\t\tif (hashStruct.cas(&TA[j],c,x)) {\n\t\t\t\t\t\t\t\t// swap was successful\n\t\t\t\t\t\t\t\t// if the replacement element was empty, we are done\n\t\t\t\t\t\t\t\tif (x == empty) return true;\n\n\t\t\t\t\t\t\t\t// Otherwise there are now two copies of the replacement element x\n\t\t\t\t\t\t\t\t// delete one copy (probably the original) by starting to look at jj.\n\t\t\t\t\t\t\t\t// Note that others can come along in the meantime and delete\n\t\t\t\t\t\t\t\t// one or both of them, but that is fine.\n\t\t\t\t\t\t\t\tv = hashStruct.getKey(x);\n\t\t\t\t\t\t\t\tj = jj;\n\t\t\t\t\t\t\t\ti = firstIndex(v);\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\tc = TA[j];\n\t\t\t\t\t\t\tcmp = (c == empty) ? 1 : hashStruct.cmp(v, hashStruct.getKey(c));\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\t// Returns the value if an equal value is found in the table\n\t\t\t\t// otherwise returns the \"empty\" element.\n\t\t\t\t// due to prioritization, can quit early if v is greater than cell\n\t\t\t\teType find(kType v) {\n\t\t\t\t\tindex h = firstIndex(v);\n\t\t\t\t\teType c = TA[h];\n\t\t\t\t\twhile (true) {\n\t\t\t\t\t\tif (c == empty) {return empty;}\n\t\t\t\t\t\tint cmp = hashStruct.cmp(v,hashStruct.getKey(c));\n\t\t\t\t\t\tif (cmp >= 0) {\n\t\t\t\t\t\t\t/*Ju we disable >0 case, because the +1 is not defined for our JitRequest*/\n\t\t\t\t\t\t\tif (cmp > 0) return empty;\n\t\t\t\t\t\t\telse return c;\n\t\t\t\t\t\t\t//return c;\n\t\t\t\t\t\t}\n\t\t\t\t\t\th = incrementIndex(h);\n\t\t\t\t\t\tc = TA[h];\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\t// returns the number of entries\n\t\t\t\tsize_t count() {\n\t\t\t\t\tauto is_full = [&] (size_t i) -> size_t {\n\t\t\t\t\t\treturn (TA[i] == empty) ? 
0 : 1;};\n\t\t\t\t\treturn reduce(delayed_seq<size_t>(m, is_full), addm<size_t>());\n\t\t\t\t}\n\n\t\t\t\t// returns all the current entries compacted into a sequence\n\t\t\t\tsequence<eType> entries() {\n\t\t\t\t\treturn filter(range<eType*>(TA, TA+m),\n\t\t\t\t\t\t\t[&] (eType v) {return v != empty;});\n\t\t\t\t}\n\n\t\t\t\tindex findIndex(kType v) {\n\t\t\t\t\tindex h = firstIndex(v);\n\t\t\t\t\teType c = TA[h];\n\t\t\t\t\twhile (true) {\n\t\t\t\t\t\tif (c == empty) return -1;\n\t\t\t\t\t\tint cmp = hashStruct.cmp(v,hashStruct.getKey(c));\n\t\t\t\t\t\tif (cmp >= 0) {\n\t\t\t\t\t\t\tif (cmp > 0) return -1;\n\t\t\t\t\t\t\telse return h;\n\t\t\t\t\t\t}\n\t\t\t\t\t\th = incrementIndex(h);\n\t\t\t\t\t\tc = TA[h];\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\tsequence<index> get_index() {\n\t\t\t\t\tauto is_full = [&] (const size_t i) -> int {\n\t\t\t\t\t\tif (TA[i] != empty) return 1; else return 0;};\n\t\t\t\t\tsequence<index> x(m, is_full);\n\t\t\t\t\tscan_inplace(x.slice(), addm<index>());\n\t\t\t\t\treturn x;\n\t\t\t\t}\n\n\t\t\t\t// prints the current entries along with the index they are stored at\n\t\t\t\tvoid print() {\n\t\t\t\t\tcout << \"vals = \";\n\t\t\t\t\tfor (size_t i=0; i < m; i++)\n\t\t\t\t\t\tif (TA[i] != empty)\n\t\t\t\t\t\t\tcout << i << \":\" << TA[i] << \",\";\n\t\t\t\t\tcout << endl;\n\t\t\t\t}\n\t\t};\n\n\ttemplate <class ET, class H>\n\t\tsequence<ET> remove_duplicates(sequence<ET> const &S, H const &hash, size_t m=0) {\n\t\t\ttimer t(\"remove duplicates\", false);\n\t\t\tif (m==0) m = S.size();\n\t\t\tTable<H> T(m, hash, 1.3);\n\t\t\tt.next(\"build table\");\n\t\t\tparallel_for(0, S.size(), [&] (size_t i) { T.insert(S[i]);});\n\t\t\tt.next(\"insert\");\n\t\t\tsequence<ET> result = T.entries();\n\t\t\tt.next(\"entries\");\n\t\t\treturn result;\n\t\t}\n\n\t// T must be some integer type\n\ttemplate <class T>\n\t\tstruct hashInt {\n\t\t\tusing eType = T;\n\t\t\tusing kType = T;\n\t\t\teType empty() {return -1;}\n\t\t\tkType getKey(eType v) {return 
v;}\n\t\t\tT hash(kType v) {return v * 999029;} //hash64_2(v);}\n\tint cmp(kType v, kType b) {return (v > b) ? 1 : ((v == b) ? 0 : -1);}\n\tbool replaceQ(eType, eType) {return 0;}\n\teType update(eType v, eType) {return v;}\n\tbool cas(eType* p, eType o, eType n) {return\n\t\tatomic_compare_and_swap(p, o, n);}\n};\n\n// works for non-negative integers (uses -1 to mark cell as empty)\ntemplate <class T>\nsequence<T> remove_duplicates(sequence<T> const &A) {\n\treturn remove_duplicates(A, hashInt<T>());\n}\n\n}\n"
  },
  {
    "path": "solvers/wheels/lockfreehash/lprobe/main.cc",
    "content": "#include \"hash_table.h\"\n#include \"benchmark_lprobe.h\"\n#include \"data.h\"\nusing namespace pbbs;\n//#define DEFAULT_OP_COUNT     2000\n//#define DEFAULT_THREAD_COUNT 2\n//#define DEFAULT_READ_PERCENT 90\n//#define DEFAULT_LOAD_FACTOR  40\n//#define CAPACITY             8000016\n//#define CAPACITY             800000\n\n#define DEFAULT_OP_COUNT     2000000\n#define DEFAULT_THREAD_COUNT 24\n#define DEFAULT_READ_PERCENT 90\n#define DEFAULT_LOAD_FACTOR  40\n#define CAPACITY             8000016\n\n\nint main() {\n\n  int  op_count     = DEFAULT_OP_COUNT; \n  int  num_threads  = DEFAULT_THREAD_COUNT;\n  int  read_percent = DEFAULT_READ_PERCENT;\n  int  load_factor  = DEFAULT_LOAD_FACTOR;\n\n  int    rweight  = read_percent;\n  int    idweight = 100 - read_percent;\n/*\n\tTable<hashKV> T(100000, hashKV(), 1.3);\n\tT.insert({1,2});\n\tT.insert({2,45});\n\tstruct KV res  = T.find(2);\n\tstd::cout << \"return value is \" << res.v << std::endl;\n*/\n\n  BenchmarkLockFreeHT benchmark_lockfree_ht(op_count, CAPACITY, rweight, idweight, num_threads, 0.3);\n  benchmark_lockfree_ht.run();\n\n\treturn 0;\n}\n"
  },
  {
    "path": "solvers/wheels/lockfreehash/lprobe/memory_size.h",
    "content": "/*\n * Author:  David Robert Nadeau\n * Site:    http://NadeauSoftware.com/\n * License: Creative Commons Attribution 3.0 Unported License\n *          http://creativecommons.org/licenses/by/3.0/deed.en_US\n */\n\n#pragma once\n\n#if defined(_WIN32)\n#include <Windows.h>\n\n#elif defined(__unix__) || defined(__unix) || defined(unix) || (defined(__APPLE__) && defined(__MACH__))\n#include <unistd.h>\n#include <sys/types.h>\n#include <sys/param.h>\n#if defined(BSD)\n#include <sys/sysctl.h>\n#endif\n\n#else\n#error \"Unable to define getMemorySize( ) for an unknown OS.\"\n#endif\n\n\n\n/**\n * Returns the size of physical memory (RAM) in bytes.\n */\nstatic size_t getMemorySize( )\n{\n#if defined(_WIN32) && (defined(__CYGWIN__) || defined(__CYGWIN32__))\n  /* Cygwin under Windows. ------------------------------------ */\n  /* New 64-bit MEMORYSTATUSEX isn't available.  Use old 32.bit */\n  MEMORYSTATUS status;\n  status.dwLength = sizeof(status);\n  GlobalMemoryStatus( &status );\n  return (size_t)status.dwTotalPhys;\n\n#elif defined(_WIN32)\n  /* Windows. ------------------------------------------------- */\n  /* Use new 64-bit MEMORYSTATUSEX, not old 32-bit MEMORYSTATUS */\n  MEMORYSTATUSEX status;\n  status.dwLength = sizeof(status);\n  GlobalMemoryStatusEx( &status );\n  return (size_t)status.ullTotalPhys;\n\n#elif defined(__unix__) || defined(__unix) || defined(unix) || (defined(__APPLE__) && defined(__MACH__))\n  /* UNIX variants. ------------------------------------------- */\n  /* Prefer sysctl() over sysconf() except sysctl() HW_REALMEM and HW_PHYSMEM */\n\n#if defined(CTL_HW) && (defined(HW_MEMSIZE) || defined(HW_PHYSMEM64))\n  int mib[2];\n  mib[0] = CTL_HW;\n#if defined(HW_MEMSIZE)\n  mib[1] = HW_MEMSIZE;/* OSX. --------------------- */\n#elif defined(HW_PHYSMEM64)\n  mib[1] = HW_PHYSMEM64;/* NetBSD, OpenBSD. 
--------- */\n  #endif\n  int64_t size = 0;/* 64-bit */\n  size_t len = sizeof( size );\n  if ( sysctl( mib, 2, &size, &len, NULL, 0 ) == 0 )\n    return (size_t)size;\n  return 0L;/* Failed? */\n\n#elif defined(_SC_AIX_REALMEM)\n  /* AIX. ----------------------------------------------------- */\n  return (size_t)sysconf( _SC_AIX_REALMEM ) * (size_t)1024L;\n\n#elif defined(_SC_PHYS_PAGES) && defined(_SC_PAGESIZE)\n  /* FreeBSD, Linux, OpenBSD, and Solaris. -------------------- */\n  return (size_t)sysconf( _SC_PHYS_PAGES ) *\n    (size_t)sysconf( _SC_PAGESIZE );\n\n#elif defined(_SC_PHYS_PAGES) && defined(_SC_PAGE_SIZE)\n  /* Legacy. -------------------------------------------------- */\n  return (size_t)sysconf( _SC_PHYS_PAGES ) *\n    (size_t)sysconf( _SC_PAGE_SIZE );\n\n#elif defined(CTL_HW) && (defined(HW_PHYSMEM) || defined(HW_REALMEM))\n  /* DragonFly BSD, FreeBSD, NetBSD, OpenBSD, and OSX. -------- */\n  int mib[2];\n  mib[0] = CTL_HW;\n#if defined(HW_REALMEM)\n  mib[1] = HW_REALMEM;/* FreeBSD. ----------------- */\n#elif defined(HW_PHYSMEM)\n  mib[1] = HW_PHYSMEM;/* Others. ------------------ */\n  #endif\n  unsigned int size = 0;/* 32-bit */\n  size_t len = sizeof( size );\n  if ( sysctl( mib, 2, &size, &len, NULL, 0 ) == 0 )\n    return (size_t)size;\n  return 0L;/* Failed? */\n#endif /* sysctl and sysconf variants */\n\n  #else\n  return 0L;/* Unknown OS. */\n  #endif\n}\n"
  },
  {
    "path": "solvers/wheels/lockfreehash/lprobe/monoid.h",
    "content": "#pragma once\n#include <limits>\n#include <tuple>\n#include <array>\n\n// Definition of various monoids\n// each consists of:\n//   T : type of the values\n//   static T identity() : returns identity for the monoid\n//   static T add(T, T) : adds two elements, must be associative\n\nnamespace pbbs {\n\n  template <class F, class TT>\n  struct monoid {\n    using T = TT;\n    F f;\n    TT identity;\n    monoid(F f, TT id) : f(f), identity(id) {}\n  };\n\n  template <class F, class T>\n  monoid<F,T> make_monoid (F f, T id) {\n    return monoid<F,T>(f, id);\n  }\n\n  template <class M1, class M2>\n  auto pair_monoid (M1 m1, M2 m2) {\n    using P = std::pair<typename M1::T, typename M2::T>;\n    auto f = [&] (P a, P b) {\n      return P(m1.f(a.first, b.first), m2.f(a.second, b.second));};\n    return make_monoid(f, P(m1.identity, m2.identity));\n  }\n\n  template <class M, size_t n>\n  auto array_monoid (M m) {\n    using Ar = std::array<typename M::T, n>;\n    auto f = [&] (Ar a, Ar b) {\n      Ar r;\n      for (size_t i=0; i < n; i++)\n\tr[i] = m.f(a[i], b[i]);\n      return r;\n    };\n    Ar id;\n    for (size_t i=0; i < n; i++) id[i] = m.identity;\n    return make_monoid(f, id);\n  }\n  \n  template <class TT>\n  struct addm {\n    using T = TT;\n    addm() : identity(0) {}\n    T identity;\n    static T f(T a, T b) {return a + b;}\n  };\n\n  template <class T>\n  T lowest() {return std::numeric_limits<T>::lowest();}\n\n  template <class T>\n  T highest() {return std::numeric_limits<T>::max();}\n\n  template <class TT>\n  struct maxm{\n    using T = TT;\n    maxm() : identity(lowest<T>()) {}\n    T identity;\n    static T f(T a, T b) {return std::max(a,b);}\n  };\n\n  template <class T1, class T2>\n  struct maxm<std::pair<T1,T2>> {\n    using T = std::pair<T1,T2>;\n    maxm() : identity(std::make_pair(lowest<T1>(), lowest<T2>())) {}\n    T identity;\n    static T f(T a, T b) {return std::max(a,b);}\n  };\n\n  template <class TT>\n  struct minm {\n  
  using T = TT;\n    minm() : identity(highest<T>()) {}\n    T identity;\n    static T f(T a, T b) {return std::min(a,b);}\n  };\n\n  template <class T1, class T2>\n  struct minm<std::pair<T1,T2>> {\n    using T = std::pair<T1,T2>;\n    minm() : identity(std::make_pair(highest<T1>(), highest<T2>())) {}\n    T identity;\n    static T f(T a, T b) {return std::min(a,b);}\n  };\n\n  template <class TT>\n  struct xorm {\n    using T = TT;\n    xorm() : identity(0) {}\n    T identity;\n    static T f(T a, T b) {return a ^ b;}\n  };\n\n  template <class TT>\n  struct minmaxm {\n    using T = std::pair<TT,TT>;\n    minmaxm() : identity(T(highest<TT>(), lowest<TT>())) {}\n    T identity;\n    static T f(T a, T b) {return T(std::min(a.first,b.first),\n\t\t\t\t   std::max(a.second,b.second));}\n  };\n\n  template <class TT>\n  struct Add {\n    using T = TT;\n    static T identity() {return (T) 0;}\n    static T add(T a, T b) {return a + b;}\n  };\n\n  template <class TT>\n  struct Max {\n    using T = TT;\n    static T identity() {\n      return (T) std::numeric_limits<T>::lowest();}\n    static T add(T a, T b) {return std::max(a,b);}\n  };\n\n  template <class TT>\n  struct Min {\n    using T = TT;\n    static T identity() {\n      return (T) std::numeric_limits<T>::max();}\n    static T add(T a, T b) {return std::min(a,b);}\n  };\n\n  template <class A1, class A2>\n  struct Add_Pair {\n    using T = std::pair<typename A1::T, typename A2::T>;\n    static T identity() {return T(A1::identity(), A2::identity());}\n    static T add(T a, T b) {\n      return T(A1::add(a.first,b.first), A2::add(a.second,b.second));}\n  };\n\n  template <class AT>\n  struct Add_Array {\n    using S = std::tuple_size<AT>;\n    using T = std::array<typename AT::value_type, S::value>;\n    static T identity() {\n      T r;\n      for (size_t i=0; i < S::value; i++)\n\tr[i] = 0;\n      return r;\n    }\n    static T add(T a, T b) {\n      T r;\n      for (size_t i=0; i < S::value; i++)\n\tr[i] = a[i] + 
b[i];\n      return r;\n    }\n  };\n\n  template <class AT>\n  struct Add_Nested_Array {\n    using T = AT;\n    using S = std::tuple_size<T>;\n    using SS = std::tuple_size<typename AT::value_type>;\n    static T identity() {\n      T r;\n      for (size_t i=0; i < S::value; i++)\n\tfor (size_t j=0; j < SS::value; j++) r[i][j] = 0;\n      return r;\n    }\n    static T add(T a, T b) {\n      T r;\n      for (size_t i=0; i < S::value; i++)\n\tfor (size_t j=0; j < SS::value; j++)\n\t  r[i][j] = a[i][j] + b[i][j];\n      return r;\n    }\n  };\n\n}\n"
  },
  {
    "path": "solvers/wheels/lockfreehash/lprobe/parallel.h",
    "content": "#pragma once\n\n//***************************************\n// All the pbbs library uses only four functions for\n// accessing parallelism.\n// These can be implemented on top of any scheduler.\n//***************************************\n// number of threads available from OS\n//template <>\nstatic int num_workers();\n\n// id of running thread, should be numbered from [0...num-workers)\nstatic int worker_id();\n\n// the granularity of a simple loop (e.g. adding one to each element\n// of an array) to reasonably hide cost of scheduler\n// #define PAR_GRANULARITY 2000\n\n// parallel loop from start (inclusive) to end (exclusive) running\n// function f.\n//    f should map long to void.\n//    granularity is the number of iterations to run sequentially\n//      if 0 (default) then the scheduler will decide\n//    conservative uses a safer scheduler\ntemplate <typename F>\nstatic void parallel_for(long start, long end, F f,\n\t\t\t long granularity = 0,\n\t\t\t bool conservative = false);\n\n// runs the thunks left and right in parallel.\n//    both left and write should map void to void\n//    conservative uses a safer scheduler\ntemplate <typename Lf, typename Rf>\nstatic void par_do(Lf left, Rf right, bool conservative=false);\n\n//***************************************\n\n// cilkplus\n#if defined(CILK)\n#include <cilk/cilk.h>\n#include <cilk/cilk_api.h>\n#include <iostream>\n#include <sstream>\n#define PAR_GRANULARITY 2000\n\ninline int num_workers() {return __cilkrts_get_nworkers();}\ninline int worker_id() {return __cilkrts_get_worker_number();}\ninline void set_num_workers(int) {\n  throw std::runtime_error(\"don't know how to set worker count!\");\n}\n\n// Not sure this still works\n//__cilkrts_end_cilk();\n//  std::stringstream ss; ss << n;\n//  if (0 != __cilkrts_set_param(\"nworkers\", ss.str().c_str())) \n\n\ntemplate <typename Lf, typename Rf>\ninline void par_do(Lf left, Rf right, bool) {\n    cilk_spawn right();\n    left();\n    
cilk_sync;\n}\n\ntemplate <typename F>\ninline void parallel_for(long start, long end, F f,\n\t\t\t long granularity,\n\t\t\t bool) {\n  if (granularity == 0)\n    cilk_for(long i=start; i<end; i++) f(i);\n  else if ((end - start) <= granularity)\n    for (long i=start; i < end; i++) f(i);\n  else {\n    long n = end-start;\n    long mid = (start + (9*(n+1))/16);\n    cilk_spawn parallel_for(start, mid, f, granularity);\n    parallel_for(mid, end, f, granularity);\n    cilk_sync;\n  }\n}\n\n// openmp\n#elif defined(OPENMP)\n#include <omp.h>\n#define PAR_GRANULARITY 200000\n\ninline int num_workers() { return omp_get_max_threads(); }\ninline int worker_id() { return omp_get_thread_num(); }\ninline void set_num_workers(int n) { omp_set_num_threads(n); }\n\ntemplate <class F>\ninline void parallel_for(long start, long end, F f,\n\t\t\t long granularity,\n\t\t\t bool conservative) {\n  _Pragma(\"omp parallel for\")\n    for(long i=start; i<end; i++) f(i);\n}\n\nbool in_par_do = false;\n\ntemplate <typename Lf, typename Rf>\ninline void par_do(Lf left, Rf right, bool conservative) {\n  if (!in_par_do) {\n    in_par_do = true;  // at top level start up tasking\n#pragma omp parallel\n#pragma omp single\n#pragma omp task\n    left();\n#pragma omp task\n    right();\n#pragma omp taskwait\n    in_par_do = false;\n  } else {   // already started\n#pragma omp task\n    left();\n#pragma omp task\n    right();\n#pragma omp taskwait\n  }\n}\n\ntemplate <typename Job>\ninline void parallel_run(Job job, int num_threads=0) {\n  job();\n}\n\n// Guy's scheduler (ABP)\n#elif defined(HOMEGROWN)\n#include \"scheduler.h\"\n\n#ifdef NOTMAIN\nextern fork_join_scheduler fj;\n#else\nfork_join_scheduler fj;\n#endif\n\n// Calls fj.destroy() before the program exits\ninline void destroy_fj() {\n  fj.destroy();\n}\n\nstruct __atexit {__atexit() {std::atexit(destroy_fj);}};\nstatic __atexit __atexit_var;\n\n#define PAR_GRANULARITY 512\n\ninline int num_workers() {\n  return 
fj.num_workers();\n}\n\ninline int worker_id() {\n  return fj.worker_id();\n}\n\ninline void set_num_workers(int n) {\n  fj.set_num_workers(n);\n}\n\ntemplate <class F>\ninline void parallel_for(long start, long end, F f,\n\t\t\t long granularity,\n\t\t\t bool conservative) {\n  if (end > start)\n    fj.parfor(start, end, f, granularity, conservative);\n}\n\ntemplate <typename Lf, typename Rf>\ninline void par_do(Lf left, Rf right, bool conservative) {\n  return fj.pardo(left, right, conservative);\n}\n\ntemplate <typename Job>\ninline void parallel_run(Job job, int) {\n  job();\n}\n\n// c++\n#else\n\ninline int num_workers() { return 1;}\ninline int worker_id() { return 0;}\ninline void set_num_workers(int) { ; }\n#define PAR_GRANULARITY 1000\n\ntemplate <class F>\ninline void parallel_for(long start, long end, F f,\n\t\t\t long,   // granularity,\n\t\t\t bool) { // conservative) {\n  for (long i=start; i<end; i++) {\n    f(i);\n  }\n}\n\ntemplate <typename Lf, typename Rf>\ninline void par_do(Lf left, Rf right, bool) { // conservative) {\n  left(); right();\n}\n\ntemplate <typename Job>\ninline void parallel_run(Job job, int) { // num_threads=0) {\n  job();\n}\n\n#endif\n"
  },
  {
    "path": "solvers/wheels/lockfreehash/lprobe/seq.h",
    "content": "#pragma once\n\n#include \"utilities.h\"\n#include \"alloc.h\"\n#include <initializer_list>\n#include <iterator>\n\n#ifdef CONCEPTS\ntemplate<typename T>\nconcept bool Seq =\n  requires(T t, size_t u) {\n  typename T::value_type;\n  { t.size() } -> size_t;\n  { t.slice() };\n  { t[u] };\n};\n\ntemplate<typename T>\nconcept bool Range =\n  Seq<T> && requires(T t, size_t u) {\n  { t[u] } -> typename T::value_type&;\n  typename T::iterator;\n};\n#define SEQ Seq\n#define RANGE Range\n#else\n#define SEQ typename\n#define RANGE typename\n#endif\n\nnamespace pbbs {\n\n  constexpr bool report_copy = false;\n  constexpr bool bounds_check = false;\n  \n  template <typename Iterator>\n  struct range {\n  public:\n    using value_type = typename std::iterator_traits<Iterator>::value_type;\n    using iterator = Iterator;\n    range() {};\n    range(iterator s, iterator e) : s(s), e(e) {};\n    value_type& operator[] (const size_t i) const {return s[i];}\n    range slice(size_t ss, size_t ee) const {\n      return range(s + ss, s + ee); }\n    range slice() const {return range(s,e);};\n    size_t size() const { return e - s;}\n    iterator begin() const {return s;}\n    iterator end() const {return e;}\n\n    range<std::reverse_iterator<value_type*>>\n    rslice(size_t ss, size_t ee) const {\n      auto i = std::make_reverse_iterator(e);\n      return range<decltype(i)>(i + ss, i + ee);\n    }\n    range<std::reverse_iterator<value_type*>>\n    rslice() const {return rslice(0, std::distance(s,e));};\n\n  private:\n    iterator s;\n    iterator e;\n  };\n\n  template <class Iter>\n  range<Iter> make_range(Iter s, Iter e) {\n    return range<Iter>(s,e);\n  }\n\n  template <typename T, typename F>\n  struct delayed_sequence {\n    using value_type = T;\n    delayed_sequence(size_t n, F _f) : f(_f), s(0), e(n) {};\n    delayed_sequence(size_t n, value_type v) : f([&] (size_t) {return v;}), s(0), e(n) {};\n    delayed_sequence(size_t s, size_t e, F _f) : f(_f), s(s), 
e(e) {};\n    const value_type operator[] (size_t i) const {return (f)(i+s);}\n    delayed_sequence<T,F> slice(size_t ss, size_t ee) const {\n      return delayed_sequence<T,F>(s+ss,s+ee,f); }\n    delayed_sequence<T,F> slice() const {\n      return delayed_sequence<T,F>(s,e,f); }\n    size_t size() const { return e - s;}\n  private:\n    F f;\n    const size_t s, e;\n  };\n\n  // used so second template argument can be inferred\n  template <class T, class F>\n  delayed_sequence<T,F> delayed_seq (size_t n, F f) {\n    return delayed_sequence<T,F>(n,f);\n  }\n\n  template <class F>\n  auto dseq (size_t n, F f) -> delayed_sequence<decltype(f(0)),F>\n  {\n    using T = decltype(f(0));\n    return delayed_sequence<T,F>(n,f);\n  }\n\n  template <typename T, typename Allocator=pbbs::allocator<T>>\n  struct sequence {\n  public:\n    using value_type = T;\n    //using iterator = T*;\n\n    sequence() { empty(); }\n\n    // copy constructor\n    sequence(const sequence& a) {\n      if (report_copy && !a.is_small())\n\tcout << \"copy constructor: len: \" << a.size()\n\t     << \" element size: \" << sizeof(value_type) << endl;\n      if (a.is_small()) val = a.val;\n      else copy_from(a.val.large.s, a.val.large.n);\n    }\n\n    // move constructor\n    sequence(sequence&& a) {\n      val = a.val; a.empty();}\n\n    // // copy assignment\n    // sequence& operator = (const sequence& a) {\n    //   if (report_copy && !a.is_small())\n    // \tcout << \"copy assignment: len: \" << a.size()\n    // \t     << \" element size: \" << sizeof(T) << endl;\n    //   if (this != &a) {\n    // \tclear(); \n    // \tif (a.is_small()) val = a.val;\n    // \telse copy_from(a.val.large.s, a.val.large.n);}\n    //   return *this;\n    // }\n\n    // //move assignment\n    // sequence& operator = (sequence&& a) {\n    //   if (this != &a) {clear(); val = a.val; a.empty();}\n    //   return *this;\n    // }\n\n    // unified copy/move assignment using the copy and swap idiom\n    // now safer 
for exceptions\n    sequence& operator = (sequence a) {\n      swap(a);\n      return *this;\n    }\n\n    // constructs a sequence of length sz\n    // with each element default constructed\n    sequence(const size_t sz) {\n      alloc(sz);}\n\n    // constructs a sequence of length sz initialized with v\n    sequence(const size_t sz, value_type v) {\n      T* start = alloc_no_init(sz);\n      parallel_for(0, sz, [=] (size_t i) {\n\t  assign_uninitialized(start[i], (value_type) v);}, 300);\n    };\n\n    // constructs a sequence by applying f to indices [0, ..., sz-1]\n    template <typename Func>\n    sequence(const size_t sz, Func f, size_t granularity=300) {\n      value_type* start = alloc_no_init(sz);\n      parallel_for(0, sz, [&] (size_t i) {\n\t  assign_uninitialized<value_type>(start[i], f(i));}, granularity);\n    };\n\n    // construct a sequence from initializer list\n    sequence(std::initializer_list<value_type> l) {\n      size_t sz = l.end() - l.begin();\n      value_type* start = alloc(sz);\n      size_t i = 0;\n      for (value_type a : l) start[i++] = a;\n    }\n\n    // constructs from a range\n    template <typename Iter>\n    sequence(range<Iter> const &a) {\n      copy_from(a.begin(), a.size());\n    }\n\n    // constructs from a delayed sequence\n    template <class F>\n    sequence(delayed_sequence<value_type,F> const &a) {\n      copy_from(a, a.size());\n    }\n\n    // uninitialized sequence of length sz\n    // dangerous if non primitive types and not immediately initialized\n    static sequence<value_type> no_init(const size_t sz) {\n      sequence<value_type> r;\n      r.alloc_no_init(sz);\n      return r;\n    };\n\n    // Constructs a sequence by taking ownership of an\n    // allocated value_type array.\n    // Only use if a is allocated by the same allocator as \n    // the sequence since the sequence delete will destruct it.\n    sequence(value_type* a, const size_t sz) {\n      set(a, sz);\n      // cout << \"dangerous: \" << 
size();\n    };\n\n    // Copies a Seq type \n    // Uses enable_if to avoid matching on integer argument, which creates\n    // a sequece of the specified length\n    //template <class Seq, typename std::enable_if_t<!std::is_integral<Seq>::value>>\n    //sequence(Seq const &a) {\n    //  copy_from(a.begin(), a.size());\n    //}\n\n    ~sequence() { clear();}\n\n    range<value_type*> slice(size_t ss, size_t ee) const {\n      return range<value_type*>(begin() + ss, begin() + ee);\n    }\n\n    range<std::reverse_iterator<value_type*>>\n    rslice(size_t ss, size_t ee) const {\n      auto iter = std::make_reverse_iterator(begin() + size());\n      return range<decltype(iter)>(iter + ss, iter + ee);\n    }\n\n    range<std::reverse_iterator<value_type*>>\n    rslice() const {return rslice(0, size());};\n\n    range<value_type*> slice() const {\n      return range<value_type*>(begin(), begin() + size());\n    }\n\n    // gives up ownership, returning an array of the elements\n    // only use if will be freed by same allocator as sequence\n    value_type* to_array() {\n      value_type* r = begin(); empty(); return r;}\n\n    // frees the memory assuming elements are already destructed,\n    // and sets pointer to Null (empty());\n    void clear_no_destruct() {\n      if (size() != 0 && !is_small()) \n\t//pbbs::free_array(val.large.s);\n\tAllocator().deallocate(val.large.s, val.large.n);\n      empty();\n    }\n\n    // destructs the sequence\n    void clear() {\n      delete_elements();\n      clear_no_destruct();\n    }\n    \n    value_type& operator[] (const size_t i) const {\n      if (bounds_check && i >= size()) \n      \tthrow std::out_of_range(\"in sequence access: length = \"\n\t\t\t\t+ std::to_string(size())\n\t\t\t\t+ \" index = \" + std::to_string(i));\n      return begin()[i];\n    }\n\n    value_type& get(const size_t i) const {\n      return begin()[i];\n    }\n\n    void swap(sequence& b) {\n      std::swap(val.large.s, b.val.large.s);\n      
std::swap(val.large.n, b.val.large.n);\n    }\n\n    size_t size() const {\n      if (is_small()) return val.small[flag_loc];\n      return val.large.n;}\n\n    value_type* begin() const {\n      if (is_small()) return (value_type*) &val.small;\n      return val.large.s;}\n\n    value_type* end() const {return begin() + size();}\n\n  private:\n\n    struct lg { value_type *s; size_t n; };\n    static constexpr size_t lg_size = sizeof(lg);\n    static constexpr size_t T_size = sizeof(value_type);\n    static constexpr size_t max_sso_size = 8;\n    static constexpr size_t flag_loc = 15;\n    // For future use in c++20\n    // --- (std::endian::native == std::endian::big) ? 8 : 15;\n\n    // Uses short string optimization (SSO).\n    // Applied if T_size <= max_sso_size\n    // Stores flag in byte 15 (flag_loc) of the small array\n    // It assumes the machine is little_endian so this is\n    // the high order bits of the size field (n)\n    union {\n      lg large;\n      char small[lg_size]; // for SSO\n    } val;\n\n    // sets start and size\n    void set(value_type* start, size_t sz) {\n      val.large.n = sz;\n      val.large.s = start;\n    }\n      \n    // marks as empty\n    void empty() {set(NULL, 0);}\n\n    // is a given size small\n    inline bool is_small(size_t sz) const {\n      return ((T_size <= max_sso_size) &&\n\t      sz < (lg_size/T_size) &&\n\t      sz > 0); }\n\n    // am I small\n    inline bool is_small() const {\n      //return is_small(val.small[flag_loc]);\n      if (T_size <= max_sso_size) {\n      \tsize_t sz = val.small[flag_loc];\n      \treturn (sz > 0 && sz < (lg_size/T_size));\n      }\n      return false;\n    }\n    \n    void initialize_elements() {\n      if (!std::is_trivially_default_constructible<value_type>::value) \n\tparallel_for(0, size(), [&] (size_t i) {\n\t    new ((void*) (begin()+i)) value_type;});\n    }\n\n    void delete_elements() {\n      if 
(!std::is_trivially_destructible<value_type>::value)\n\tparallel_for(0, size(), [&] (size_t i) {\n\t    (begin()+i)->~value_type();});\n    }\n\n    // allocate and set size without initialization\n    value_type* alloc_no_init(size_t sz) {\n      if (is_small(sz)) {\n\tval.small[flag_loc] = sz;\n\treturn (value_type*) &val.small;\n      } else {\n\t//T* loc = (sz == 0) ? NULL : pbbs::new_array_no_init<T>(sz);\n\tvalue_type* loc = (sz == 0) ? NULL : Allocator().allocate(sz); \n\tset(loc, sz);\n\treturn loc;\n      }\n    }\n\n    // allocate and set size with initialization\n    value_type* alloc(size_t sz) {\n      value_type* loc = alloc_no_init(sz);\n      initialize_elements();\n      return loc;\n    }\n\n    // Allocates and copies sequence from random access iterator\n    // Only used if not short string optimized.\n    template <class Iter>\n    void copy_from(Iter a, size_t sz) {\n      value_type* start = alloc_no_init(sz); \n      parallel_for(0, sz, [&] (size_t i) {\n\t  assign_uninitialized(start[i], a[i]);}, 1000);\n    }\n\n  };\n\n  template <class Iter>\n  bool slice_eq(range<Iter> a, range<Iter> b) {\n    return a.begin() == b.begin();}\n\n  template <class SeqA, class SeqB>\n  bool slice_eq(SeqA, SeqB) { return false;}\n\n  template <class Seq>\n  auto to_sequence(Seq const &s) -> sequence<typename Seq::value_type> {\n    using T = typename Seq::value_type;\n    return sequence<T>(s.size(), [&] (size_t i) {\n\treturn s[i];});\n  }\n\n  template <class F>\n  auto seq (size_t n, F f) -> sequence<decltype(f(0))>\n  {\n    return sequence<decltype(f(0))>(n,f);\n  }\n\n  std::ostream& operator<<(std::ostream& os, sequence<char> const &s)\n  {\n    // pad with a zero\n    sequence<char> out(s.size()+1, [&] (size_t i) {\n\treturn i == s.size() ? 0 : s[i];});\n    os << out.begin();\n    return os;\n  }\n}\n\n"
  },
  {
    "path": "solvers/wheels/lockfreehash/lprobe/sequence_ops.h",
    "content": "// This code is part of the Problem Based Benchmark Suite (PBBS)\n// Copyright (c) 2011-2019 Guy Blelloch and the PBBS team\n//\n// Permission is hereby granted, free of charge, to any person obtaining a\n// copy of this software and associated documentation files (the\n// \"Software\"), to deal in the Software without restriction, including\n// without limitation the rights (to use, copy, modify, merge, publish,\n// distribute, sublicense, and/or sell copies of the Software, and to\n// permit persons to whom the Software is furnished to do so, subject to\n// the following conditions:\n//\n// The above copyright notice and this permission notice shall be included\n// in all copies or substantial portions of the Software.\n//\n// THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS\n// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\n// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND\n// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE\n// LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION\n// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION\n// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\n#pragma once\n\n#include <iostream>\n#include \"utilities.h\"\n#include \"seq.h\"\n#include \"monoid.h\"\n\nnamespace pbbs {\n\n  template <class UnaryFunc>\n  auto tabulate(size_t n, UnaryFunc f) -> sequence<decltype(f(0))> {\n    return sequence<decltype(f(0))>(n, [&] (size_t i) {return f(i);});}\n\n  template <SEQ Seq, class UnaryFunc>\n  auto map(Seq const &A, UnaryFunc f) -> sequence<decltype(f(A[0]))> {\n    return tabulate(A.size(), [&] (size_t i) {return f(A[i]);});}\n\n  // delayed version of map\n  // requires C++14 or greater, both since return type is not defined (a lambda)\n  //   and for support of initialization of the closure lambda capture\n  template <SEQ Seq, class UnaryFunc>\n  auto dmap(Seq &&A, UnaryFunc&& f) {\n    size_t 
n = A.size();\n    return dseq(n, [f=std::forward<UnaryFunc>(f),\n\t\t    A=std::forward<Seq>(A)] (size_t i) {\n\t\t  return f(A[i]);});}\n\n  template <class T>\n  auto singleton(T const &v) -> sequence<T> {\n    return sequence<T>(1, v); }\n\n  template <SEQ Seq, RANGE Range>\n  auto copy(Seq const &A, Range R, flags) -> void {\n    parallel_for(0, A.size(), [&] (size_t i) {R[i] = A[i];});}\n\n  constexpr const size_t _log_block_size = 10;\n  constexpr const size_t _block_size = (1 << _log_block_size);\n\n  inline size_t num_blocks(size_t n, size_t block_size) {\n    if (n == 0) return 0;\n    else return (1 + ((n)-1)/(block_size));}\n\n  template <class F>\n  void sliced_for(size_t n, size_t block_size, const F& f, flags fl = no_flag) {\n    size_t l = num_blocks(n, block_size);\n    auto body = [&] (size_t i) {\n      size_t s = i * block_size;\n      size_t e = std::min(s + block_size, n);\n      f(i, s, e);\n    };\n    parallel_for(0, l, body, 1, 0 != (fl & fl_conservative));\n  }\n\n  template <SEQ Seq, class Monoid>\n  auto reduce_serial(Seq const &A, Monoid m) -> typename Seq::value_type {\n    using T = typename Seq::value_type;\n    T r = A[0];\n    for (size_t j=1; j < A.size(); j++) r = m.f(r,A[j]);\n    return r;\n  }\n\n  template <SEQ Seq, class Monoid>\n  auto reduce(Seq const &A, Monoid m, flags fl = no_flag)\n    -> typename Seq::value_type\n  {\n    using T = typename Seq::value_type;\n    size_t n = A.size();\n    size_t block_size = std::max(_block_size, 4 * (size_t) ceil(sqrt(n)));\n    size_t l = num_blocks(n, block_size);\n    if (l == 0) return m.identity;\n    if (l == 1 || (fl & fl_sequential)) {\n      return reduce_serial(A, m); }\n    sequence<T> Sums(l);\n    sliced_for (n, block_size,\n\t\t[&] (size_t i, size_t s, size_t e)\n\t\t{ Sums[i] = reduce_serial(A.slice(s,e), m);});\n    T r = reduce(Sums, m);\n    return r;\n  }\n\n  const flags fl_scan_inclusive = (1 << 4);\n\n  template <SEQ In_Seq, RANGE Out_Seq, class Monoid>\n  auto 
scan_serial(In_Seq const &In, Out_Seq Out,\n\t\t   Monoid const &m, typename In_Seq::value_type offset,\n\t\t   flags fl = no_flag)  -> typename In_Seq::value_type\n  {\n    using T = typename In_Seq::value_type;\n    T r = offset;\n    size_t n = In.size();\n    bool inclusive = fl & fl_scan_inclusive;\n    if (inclusive) {\n      for (size_t i = 0; i < n; i++) {\n\tr = m.f(r,In[i]);\n\tOut[i] = r;\n      }\n    } else {\n      for (size_t i = 0; i < n; i++) {\n\tT t = In[i];\n\tOut[i] = r;\n\tr = m.f(r,t);\n      }\n    }\n    return r;\n  }\n\n  template <SEQ In_Seq, RANGE Out_Range, class Monoid>\n  auto scan_(In_Seq const &In, Out_Range Out, Monoid const &m,\n\t     flags fl = no_flag) -> typename In_Seq::value_type\n  {\n    using T = typename In_Seq::value_type;\n    size_t n = In.size();\n    size_t l = num_blocks(n,_block_size);\n    if (l <= 2 || fl & fl_sequential)\n      return scan_serial(In, Out, m, m.identity, fl);\n    sequence<T> Sums(l);\n    sliced_for (n, _block_size,\n\t\t[&] (size_t i, size_t s, size_t e)\n\t\t{ Sums[i] = reduce_serial(In.slice(s,e), m);});\n    T total = scan_serial(Sums, Sums.slice(), m, m.identity, 0);\n    sliced_for (n, _block_size,\n\t\t[&] (size_t i, size_t s, size_t e)\n\t\t{ auto O = Out.slice(s,e);\n\t\t  scan_serial(In.slice(s,e), O, m, Sums[i], fl);});\n    return total;\n  }\n\n  template <RANGE Range, class Monoid>\n  auto scan_inplace(Range In, Monoid m, flags fl = no_flag)\n    -> typename Range::value_type\n  { return scan_(In, In, m, fl); }\n\n  template <SEQ In_Seq, class Monoid>\n  auto scan(In_Seq const &In, Monoid m, flags fl = no_flag)\n    ->  std::pair<sequence<typename In_Seq::value_type>, typename In_Seq::value_type>\n  {\n    using T = typename In_Seq::value_type;\n    sequence<T> Out(In.size());\n    return std::make_pair(std::move(Out), scan_(In, Out.slice(), m, fl));\n  }\n\n  // do in place if rvalue reference to a sequence<T>\n  template <class T, class Monoid>\n  auto scan(sequence<T> &&In, 
Monoid m, flags fl = no_flag)\n    ->  std::pair<sequence<T>, T> {\n    sequence<T> Out = std::move(In);\n    T total = scan_(Out, Out.slice(), m, fl);\n    return std::make_pair(std::move(Out), total);\n  }\n\n  template <SEQ Seq>\n  size_t sum_bools_serial(Seq const &I) {\n    size_t r = 0;\n    for (size_t j=0; j < I.size(); j++) r += I[j];\n    return r;\n  }\n\n  template <SEQ In_Seq, class Bool_Seq>\n  auto pack_serial(In_Seq const &In, Bool_Seq const &Fl)\n      -> sequence<typename In_Seq::value_type> {\n    using T = typename In_Seq::value_type;\n    size_t n = In.size();\n    size_t m = sum_bools_serial(Fl);\n    sequence<T> Out = sequence<T>::no_init(m);\n    size_t k = 0;\n    for (size_t i = 0; i < n; i++)\n      if (Fl[i]) assign_uninitialized(Out[k++], In[i]);\n    return Out;\n  }\n\n  template <class Slice, class Slice2, RANGE Out_Seq>\n  size_t pack_serial_at(Slice In, Slice2 Fl, Out_Seq Out) {\n    size_t k = 0;\n    for (size_t i=0; i < In.size(); i++)\n      if (Fl[i]) assign_uninitialized(Out[k++], In[i]);\n    return k;\n  }\n\n  template <SEQ In_Seq, SEQ Bool_Seq>\n  auto pack(In_Seq const &In, Bool_Seq const &Fl, flags fl = no_flag)\n      -> sequence<typename In_Seq::value_type> {\n    using T = typename In_Seq::value_type;\n    size_t n = In.size();\n    size_t l = num_blocks(n, _block_size);\n    if (l == 1 || fl & fl_sequential)\n      return pack_serial(In, Fl);\n    sequence<size_t> Sums(l);\n    sliced_for(n, _block_size, [&] (size_t i, size_t s, size_t e) {\n      Sums[i] = sum_bools_serial(Fl.slice(s, e));\n    });\n    size_t m = scan_inplace(Sums.slice(), addm<size_t>());\n    sequence<T> Out = sequence<T>::no_init(m);\n    sliced_for(n, _block_size, [&](size_t i, size_t s, size_t e) {\n\tpack_serial_at(In.slice(s, e),  Fl.slice(s, e),\n\t\t       Out.slice(Sums[i], (i == l-1) ? 
m : Sums[i+1]));\n    });\n    return Out;\n  }\n\n  // Pack the output to the output range.\n  template <SEQ In_Seq, SEQ Bool_Seq, RANGE Out_Seq>\n  size_t pack_out(In_Seq const &In, Bool_Seq const &Fl, Out_Seq Out,\n\t\t  flags fl = no_flag)\n  {\n    size_t n = In.size();\n    size_t l = num_blocks(n, _block_size);\n    if (l <= 1 || fl & fl_sequential) {\n      return pack_serial_at(In, Fl.slice(0, In.size()), Out);\n    }\n    sequence<size_t> Sums(l);\n    sliced_for(n, _block_size, [&] (size_t i, size_t s, size_t e) {\n      Sums[i] = sum_bools_serial(Fl.slice(s, e));\n    });\n    size_t m = scan_inplace(Sums.slice(), addm<size_t>());\n    sliced_for(n, _block_size, [&](size_t i, size_t s, size_t e) {\n      pack_serial_at(In.slice(s, e),  Fl.slice(s, e),\n                     Out.slice(Sums[i], (i == l-1) ? m : Sums[i+1]));\n    });\n    return m;\n  }\n\n  template <SEQ In_Seq, class F>\n  auto filter(In_Seq const &In, F f)\n    -> sequence<typename In_Seq::value_type>\n  {\n    using T = typename In_Seq::value_type;\n    size_t n = In.size();\n    size_t l = num_blocks(n,_block_size);\n    sequence<size_t> Sums(l);\n    sequence<bool> Fl(n);\n    sliced_for (n, _block_size,\n\t\t[&] (size_t i, size_t s, size_t e)\n\t\t{ size_t r = 0;\n\t\t  for (size_t j=s; j < e; j++)\n\t\t    r += (Fl[j] = f(In[j]));\n\t\t  Sums[i] = r;});\n    size_t m = scan_inplace(Sums.slice(), addm<size_t>());\n    sequence<T> Out = sequence<T>::no_init(m);\n    sliced_for (n, _block_size,\n\t\t[&] (size_t i, size_t s, size_t e)\n\t\t{ pack_serial_at(In.slice(s,e),\n\t\t\t\t Fl.slice(s,e),\n\t\t\t\t Out.slice(Sums[i], (i == l-1) ? 
m : Sums[i+1]));});\n    return Out;\n  }\n\n  template <SEQ In_Seq, class F>\n  auto filter(In_Seq const &In, F f, flags)\n  { return filter(In, f);}\n  \n  // Filter and write the output to the output range.\n  template <SEQ In_Seq, RANGE Out_Seq, class F>\n  size_t filter_out(In_Seq const &In, Out_Seq Out, F f) {\n    size_t n = In.size();\n    size_t l = pbbs::num_blocks(n,_block_size);\n    pbbs::sequence<size_t> Sums(l);\n    pbbs::sequence<bool> Fl(n);\n    pbbs::sliced_for (n, pbbs::_block_size,\n\t\t[&] (size_t i, size_t s, size_t e)\n\t\t{ size_t r = 0;\n\t\t  for (size_t j=s; j < e; j++)\n\t\t    r += (Fl[j] = f(In[j]));\n\t\t  Sums[i] = r;});\n    size_t m = scan_inplace(Sums.slice(), addm<size_t>());\n    pbbs::sliced_for (n, _block_size,\n\t\t[&] (size_t i, size_t s, size_t e)\n\t\t{ pack_serial_at(In.slice(s,e), Fl.slice(s,e),\n                  Out.slice(Sums[i], (i == l-1) ? m : Sums[i+1]));});\n    return m;\n  }\n\n  template <SEQ In_Seq, RANGE Out_Seq, class F>\n  size_t filter_out(In_Seq const &In, Out_Seq Out, F f, flags) {\n    return filter_out(In, Out, f);}\n\n  template <class Idx_Type, SEQ Bool_Seq>\n  sequence<Idx_Type> pack_index(Bool_Seq const &Fl, flags fl = no_flag) {\n    auto identity = [] (size_t i) {return (Idx_Type) i;};\n    return pack(delayed_seq<Idx_Type>(Fl.size(),identity), Fl, fl);\n  }\n\n  template <SEQ In_Seq, SEQ Char_Seq>\n  std::pair<size_t,size_t> split_three(In_Seq const &In,\n\t\t\t\t       range<typename In_Seq::value_type*> Out,\n\t\t\t\t       Char_Seq const &Fl,\n\t\t\t\t       flags fl = no_flag) {\n    size_t n = In.size();\n    if (slice_eq(In.slice(), Out)) \n      throw std::invalid_argument(\"In and Out cannot be the same in split_three\");\n    size_t l = num_blocks(n,_block_size);\n    sequence<size_t> Sums0(l);\n    sequence<size_t> Sums1(l);\n    sliced_for (n, _block_size,\n\t\t[&] (size_t i, size_t s, size_t e) {\n\t\t  size_t c0 = 0; size_t c1 = 0;\n\t\t  for (size_t j=s; j < e; j++) {\n\t\t    
if (Fl[j] == 0) c0++;\n\t\t    else if (Fl[j] == 1) c1++;\n\t\t  }\n\t\t  Sums0[i] = c0; Sums1[i] = c1;\n\t\t}, fl);\n    size_t m0 = scan_inplace(Sums0.slice(), addm<size_t>());\n    size_t m1 = scan_inplace(Sums1.slice(), addm<size_t>());\n    sliced_for (n, _block_size,\n\t\t[&] (size_t i, size_t s, size_t e)\n\t\t{\n\t\t  size_t c0 = Sums0[i];\n\t\t  size_t c1 = m0 + Sums1[i];\n\t\t  size_t c2 = m0 + m1 + (s - Sums0[i] - Sums1[i]);\n\t\t  for (size_t j=s; j < e; j++) {\n\t\t    if (Fl[j] == 0) Out[c0++] = In[j];\n\t\t    else if (Fl[j] == 1) Out[c1++] = In[j];\n\t\t    else Out[c2++] = In[j];\n\t\t  }\n\t\t}, fl);\n    return std::make_pair(m0,m1);\n  }\n\n  template <SEQ In_Seq, SEQ Bool_Seq>\n  auto split_two(In_Seq const &In,\n\t\t Bool_Seq const &Fl,\n\t\t flags fl = no_flag)\n    -> std::pair<sequence<typename In_Seq::value_type>, size_t> {\n    using T = typename In_Seq::value_type;\n    size_t n = In.size();\n    size_t l = num_blocks(n,_block_size);\n    sequence<size_t> Sums(l);\n    sliced_for (n, _block_size,\n\t\t[&] (size_t i, size_t s, size_t e) {\n\t\t  size_t c = 0;\n\t\t  for (size_t j=s; j < e; j++)\n\t\t    c += (Fl[j] == false);\n\t\t  Sums[i] = c;\n\t\t}, fl);\n    size_t m = scan_inplace(Sums.slice(), addm<size_t>());\n    sequence<T> Out = sequence<T>::no_init(n);\n    sliced_for (n, _block_size,\n\t\t[&] (size_t i, size_t s, size_t e) {\n\t\t  size_t c0 = Sums[i];\n\t\t  size_t c1 = s + (m - c0);\n\t\t  for (size_t j=s; j < e; j++) {\n\t\t    if (Fl[j] == false) assign_uninitialized(Out[c0++],In[j]);\n\t\t    else assign_uninitialized(Out[c1++],In[j]);\n\t\t  }\n\t\t}, fl);\n    return std::make_pair(std::move(Out), m);\n  }\n}\n\n"
  },
  {
    "path": "solvers/wheels/lockfreehash/lprobe/thread_service.h",
    "content": "#ifndef THREAD_SERVICE\n#define THREAD_SERVICE\n\n#include <random>\n#include <array>\n#include \"data.h\"\n\n\nstd::atomic<int> miss(0);\n\nstruct WorkerArgs \n{\n  int    num_elems;\n  // R/I/D weights, normalized to 100\n  int    rweight;\n  int    iweight;\n  int    dweight; \n  void*  ht_p;\n\n  bool   remove;\n  int    tid;\n  int    start;\n  int*   elems;\n};\n\ntemplate<typename T>\nvoid* thread_service(void* threadArgs)\n{\n  WorkerArgs* args = static_cast<WorkerArgs*>(threadArgs);\n\n  std::random_device                 rd;\n  std::mt19937                       mt(rd());\n  std::uniform_int_distribution<int> rng;\n\n  std::array<int, 3> weights;\n  weights[0] = args->rweight;\n  weights[1] = args->iweight;\n  weights[2] = args->dweight;\n\n  std::default_random_engine         g;\n  std::discrete_distribution<int>    drng(weights.begin(), weights.end());\n\n  int tid       = args->tid;\n  int num_elems = args->num_elems;\n  T* ht_p = static_cast<T*>(args->ht_p);\n\n  for (int i = 0; i < num_elems; i++)\n  {\n    // Key, Value pair\n    int k = rng(mt);\n    int v = rng(mt);\n    // Action : 0 -> Search, 1 -> Insert, 2 -> Remove\n    int a = drng(g);\n\n    if (a == 0)\n      ht_p->find(k);\n    else if (a == 1)\n      ht_p->insert({k, v});\n    else\n      ht_p->deleteVal(k);\n  }\n}\n\ntemplate<typename T>\nvoid* thread_service_low_contention(void* threadArgs)\n{\n  WorkerArgs* args = static_cast<WorkerArgs*>(threadArgs);\n\n  std::random_device                 rd;\n  std::mt19937                       mt(rd());\n  std::uniform_int_distribution<int> rng;\n\n  std::array<int, 3> weights;\n  weights[0] = args->rweight;\n  weights[1] = args->iweight;\n  weights[2] = args->dweight;\n\n  std::default_random_engine         g;\n  std::discrete_distribution<int>    drng(weights.begin(), weights.end());\n\n  int tid       = args->tid;\n  int num_elems = args->num_elems;\n  T* ht_p = static_cast<T*>(args->ht_p);\n\n  int *keys = (args->elems + 
args->start);\n\n  int start = 0;\n  int end = 0;\n  for (int i = 0; i < num_elems; i++)\n  {\n    // Action : 0 -> Search, 1 -> Insert, 2 -> Remove\n    int a = drng(g);\n\n    if (start == end || a == 1) \n    {\n      int k = rng(mt) % num_elems + tid * num_elems; \n      keys[end++] = k;\n      ht_p->insert({k, k});\n    }\n    else if (a == 0)\n    {\n      int k = rng(mt) % (end - start) + start;\n      ht_p->find(k);\n    }\n    else\n    {\n      int k = keys[start++];\n      ht_p->deleteVal(k);\n    }\n  }\n}\n\ntemplate<typename T>\nvoid* thread_service_high_contention(void* threadArgs)\n{\n  WorkerArgs* args = static_cast<WorkerArgs*>(threadArgs);\n\n  std::random_device                 rd;\n  std::mt19937                       mt(rd());\n  std::uniform_int_distribution<int> rng;\n\n  std::array<int, 3> weights;\n  weights[0] = args->rweight;\n  weights[1] = args->iweight;\n  weights[2] = args->dweight;\n\n  std::default_random_engine         g;\n  std::discrete_distribution<int>    drng(weights.begin(), weights.end());\n\n  int tid       = args->tid;\n  int num_elems = args->num_elems;\n  T* ht_p = static_cast<T*>(args->ht_p);\n\n  for (int i = 0; i < num_elems; i++)\n  {\n    ht_p->find(0);\n\t}\n}\n\ntemplate<typename T>\nvoid* thread_checkmiss(void* threadArgs)\n{\n  WorkerArgs* args = static_cast<WorkerArgs*>(threadArgs);\n  int* elems = args->elems;\n  T*   ht_p  = static_cast<T*>(args->ht_p);\n  int  start     = args->start;\n  int  num_elems = args->num_elems;\n  int  tid       = args->tid;\n\n  for (int i = start; i < start + num_elems; i++)\n  {\n#if 0 \n\t\tstruct KV res = ht_p->find(elems[i]);\n\t\tif (res.k == -1) {\n\t\t\t++miss;\n\t\t\tht_p->insert({elems[i], elems[i]});\n\t\t\tprintf(\"miss! 
key is %d\\n\", elems[i]);\n\t\t}\n#endif\n\t\tbool res = ht_p->insert({elems[i], elems[i]});\n\t\tif (res) {\n\t\t\t++miss;\n\t\t\tprintf(\"miss!\\n\");\n\t\t}\n\n  }\n\n}\n\n\ntemplate<typename T>\nvoid* thread_insert(void* threadArgs)\n{\n  WorkerArgs* args = static_cast<WorkerArgs*>(threadArgs);\n  int* elems = args->elems;\n  T*   ht_p  = static_cast<T*>(args->ht_p);\n  int  start     = args->start;\n  int  num_elems = args->num_elems;\n  int  tid       = args->tid;\n\n  for (int i = start; i < start + num_elems; i++)\n  {\n    ht_p->insert({elems[i], elems[i]});\n  }\n  \n}\n\ntemplate<typename T>\nvoid* thread_remove(void* threadArgs)\n{\n  WorkerArgs* args = static_cast<WorkerArgs*>(threadArgs);\n  int* elems = args->elems;\n  T*   ht_p  = static_cast<T*>(args->ht_p);\n  int  start     = args->start;\n  int  num_elems = args->num_elems;\n  int  tid       = args->tid;\n  bool remove    = args->remove;\n  \n  std::random_device                 rd;\n  std::mt19937                       mt(rd());\n  std::uniform_int_distribution<int> rng(0, 200000 - 1);\n\n  for (int i = start; i < start + num_elems; i++)\n  {\n    if (remove)\n      ht_p->deleteVal(elems[i]);\n    else\n      ht_p->find(elems[rng(mt)]);\n  }\n\n}\n\n#endif\n"
  },
  {
    "path": "solvers/wheels/lockfreehash/lprobe/thread_service_ptr.h",
    "content": "#ifndef THREAD_SERVICE\n#define THREAD_SERVICE\n\n#include <random>\n#include <array>\n#include \"data_ptr.h\"\n\nstd::atomic<int> miss(0);\n\nstruct WorkerArgs \n{\n  int    num_elems;\n  // R/I/D weights, normalized to 100\n  int    rweight;\n  int    iweight;\n  int    dweight; \n  void*  ht_p;\n\n  bool   remove;\n  int    tid;\n  int    start;\n  int*   elems;\n};\n\ntemplate<typename T>\nvoid* thread_service(void* threadArgs)\n{\n  WorkerArgs* args = static_cast<WorkerArgs*>(threadArgs);\n\n  std::random_device                 rd;\n  std::mt19937                       mt(rd());\n  std::uniform_int_distribution<int> rng;\n\n  std::array<int, 3> weights;\n  weights[0] = args->rweight;\n  weights[1] = args->iweight;\n  weights[2] = args->dweight;\n\n  std::default_random_engine         g;\n  std::discrete_distribution<int>    drng(weights.begin(), weights.end());\n\n  int tid       = args->tid;\n  int num_elems = args->num_elems;\n  T* ht_p = static_cast<T*>(args->ht_p);\n\n  for (int i = 0; i < num_elems; i++)\n  {\n    // Key, Value pair\n    int k = rng(mt);\n    int v = rng(mt);\n    // Action : 0 -> Search, 1 -> Insert, 2 -> Remove\n    int a = drng(g);\n\n    if (a == 0)\n      ht_p->find(k);\n    else if (a == 1)\n      ht_p->insert(new struct KV (k, v));\n    else\n      ht_p->deleteVal(k);\n  }\n}\n\ntemplate<typename T>\nvoid* thread_service_low_contention(void* threadArgs)\n{\n  WorkerArgs* args = static_cast<WorkerArgs*>(threadArgs);\n\n  std::random_device                 rd;\n  std::mt19937                       mt(rd());\n  std::uniform_int_distribution<int> rng;\n\n  std::array<int, 3> weights;\n  weights[0] = args->rweight;\n  weights[1] = args->iweight;\n  weights[2] = args->dweight;\n\n  std::default_random_engine         g;\n  std::discrete_distribution<int>    drng(weights.begin(), weights.end());\n\n  int tid       = args->tid;\n  int num_elems = args->num_elems;\n  T* ht_p = static_cast<T*>(args->ht_p);\n\n  int *keys = 
(args->elems + args->start);\n\n  int start = 0;\n  int end = 0;\n  for (int i = 0; i < num_elems; i++)\n  {\n    // Action : 0 -> Search, 1 -> Insert, 2 -> Remove\n    int a = drng(g);\n\n    if (start == end || a == 1) \n    {\n      int k = rng(mt) % num_elems + tid * num_elems; \n      keys[end++] = k;\n      ht_p->insert(new struct KV(k, k));\n    }\n    else if (a == 0)\n    {\n      int k = rng(mt) % (end - start) + start;\n      ht_p->find(k);\n    }\n    else\n    {\n      int k = keys[start++];\n      ht_p->deleteVal(k);\n    }\n  }\n}\n\ntemplate<typename T>\nvoid* thread_service_high_contention(void* threadArgs)\n{\n  WorkerArgs* args = static_cast<WorkerArgs*>(threadArgs);\n\n  std::random_device                 rd;\n  std::mt19937                       mt(rd());\n  std::uniform_int_distribution<int> rng;\n\n  std::array<int, 3> weights;\n  weights[0] = args->rweight;\n  weights[1] = args->iweight;\n  weights[2] = args->dweight;\n\n  std::default_random_engine         g;\n  std::discrete_distribution<int>    drng(weights.begin(), weights.end());\n\n  int tid       = args->tid;\n  int num_elems = args->num_elems;\n  T* ht_p = static_cast<T*>(args->ht_p);\n\n  for (int i = 0; i < num_elems; i++)\n  {\n    ht_p->find(0);\n\t}\n}\n\ntemplate<typename T>\nvoid* thread_checkmiss(void* threadArgs)\n{\n  WorkerArgs* args = static_cast<WorkerArgs*>(threadArgs);\n  int* elems = args->elems;\n  T*   ht_p  = static_cast<T*>(args->ht_p);\n  int  start     = args->start;\n  int  num_elems = args->num_elems;\n  int  tid       = args->tid;\n\n  for (int i = start; i < start + num_elems; i++)\n  {\n#if 1\n\t\tstruct KV* res = ht_p->find(elems[i]);\n\t\tif (res == nullptr || res->k == -1) {\n\t\t\t++miss;\n\t\t\tht_p->insert(new struct KV (elems[i], elems[i]));\n\t\t\tprintf(\"miss!! key is %d\\n\",elems[i]);\n\t\t}\n#endif\n#if 0\n\t\tbool res = ht_p->insert(new struct KV(elems[i], elems[i]));\n\t\tif (res) {\n\t\t\t++miss;\n\t\t\tprintf(\"miss! 
key is %d\\n\",elems[i]);\n\t\t}\n#endif\n  }\n\n}\n\n\ntemplate<typename T>\nvoid* thread_insert(void* threadArgs)\n{\n  WorkerArgs* args = static_cast<WorkerArgs*>(threadArgs);\n  int* elems = args->elems;\n  T*   ht_p  = static_cast<T*>(args->ht_p);\n  int  start     = args->start;\n  int  num_elems = args->num_elems;\n  int  tid       = args->tid;\n\n  for (int i = start; i < start + num_elems; i++)\n  {\n    ht_p->insert({elems[i], elems[i]});\n  }\n  \n}\n\ntemplate<typename T>\nvoid* thread_remove(void* threadArgs)\n{\n  WorkerArgs* args = static_cast<WorkerArgs*>(threadArgs);\n  int* elems = args->elems;\n  T*   ht_p  = static_cast<T*>(args->ht_p);\n  int  start     = args->start;\n  int  num_elems = args->num_elems;\n  int  tid       = args->tid;\n  bool remove    = args->remove;\n  \n  std::random_device                 rd;\n  std::mt19937                       mt(rd());\n  std::uniform_int_distribution<int> rng(0, 200000 - 1);\n\n  for (int i = start; i < start + num_elems; i++)\n  {\n    if (remove)\n      ht_p->deleteVal(elems[i]);\n    else\n      ht_p->find(elems[rng(mt)]);\n  }\n\n}\n\n#endif\n"
  },
  {
    "path": "solvers/wheels/lockfreehash/lprobe/utilities.h",
    "content": "#pragma once\n\n#include <iostream>\n#include <ctype.h>\n#include <memory>\n#include <stdlib.h>\n#include <type_traits>\n#include <type_traits>\n#include <math.h>\n#include <atomic>\n#include <cstring>\n#include \"parallel.h\"\n\nusing std::cout;\nusing std::endl;\n\ntemplate <typename Lf, typename Rf >\nstatic void par_do_if(bool do_parallel, Lf left, Rf right, bool cons=false) {\n  if (do_parallel) par_do(left, right, cons);\n  else {left(); right();}\n}\n\ntemplate <typename Lf, typename Mf, typename Rf >\ninline void par_do3(Lf left, Mf mid, Rf right) {\n  auto left_mid = [&] () {par_do(left,mid);};\n  par_do(left_mid, right);\n}\n\ntemplate <typename Lf, typename Mf, typename Rf >\nstatic void par_do3_if(bool do_parallel, Lf left, Mf mid, Rf right) {\n  if (do_parallel) par_do3(left, mid, right);\n  else {left(); mid(); right();}\n}\n\nnamespace pbbs {\n  template <class T>\n  size_t log2_up(T);\n}\n\ntemplate <class T>\nstruct maybe {\n\tT value;\n\tbool valid;\n\n\tmaybe(T v, bool u) : value(v) {\n\t\tvalid = u;\n\t}\n\tmaybe(T v) : value(v) {\n\t\tvalid = true;\n\t}\n\tmaybe() {\n\t\tvalid = false;\n\t}\n\n\tbool operator !() const {\n\t\treturn !valid;\n\t}\n\toperator bool() const {\n\t\treturn valid;\n\t};\n\tT& operator * () {\n\t\treturn value;\n\t}\n};\n\nnamespace pbbs {\n\n  struct empty {};\n\n  typedef uint32_t flags;\n  const flags no_flag = 0;\n  const flags fl_sequential = 1;\n  const flags fl_debug = 2;\n  const flags fl_time = 4;\n  const flags fl_conservative = 8;\n  const flags fl_inplace = 16;\n\n  template<typename T>\n  inline void assign_uninitialized(T& a, const T& b) {\n    new (static_cast<void*>(std::addressof(a))) T(b);\n  }\n\n  template<typename T>\n  inline void assign_uninitialized(T& a, T&& b) { \n    new (static_cast<void*>(std::addressof(a))) T(std::move(b));\n  }\n\n  template<typename T>\n  inline void move_uninitialized(T& a, const T b) {\n    new (static_cast<void*>(std::addressof(a))) T(std::move(b));\n  
}\n\n  template<typename T>\n  inline void copy_memory(T& a, const T &b) {\n    std::memcpy(&a, &b, sizeof(T));\n  }\n\n  enum _copy_type { _assign, _move, _copy};\n  \n  template<_copy_type copy_type, typename T>\n  inline void copy_val(T& a, const T &b) {\n    switch (copy_type) {\n    case _assign: assign_uninitialized(a, b); break;\n    case _move: move_uninitialized(a, b); break;\n    case _copy: copy_memory(a,b); break;\n    }\n  }\n  \n  // a 32-bit hash function\n  inline uint32_t hash32(uint32_t a) {\n    a = (a+0x7ed55d16) + (a<<12);\n    a = (a^0xc761c23c) ^ (a>>19);\n    a = (a+0x165667b1) + (a<<5);\n    a = (a+0xd3a2646c) ^ (a<<9);\n    a = (a+0xfd7046c5) + (a<<3);\n    a = (a^0xb55a4f09) ^ (a>>16);\n    return a;\n  }\n\n  inline uint32_t hash32_2(uint32_t a) {\n    uint32_t z = (a + 0x6D2B79F5UL);\n    z = (z ^ (z >> 15)) * (z | 1UL);\n    z ^= z + (z ^ (z >> 7)) * (z | 61UL);\n    return z ^ (z >> 14);\n  }\n\n  inline uint32_t hash32_3(uint32_t a) {\n      uint32_t z = a + 0x9e3779b9;\n      z ^= z >> 15; // 16 for murmur3\n      z *= 0x85ebca6b;\n      z ^= z >> 13;\n      z *= 0xc2b2ae3d; // 0xc2b2ae35 for murmur3\n      return z ^= z >> 16;\n  }\n\n\n  // from numerical recipes\n  inline uint64_t hash64(uint64_t u )\n  {\n    uint64_t v = u * 3935559000370003845ul + 2691343689449507681ul;\n    v ^= v >> 21;\n    v ^= v << 37;\n    v ^= v >>  4;\n    v *= 4768777513237032717ul;\n    v ^= v << 20;\n    v ^= v >> 41;\n    v ^= v <<  5;\n    return v;\n  }\n\n  // a slightly cheaper, but possibly not as good version\n  // based on splitmix64\n  inline uint64_t hash64_2(uint64_t x) {\n    x = (x ^ (x >> 30)) * UINT64_C(0xbf58476d1ce4e5b9);\n    x = (x ^ (x >> 27)) * UINT64_C(0x94d049bb133111eb);\n    x = x ^ (x >> 31);\n    return x;\n  }\n\n\n  template <typename ET>\n  inline bool atomic_compare_and_swap(ET* a, ET oldval, ET newval) {\n    static_assert(sizeof(ET) <= 8, \"Bad CAS length\");\n    if (sizeof(ET) == 1) {\n      uint8_t r_oval, 
r_nval;\n      std::memcpy(&r_oval, &oldval, 1);\n      std::memcpy(&r_nval, &newval, 1);\n      return __sync_bool_compare_and_swap(reinterpret_cast<uint8_t*>(a), r_oval, r_nval);\n    } else if (sizeof(ET) == 4) {\n      uint32_t r_oval, r_nval;\n      std::memcpy(&r_oval, &oldval, 4);\n      std::memcpy(&r_nval, &newval, 4);\n      return __sync_bool_compare_and_swap(reinterpret_cast<uint32_t*>(a), r_oval, r_nval);\n    } else { // if (sizeof(ET) == 8) {\n      uint64_t r_oval, r_nval;\n      std::memcpy(&r_oval, &oldval, 8);\n      std::memcpy(&r_nval, &newval, 8);\n      return __sync_bool_compare_and_swap(reinterpret_cast<uint64_t*>(a), r_oval, r_nval);\n    } \n  }\n\n  template <typename E, typename EV>\n  inline E fetch_and_add(E *a, EV b) {\n    volatile E newV, oldV;\n    do {oldV = *a; newV = oldV + b;}\n    while (!atomic_compare_and_swap(a, oldV, newV));\n    return oldV;\n  }\n\n  template <typename E, typename EV>\n  inline void write_add(E *a, EV b) {\n    //volatile E newV, oldV;\n    E newV, oldV;\n    do {oldV = *a; newV = oldV + b;}\n    while (!atomic_compare_and_swap(a, oldV, newV));\n  }\n\n  template <typename E, typename EV>\n  inline void write_add(std::atomic<E> *a, EV b) {\n    //volatile E newV, oldV;\n    E newV, oldV;\n    do {oldV = a->load(); newV = oldV + b;}\n    while (!std::atomic_compare_exchange_strong(a, &oldV, newV));\n  }\n\n  template <typename ET, typename F>\n  inline bool write_min(ET *a, ET b, F less) {\n    ET c; bool r=0;\n    do c = *a;\n    while (less(b,c) && !(r=atomic_compare_and_swap(a,c,b)));\n    return r;\n  }\n\n  template <typename ET, typename F>\n  inline bool write_min(std::atomic<ET> *a, ET b, F less) {\n    ET c; bool r=0;\n    do c = a->load();\n    while (less(b,c) && !(r=std::atomic_compare_exchange_strong(a, &c, b)));\n    return r;\n  }\n\n  template <typename ET, typename F>\n  inline bool write_max(ET *a, ET b, F less) {\n    ET c; bool r=0;\n    do c = *a;\n    while (less(c,b) && 
!(r=atomic_compare_and_swap(a,c,b)));\n    return r;\n  }\n\n  template <typename ET, typename F>\n  inline bool write_max(std::atomic<ET> *a, ET b, F less) {\n    ET c; bool r=0;\n    do c = a->load();\n    while (less(c,b) && !(r=std::atomic_compare_exchange_strong(a, &c, b)));\n    return r;\n  }\n\n  // returns the log base 2 rounded up (works on ints or longs or unsigned versions)\n  template <class T>\n  size_t log2_up(T i) {\n    size_t a=0;\n    T b=i-1;\n    while (b > 0) {b = b >> 1; a++;}\n    return a;\n  }\n\n  inline size_t granularity(size_t n) {\n    return (n > 100) ? ceil(pow(n,0.5)) : 100;\n  }\n}\n"
  },
  {
    "path": "solvers/wheels/threadpool/ThreadPool.h",
    "content": "#ifndef THREAD_POOL_H\n#define THREAD_POOL_H\n\n#include <vector>\n#include <queue>\n#include <memory>\n#include <thread>\n#include <mutex>\n#include <condition_variable>\n#include <future>\n#include <functional>\n#include <stdexcept>\n\nclass ThreadPool {\npublic:\n    ThreadPool(size_t);\n    template<class F, class... Args>\n    auto enqueue(F&& f, Args&&... args) \n        -> std::future<typename std::result_of<F(Args...)>::type>;\n    ~ThreadPool();\nprivate:\n    // need to keep track of threads so we can join them\n    std::vector< std::thread > workers;\n    // the task queue\n    std::queue< std::function<void()> > tasks;\n    \n    // synchronization\n    std::mutex queue_mutex;\n    std::condition_variable condition;\n    bool stop;\n};\n \n// the constructor just launches some amount of workers\ninline ThreadPool::ThreadPool(size_t threads)\n    :   stop(false)\n{\n    for(size_t i = 0;i<threads;++i)\n        workers.emplace_back(\n            [this]\n            {\n                for(;;)\n                {\n                    std::function<void()> task;\n\n                    {\n                        std::unique_lock<std::mutex> lock(this->queue_mutex);\n                        this->condition.wait(lock,\n                            [this]{ return this->stop || !this->tasks.empty(); });\n                        if(this->stop && this->tasks.empty())\n                            return;\n                        task = std::move(this->tasks.front());\n                        this->tasks.pop();\n                    }\n\n                    task();\n                }\n            }\n        );\n}\n\n// add new work item to the pool\ntemplate<class F, class... Args>\nauto ThreadPool::enqueue(F&& f, Args&&... 
args) \n    -> std::future<typename std::result_of<F(Args...)>::type>\n{\n    using return_type = typename std::result_of<F(Args...)>::type;\n\n    auto task = std::make_shared< std::packaged_task<return_type()> >(\n            std::bind(std::forward<F>(f), std::forward<Args>(args)...)\n        );\n        \n    std::future<return_type> res = task->get_future();\n    {\n        std::unique_lock<std::mutex> lock(queue_mutex);\n\n        // don't allow enqueueing after stopping the pool\n        if(stop)\n            throw std::runtime_error(\"enqueue on stopped ThreadPool\");\n\n        tasks.emplace([task](){ (*task)(); });\n    }\n    condition.notify_one();\n    return res;\n}\n\n// the destructor joins all threads\ninline ThreadPool::~ThreadPool()\n{\n    {\n        std::unique_lock<std::mutex> lock(queue_mutex);\n        stop = true;\n    }\n    condition.notify_all();\n    for(std::thread &worker: workers)\n        worker.join();\n}\n\n#endif\n"
  },
  {
    "path": "solvers/wheels/threadpool/ctpl.h",
    "content": "\n/*********************************************************\n *\n *  Copyright (C) 2014 by Vitaliy Vitsentiy\n *\n *  Licensed under the Apache License, Version 2.0 (the \"License\");\n *  you may not use this file except in compliance with the License.\n *  You may obtain a copy of the License at\n *\n *     http://www.apache.org/licenses/LICENSE-2.0\n *\n *  Unless required by applicable law or agreed to in writing, software\n *  distributed under the License is distributed on an \"AS IS\" BASIS,\n *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n *  See the License for the specific language governing permissions and\n *  limitations under the License.\n *\n *********************************************************/\n\n\n#ifndef __ctpl_thread_pool_H__\n#define __ctpl_thread_pool_H__\n\n#include <functional>\n#include <thread>\n#include <atomic>\n#include <vector>\n#include <memory>\n#include <exception>\n#include <future>\n#include <mutex>\n#include <boost/lockfree/queue.hpp>\n\n\n#ifndef _ctplThreadPoolLength_\n#define _ctplThreadPoolLength_  1000000\n#endif\n\n\n// thread pool to run user's functors with signature\n//      ret func(int id, other_params)\n// where id is the index of the thread that runs the functor\n// ret is some return type\n\n\nnamespace ctpl {\n\n    class thread_pool {\n\n    public:\n\n        thread_pool() : q(_ctplThreadPoolLength_) { this->init(); }\n        thread_pool(int nThreads, int queueSize = _ctplThreadPoolLength_) : q(queueSize) { this->init(); this->resize(nThreads); }\n\n        // the destructor waits for all the functions in the queue to be finished\n        ~thread_pool() {\n            this->stop(true);\n        }\n\n        // get the number of running threads in the pool\n        int size() { return static_cast<int>(this->threads.size()); }\n\n        // number of idle threads\n        int n_idle() { return this->nWaiting; }\n        std::thread & get_thread(int i) { return 
*this->threads[i]; }\n\n        // change the number of threads in the pool\n        // should be called from one thread, otherwise be careful to not interleave, also with this->stop()\n        // nThreads must be >= 0\n        void resize(int nThreads) {\n            if (!this->isStop && !this->isDone) {\n                int oldNThreads = static_cast<int>(this->threads.size());\n                if (oldNThreads <= nThreads) {  // if the number of threads is increased\n                    this->threads.resize(nThreads);\n                    this->flags.resize(nThreads);\n\n                    for (int i = oldNThreads; i < nThreads; ++i) {\n                        this->flags[i] = std::make_shared<std::atomic<bool>>(false);\n                        this->set_thread(i);\n                    }\n                }\n                else {  // the number of threads is decreased\n                    for (int i = oldNThreads - 1; i >= nThreads; --i) {\n                        *this->flags[i] = true;  // this thread will finish\n                        this->threads[i]->detach();\n                    }\n                    {\n                        // stop the detached threads that were waiting\n                        std::unique_lock<std::mutex> lock(this->mutex);\n                        this->cv.notify_all();\n                    }\n                    this->threads.resize(nThreads);  // safe to delete because the threads are detached\n                    this->flags.resize(nThreads);  // safe to delete because the threads have copies of shared_ptr of the flags, not originals\n                }\n            }\n        }\n\n        // empty the queue\n        void clear_queue() {\n            std::function<void(int id)> * _f;\n            while (this->q.pop(_f))\n                delete _f;  // empty the queue\n        }\n\n        // pops a functional wraper to the original function\n        std::function<void(int)> pop() {\n            std::function<void(int id)> * _f = 
nullptr;\n            this->q.pop(_f);\n            std::unique_ptr<std::function<void(int id)>> func(_f);  // at return, delete the function even if an exception occurred\n            \n            std::function<void(int)> f;\n            if (_f)\n                f = *_f;\n            return f;\n        }\n\n\n        // wait for all computing threads to finish and stop all threads\n        // may be called asyncronously to not pause the calling thread while waiting\n        // if isWait == true, all the functions in the queue are run, otherwise the queue is cleared without running the functions\n        void stop(bool isWait = false) {\n            if (!isWait) {\n                if (this->isStop)\n                    return;\n                this->isStop = true;\n                for (int i = 0, n = this->size(); i < n; ++i) {\n                    *this->flags[i] = true;  // command the threads to stop\n                }\n                this->clear_queue();  // empty the queue\n            }\n            else {\n                if (this->isDone || this->isStop)\n                    return;\n                this->isDone = true;  // give the waiting threads a command to finish\n            }\n            {\n                std::unique_lock<std::mutex> lock(this->mutex);\n                this->cv.notify_all();  // stop all waiting threads\n            }\n            for (int i = 0; i < static_cast<int>(this->threads.size()); ++i) {  // wait for the computing threads to finish\n                if (this->threads[i]->joinable())\n                    this->threads[i]->join();\n            }\n            // if there were no threads in the pool but some functors in the queue, the functors are not deleted by the threads\n            // therefore delete them here\n            this->clear_queue();\n            this->threads.clear();\n            this->flags.clear();\n        }\n\n        template<typename F, typename... Rest>\n        auto push(F && f, Rest&&... 
rest) ->std::future<decltype(f(0, rest...))> {\n            auto pck = std::make_shared<std::packaged_task<decltype(f(0, rest...))(int)>>(\n                std::bind(std::forward<F>(f), std::placeholders::_1, std::forward<Rest>(rest)...)\n            );\n\n            auto _f = new std::function<void(int id)>([pck](int id) {\n                (*pck)(id);\n            });\n            this->q.push(_f);\n\n            std::unique_lock<std::mutex> lock(this->mutex);\n            this->cv.notify_one();\n\n            return pck->get_future();\n        }\n\n        // run the user's function that excepts argument int - id of the running thread. returned value is templatized\n        // operator returns std::future, where the user can get the result and rethrow the catched exceptins\n        template<typename F>\n        auto push(F && f) ->std::future<decltype(f(0))> {\n            auto pck = std::make_shared<std::packaged_task<decltype(f(0))(int)>>(std::forward<F>(f));\n\n            auto _f = new std::function<void(int id)>([pck](int id) {\n                (*pck)(id);\n            });\n            this->q.push(_f);\n\n            std::unique_lock<std::mutex> lock(this->mutex);\n            this->cv.notify_one();\n\n            return pck->get_future();\n        }\n\n\n    private:\n\n        // deleted\n        thread_pool(const thread_pool &);// = delete;\n        thread_pool(thread_pool &&);// = delete;\n        thread_pool & operator=(const thread_pool &);// = delete;\n        thread_pool & operator=(thread_pool &&);// = delete;\n\n        void set_thread(int i) {\n            std::shared_ptr<std::atomic<bool>> flag(this->flags[i]);  // a copy of the shared ptr to the flag\n            auto f = [this, i, flag/* a copy of the shared ptr to the flag */]() {\n                std::atomic<bool> & _flag = *flag;\n                std::function<void(int id)> * _f;\n                bool isPop = this->q.pop(_f);\n                while (true) {\n                    while 
(isPop) {  // if there is anything in the queue\n                        std::unique_ptr<std::function<void(int id)>> func(_f);  // at return, delete the function even if an exception occurred\n                        (*_f)(i);\n\n                        if (_flag)\n                            return;  // the thread is wanted to stop, return even if the queue is not empty yet\n                        else\n                            isPop = this->q.pop(_f);\n                    }\n\n                    // the queue is empty here, wait for the next command\n                    std::unique_lock<std::mutex> lock(this->mutex);\n                    ++this->nWaiting;\n                    this->cv.wait(lock, [this, &_f, &isPop, &_flag](){ isPop = this->q.pop(_f); return isPop || this->isDone || _flag; });\n                    --this->nWaiting;\n\n                    if (!isPop)\n                        return;  // if the queue is empty and this->isDone == true or *flag then return\n                }\n            };\n            this->threads[i].reset(new std::thread(f));  // compiler may not support std::make_unique()\n        }\n\n        void init() { this->nWaiting = 0; this->isStop = false; this->isDone = false; }\n\n        std::vector<std::unique_ptr<std::thread>> threads;\n        std::vector<std::shared_ptr<std::atomic<bool>>> flags;\n        mutable boost::lockfree::queue<std::function<void(int id)> *> q;\n        std::atomic<bool> isDone;\n        std::atomic<bool> isStop;\n        std::atomic<int> nWaiting;  // how many threads are waiting\n\n        std::mutex mutex;\n        std::condition_variable cv;\n    };\n\n}\n\n#endif // __ctpl_thread_pool_H__\n\n\n"
  },
  {
    "path": "solvers/wheels/threadpool/threadpool_example.cpp",
    "content": "#include <iostream>\n#include <vector>\n#include <chrono>\n\n#include \"ctpl.h\"\n#include <sys/types.h>\n#include <sys/stat.h>\n#include <sys/mman.h> /* mmap() is defined in this header */\n#include <fcntl.h>\n#include <unistd.h>\n#include <string.h>\n\n\nstruct SArg {\n\tint x;\n\tint y;\n};\n\n\n\nctpl::thread_pool* pool;\nctpl::thread_pool* spool;\n\nint some_function(int id, struct SArg *arg) {\n\tstd::cout << \"hello \" << arg->x << std::endl;\n\tstd::this_thread::sleep_for(std::chrono::seconds(1));\n\tstd::cout << \"world \" << arg->y << std::endl;\n\treturn arg->y;\n}\n\nstatic void generate_input(uint64_t fid) {\n\t\tchar path[1000];\n\t\tstd::string __output_dir = \"/hyper/fuzz/tmp\";\n\t\tstd::string output_file = std::string(__output_dir) + \"/\" + \n\t\t\tstd::to_string(fid) + \"-id\";\n\t\t//std::string input_file = std::string(__output_dir) + \"/\" + taint_file;\n\t\tstd::string input_file =  \"/home/cju/e2e/filter_des/0-id\";\n\t\t//std::cout << \"out file is \" << output_file << std::endl;\n\t\tstruct stat statbuf;\n\t\tvoid *src, *dst;\n\t\tint fdout, fdin;\n\t\tint mode = 0x777;\n\t\t\n\t\t/* open the input file */\n\t\tif ((fdin = open (input_file.c_str(), O_RDONLY)) < 0)\n\t\t{\n\t\t\t//assert(false && \"can't open file for reading\");\n\t\t\tprintf(\"cannot open input file!\\n\");\n\t\t\treturn;\n\t\t}\n\n\t\t/* open/create the output file */\n\t\tif ((fdout = open (output_file.c_str(), O_RDWR | O_CREAT | O_TRUNC, mode)) < 0)//edited here\n\t\t{\n\t\t\t//assert(false && \"can't create file for writing\");\n\t\t\treturn;\n\t\t}\n\n\t\t/* find size of input file */\n\t\tif (fstat (fdin,&statbuf) < 0)\n\t\t{\n\t\t\t//assert (false && \"fstat error\");\n\t\t\tclose(fdin);\n\t\t\treturn;\n\t\t}\t\n\n\t\t/* mmap the input file */\n\t\tif ((src = mmap (0, statbuf.st_size, PROT_READ, MAP_SHARED, fdin, 0))\n\t\t\t\t== (caddr_t) -1) {\n\t\t\tclose(fdin);\n\t\t\treturn;\n\t\t}\n\n\t\tdst = malloc(statbuf.st_size);\n\n\t\t/* this copies 
the input file to the output file */\n\t\tmemcpy (dst, src, statbuf.st_size);\n\t\tfor (int i=0;i<4;i++) {\n\t\t\t((uint8_t*)dst)[i] = i;\n\t\t\t//printf(\"generate_input index is %u and value is %u\\n\", it->first,(uint32_t)it->second);\n\t\t}\n\n\t\tif (write(fdout, dst, statbuf.st_size) < 0) {\n\t\t\treturn;\n\t\t}\n\n\t\tclose(fdin);\n\t\tclose(fdout);\n\t\tfree(dst);\n}\n\nstd::atomic<uint64_t> id(0);\nstd::atomic<uint64_t> count(0);\n\nvoid addAll(int i) {\n\tgenerate_input(++id);\n\tcount++;\n\tstd::cout << \"task count is \" << count << std::endl;\n\t//uint64_t sum = 0;\n\t//std::this_thread::sleep_for (std::chrono::milliseconds(10));\n/*\n\tfor(int i=0;i<10000000;i++) {\n\t\tsum += i;\t\n\t}\n*/\n//\treturn sum;\n}\n\nvoid task(int i) {\n\tspool->push(addAll);\t\n}\n\n\n\nint main(int argc, char** argv)\n{\n\t\tint num_of_threads = 0;\n\t\tif (sscanf (argv[1], \"%i\", &num_of_threads) != 1) {\n\t\t\tfprintf(stderr, \"error - not an integer\");\n\t\t}\n  pool = new ctpl::thread_pool(num_of_threads);\n\tspool = new ctpl::thread_pool(num_of_threads);\n\tstd::vector< std::future<uint64_t> > results;\n\n\tfor(int i = 0; i < 10000; ++i) {\n\t\t\t\tpool->push(task);\n\t}\n\n\t//std::cout <<\"check results\" << std::endl;\n\t//for(auto && result: results)\n\t//\tstd::cout << result.get() << ' ';\n\t//std::cout << std::endl;\n\t//delete pool;\n\tspool->stop(true);\n\tpool->stop(true);\n\n\treturn 0;\n}\n"
  },
  {
    "path": "solvers/z3-solver.cpp",
    "content": "#include \"solver.h\"\n\n#include <z3++.h>\n\n#include <string.h>\n\nusing namespace rgd;\n\n#define DEBUG 0\n\n#if !DEBUG\n#undef DEBUGF\n#define DEBUGF(_str...) do { } while (0)\n#elif !defined (DEBUGF)\n#define DEBUGF(_str...) do { fprintf(stderr, _str); } while (0)\n#endif\n\n#ifndef WARNF\n#define WARNF(_str...) do { fprintf(stderr, _str); } while (0)\n#endif\n\nz3::context g_z3_context;\nconst unsigned kSolverTimeout = 10000; // 10 seconds\n\nZ3Solver::Z3Solver()\n    : context_(g_z3_context), solver_(z3::solver(context_, \"QF_BV\"))\n{\n  // Set timeout for solver\n  z3::params p(context_);\n  p.set(\":timeout\", kSolverTimeout);\n  solver_.set(p);\n}\n\nstatic inline z3::expr\ncache_expr(uint32_t label, z3::expr const &e, \n           std::unordered_map<uint32_t, z3::expr> &expr_cache) {\t\n  if (label != 0)\n    expr_cache.insert({label, e});\n  return e;\n}\n\nz3::expr Z3Solver::serialize(const AstNode* node,\n    const std::vector<std::pair<bool, uint64_t>> &input_args,\n    std::unordered_map<uint32_t, z3::expr> &expr_cache) {\n\n  auto itr = expr_cache.find(node->label());\n  if (node->label() != 0 && itr != expr_cache.end())\n    return itr->second;\n\n  switch (node->kind()) {\n    case rgd::Bool: {\n      // getTrue is actually 1 bit integer 1\n      return context_.bool_val(node->boolvalue());\n    }\n    case rgd::Constant: {\n      uint64_t val = input_args[node->index()].second;\n      if (node->bits() == 1) {\n        return context_.bool_val(val == 1);\n      } else if (node->bits() <= 64) {\n        return context_.bv_val(val, node->bits());\n      } else {\n        uint32_t chunks = node->bits() / 64;\n        uint32_t remain = node->bits() % 64;\n        z3::expr ret = context_.bv_val(val, 64);\n        for (uint32_t i = 1; i < chunks; i++) {\n          val = input_args[node->index() + i].second;\n          ret = z3::concat(context_.bv_val(val, 64), ret);\n        }\n        if (remain > 0) {\n          val = 
input_args[node->index() + chunks].second;\n          ret = z3::concat(context_.bv_val(val, remain), ret);\n        }\n        return ret;\n      }\n    }\n    case rgd::Read: {\n      z3::symbol symbol = context_.int_symbol(node->index());\n      z3::sort sort = context_.bv_sort(8);\n      z3::expr out = context_.constant(symbol, sort);\n      for (uint32_t i = 1; i < node->bits() / 8; i++) {\n        symbol = context_.int_symbol(node->index() + i);\n        out = z3::concat(context_.constant(symbol, sort), out);\n      }\n      return cache_expr(node->label(), out, expr_cache);\n    }\n    case rgd::Concat: {\n      z3::expr c1 = serialize(&node->children(0), input_args, expr_cache);\n      z3::expr c2 = serialize(&node->children(1), input_args, expr_cache);\n      return cache_expr(node->label(), z3::concat(c2, c1), expr_cache);\n    }\n    case rgd::Extract: {\n      z3::expr c1 = serialize(&node->children(0), input_args, expr_cache);\n      return cache_expr(node->label(),\n                        c1.extract(node->index() + node->bits() - 1, node->index()),\n                        expr_cache);\n    }\n    case rgd::ZExt: {\n      z3::expr c1 = serialize(&node->children(0), input_args, expr_cache);\n      if (c1.is_bool())\n        c1 = z3::ite(c1, context_.bv_val(1,1), context_.bv_val(0, 1));\n      return cache_expr(node->label(),\n                        z3::zext(c1, node->bits() - node->children(0).bits()),\n                        expr_cache);\n    }\n    case rgd::SExt: {\n      z3::expr c1 = serialize(&node->children(0), input_args, expr_cache);\n      return cache_expr(node->label(),\n                        z3::sext(c1, node->bits() - node->children(0).bits()),\n                        expr_cache);\n    }\n    case rgd::Add: {\n      z3::expr c1 = serialize(&node->children(0), input_args, expr_cache);\n      z3::expr c2 = serialize(&node->children(1), input_args, expr_cache);\n      return cache_expr(node->label(), c1 + c2, expr_cache);\n    }\n    
case rgd::Sub: {\n      z3::expr c1 = serialize(&node->children(0), input_args, expr_cache);\n      z3::expr c2 = serialize(&node->children(1), input_args, expr_cache);\n      return cache_expr(node->label(), c1 - c2, expr_cache);\n    }\n    case rgd::Mul: {\n      z3::expr c1 = serialize(&node->children(0), input_args, expr_cache);\n      z3::expr c2 = serialize(&node->children(1), input_args, expr_cache);\n      return cache_expr(node->label(), c1 * c2, expr_cache);\n    }\n    case rgd::UDiv: {\n      z3::expr c1 = serialize(&node->children(0), input_args, expr_cache);\n      z3::expr c2 = serialize(&node->children(1), input_args, expr_cache);\n      return cache_expr(node->label(), z3::udiv(c1, c2), expr_cache);\n    }\n    case rgd::SDiv: {\n      z3::expr c1 = serialize(&node->children(0), input_args, expr_cache);\n      z3::expr c2 = serialize(&node->children(1), input_args, expr_cache);\n      return cache_expr(node->label(), c1 / c2, expr_cache); \n    }\n    case rgd::URem: {\n      z3::expr c1 = serialize(&node->children(0), input_args, expr_cache);\n      z3::expr c2 = serialize(&node->children(1), input_args, expr_cache);\n      return cache_expr(node->label(), z3::urem(c1, c2), expr_cache);\n    }\n    case rgd::SRem: {\n      z3::expr c1 = serialize(&node->children(0), input_args, expr_cache);\n      z3::expr c2 = serialize(&node->children(1), input_args, expr_cache);\n      return cache_expr(node->label(), z3::srem(c1, c2), expr_cache);\n    }\n    case rgd::Neg: {\n      z3::expr c1 = serialize(&node->children(0), input_args, expr_cache);\n      return cache_expr(node->label(), -c1, expr_cache);\n    }\n    case rgd::Not: {\n      z3::expr c1 = serialize(&node->children(0), input_args, expr_cache);\n      return cache_expr(node->label(), ~c1, expr_cache);\n    }\n    case rgd::And: {\n      z3::expr c1 = serialize(&node->children(0), input_args, expr_cache);\n      z3::expr c2 = serialize(&node->children(1), input_args, expr_cache);\n      return 
cache_expr(node->label(), c1 & c2, expr_cache);\n    }\n    case rgd::Or: {\n      z3::expr c1 = serialize(&node->children(0), input_args, expr_cache);\n      z3::expr c2 = serialize(&node->children(1), input_args, expr_cache);\n      return cache_expr(node->label(), c1 | c2, expr_cache);\n    }\n    case rgd::Xor: {\n      z3::expr c1 = serialize(&node->children(0), input_args, expr_cache);\n      z3::expr c2 = serialize(&node->children(1), input_args, expr_cache);\n      return cache_expr(node->label(), c1 ^ c2, expr_cache);\n    }\n    case rgd::Shl: {\n      z3::expr c1 = serialize(&node->children(0), input_args, expr_cache);\n      z3::expr c2 = serialize(&node->children(1), input_args, expr_cache);\n      return cache_expr(node->label(), z3::shl(c1, c2), expr_cache);\n    }\n    case rgd::LShr: {\n      z3::expr c1 = serialize(&node->children(0), input_args, expr_cache);\n      z3::expr c2 = serialize(&node->children(1), input_args, expr_cache);\n      return cache_expr(node->label(), z3::lshr(c1, c2), expr_cache);\n    }\n    case rgd::AShr: {\n      z3::expr c1 = serialize(&node->children(0), input_args, expr_cache);\n      z3::expr c2 = serialize(&node->children(1), input_args, expr_cache);\n      return cache_expr(node->label(), z3::ashr(c1, c2), expr_cache);\n    }\n    // case rgd::LOr: {\n    //   z3::expr c1 = serialize(&node->children(0), input_args, expr_cache);\n    //   z3::expr c2 = serialize(&node->children(1), input_args, expr_cache);\n    //   return cache_expr(node->label(), c1 || c2, expr_cache);\n    // }\n    // case rgd::LAnd: {\n    //   z3::expr c1 = serialize(&node->children(0), input_args, expr_cache);\n    //   z3::expr c2 = serialize(&node->children(1), input_args, expr_cache);\n    //   return cache_expr(node->label(), c1 && c2, expr_cache);\n    // }\n    // case rgd::LNot: {\n    //   z3::expr c1 = serialize(&node->children(0), input_args, expr_cache);\n    //   return cache_expr(node->label(), !c1, expr_cache);\n    // }\n    
default:\n      WARNF(\"unhandler expr: \");\n      throw z3::exception(\"unsupported operator\");\n      break;\n  }\n}\n\nz3::expr Z3Solver::serialize_rel(uint32_t comparison,\n    const AstNode* node,\n    const std::vector<std::pair<bool, uint64_t>> &input_args,\n    std::unordered_map<uint32_t,z3::expr> &expr_cache) {\n\n  if (node->children_size() != 2) {\n    throw z3::exception(\"invalid children size\");\n  }\n  z3::expr c1 = serialize(&node->children(0), input_args, expr_cache);\n  z3::expr c2 = serialize(&node->children(1), input_args, expr_cache);\n\n  switch(comparison) {\n    case rgd::Equal:\n    case rgd::Memcmp:\n      return c1 == c2;\n    case rgd::Distinct:\n    case rgd::MemcmpN:\n      return c1 != c2;\n    case rgd::Ult:\n      return z3::ult(c1, c2);\n    case rgd::Ule:\n      return z3::ule(c1, c2);\n    case rgd::Ugt:\n      return z3::ugt(c1, c2);\n    case rgd::Uge:\n      return z3::uge(c1, c2);\n    case rgd::Slt:\n      return c1 < c2;\n    case rgd::Sle:\n      return c1 <= c2;\n    case rgd::Sgt:\n      return c1 > c2;\n    case rgd::Sge:\n      return c1 >= c2;\n    default:\n      WARNF(\"unhandler comparison: %d\", comparison);\n      throw z3::exception(\"unsupported operator\");\n      break;\n  }\n}\n\nstatic inline void extract_model(z3::model &m, uint8_t *buf, size_t buf_size,\n                                 std::unordered_map<size_t, uint8_t> &solution) {\n  unsigned num_constants = m.num_consts();\n  for (unsigned i = 0; i< num_constants; i++) {\n    z3::func_decl decl = m.get_const_decl(i);\n    z3::expr e = m.get_const_interp(decl);\n    z3::symbol name = decl.name();\n    if (name.kind() == Z3_INT_SYMBOL) {\n      uint8_t value = (uint8_t)e.get_numeral_int();\n      size_t offset = name.to_int();\n      if (offset < buf_size) {\n        buf[offset] = value;\n        solution[offset] = value;\n        DEBUGF(\"generate_input offset:%zu => %u\\n\", offset, value);\n      } else {\n        WARNF(\"offset %zu out of range 
%zu\\n\", offset, buf_size);\n      }\n    }\n  }\n}\n\nsolver_result_t\nZ3Solver::solve(std::shared_ptr<SearchTask> task,\n                const uint8_t *in_buf, size_t in_size,\n                uint8_t *out_buf, size_t &out_size) {\n\n  try {\n    solver_.reset(); // reset solver\n    auto base_task = task->base_task;\n    std::vector<z3::expr> assumptions;\n    while (base_task != nullptr) {\n      // no need to solve\n      if (base_task->skip_next) {\n        DEBUGF(\"skipping task\\n\");\n        task->skip_next = true; // set the flag for following tasks\n        out_size = in_size;\n        memcpy(out_buf, in_buf, in_size);\n        if (base_task->solved) {\n          for (auto const &[offset, value] : base_task->solution) {\n            out_buf[offset] = value;\n          }\n          return SOLVER_SAT;\n        } else {\n          return SOLVER_UNSAT;\n        }\n      } else if (base_task->solved) {\n        for (auto const &[offset, value] : base_task->solution) {\n          z3::symbol symbol = context_.int_symbol(offset);\n          z3::sort sort = context_.bv_sort(8);\n          z3::expr i = context_.constant(symbol, sort);\n          assumptions.push_back(i == value);\n        }\n      }\n      base_task = base_task->base_task;\n    }\n\n    std::unordered_map<uint32_t, z3::expr> expr_cache;\n    for (size_t i = 0, n = task->size(); i < n; i++) {\n      auto const &c = task->constraints(i);\n      z3::expr z3expr = serialize_rel(task->comparisons(i), c->get_root(), c->input_args, expr_cache);\n      DEBUGF(\"adding expr %s\\n\", z3expr.to_string().c_str());\n      solver_.add(z3expr);\n    }\n    auto ret = solver_.check();\n    if (ret == z3::sat) {\n      memcpy(out_buf, in_buf, in_size);\n      out_size = in_size;\n      z3::model m = solver_.get_model();\n      extract_model(m, out_buf, out_size, task->solution);\n      if (!task->atoi_info().empty()) {\n        // if there are atoi bytes, handle them\n        for (auto const &[offset, info] : 
task->atoi_info()) {\n          uint64_t val = 0;\n          uint32_t length = std::get<0>(info);\n          memcpy(out_buf + offset, in_buf + offset, length); // restore?\n          for (auto i = length; i != 0; --i) {\n            DEBUGF(\"generate_input atoi offset:%d => %lu\\n\", offset + i - 1, val);\n            auto itr = task->solution.find(offset + i - 1);\n            if (itr != task->solution.end())\n              val |= itr->second << (8 * (i - 1));\n            else\n              val |= 0 << (8 * (i - 1));\n          }\n          uint32_t base = std::get<1>(info);\n          uint32_t orig_len = std::get<2>(info);\n          DEBUGF(\"generate_input atoi offset:%d => %lu, base = %d, original len = %d\\n\",\n              offset, val, base, orig_len);\n          const char *format = nullptr;\n          switch (base) {\n            case 2: format = \"%lb\"; break;\n            case 8: format = \"%lo\"; break;\n            case 10: format = \"%ld\"; break;\n            case 16: format = \"%lx\"; break;\n            default: WARNF(\"unsupported base %d\\n\", base);\n          }\n          if (format) {\n            snprintf((char*)out_buf + offset, in_size - offset, format, val);\n          }\n        }\n      }\n      task->solved = true;\n      return SOLVER_SAT;\n    } else if (ret == z3::unsat) {\n      return SOLVER_UNSAT;\n    } else {\n      return SOLVER_TIMEOUT;\n    }\n  } catch (z3::exception e) {\n    WARNF(\"z3 exception %s\\n\", e.msg());\n  }\n  return SOLVER_ERROR;\n}\n"
  },
  {
    "path": "solvers/z3-ts.cpp",
    "content": "#include \"dfsan/dfsan.h\"\n\n#include \"parse-z3.h\"\n\n#include <algorithm>\n#include <unordered_map>\n#include <unordered_set>\n#include <utility>\n#include <vector>\n\nusing namespace symsan;\n\n#define FILTER_WRONG_AST 1\n\nstatic const std::unordered_map<unsigned, const char*> OP_MAP {\n  {__dfsan::Extract, \"Extract\"},\n  {__dfsan::Trunc,   \"Trunc\"},\n  {__dfsan::Concat,  \"Concat\"},\n  {__dfsan::ZExt,    \"Zext\"},\n  {__dfsan::SExt,    \"Sext\"},\n  {__dfsan::Add,     \"Add\"},\n  {__dfsan::Sub,     \"Sub\"},\n  {__dfsan::Mul,     \"Mul\"},\n  {__dfsan::UDiv,    \"Udiv\"},\n  {__dfsan::SDiv,    \"Sdiv\"},\n  {__dfsan::URem,    \"Urem\"},\n  {__dfsan::SRem,    \"Srem\"},\n  {__dfsan::Shl,     \"Shl\"},\n  {__dfsan::LShr,    \"Lshr\"},\n  {__dfsan::AShr,    \"Ashr\"},\n  {__dfsan::And,     \"And\"},\n  {__dfsan::Or,      \"Or\"},\n  {__dfsan::Xor,     \"Xor\"},\n  // relational comparisons\n#define RELATIONAL_ICMP(cmp) (__dfsan::ICmp | (cmp << 8))\n  {RELATIONAL_ICMP(__dfsan::bveq),  \"Equal\"},\n  {RELATIONAL_ICMP(__dfsan::bvneq), \"Distinct\"},\n  {RELATIONAL_ICMP(__dfsan::bvugt), \"Ugt\"},\n  {RELATIONAL_ICMP(__dfsan::bvuge), \"Uge\"},\n  {RELATIONAL_ICMP(__dfsan::bvult), \"Ult\"},\n  {RELATIONAL_ICMP(__dfsan::bvule), \"Ule\"},\n  {RELATIONAL_ICMP(__dfsan::bvsgt), \"Sgt\"},\n  {RELATIONAL_ICMP(__dfsan::bvsge), \"Sge\"},\n  {RELATIONAL_ICMP(__dfsan::bvslt), \"Slt\"},\n  {RELATIONAL_ICMP(__dfsan::bvsle), \"Sle\"},\n#undef RELATIONAL_ICMP\n  // higher-order string ops\n  {__dfsan::fstrchr,  \"strchr\"},\n  {__dfsan::fstrrchr, \"strrchr\"},\n  {__dfsan::fstrstr,  \"strstr\"},\n  {__dfsan::fstrpbrk, \"strpbrk\"},\n  {__dfsan::fstr_off, \"stroff\"},\n  {__dfsan::fsubstr,  \"substr\"},\n  {__dfsan::fstrcat,  \"strcat\"},\n  {__dfsan::fprefixof, \"prefixof\"},\n  {__dfsan::fsuffixof, \"suffixof\"},\n};\n\nstatic std::string get_op_name(uint32_t op) {\n  auto itr = OP_MAP.find(op);\n  if (itr != OP_MAP.end()) {\n    return itr->second;\n  }\n  
return std::to_string(op);\n}\n\n// Check if an op is a string operation (fstr_op_start to fstr_op_end)\nstatic inline bool is_string_op(uint16_t op) {\n  return op >= __dfsan::fstr_op_start && op < __dfsan::fstr_op_end;\n}\n\n// Check if an op is an indexOf-type operation (returns position, not content)\n// These are: fstrchr, fstrrchr, fstrstr, fstrpbrk, fstr_off\nstatic inline bool is_indexof_op(uint16_t op) {\n  return op >= __dfsan::fstrchr && op <= __dfsan::fstr_off;\n}\n\n// Check if an op is a content-type string operation (fsubstr, fstrcat)\nstatic inline bool is_content_string_op(uint16_t op) {\n  return op == __dfsan::fsubstr || op == __dfsan::fstrcat;\n}\n\n// Helper function to check if label tree contains indexOf operations\n// (used to skip validation since op1 is repurposed for haystack pointer)\nbool Z3AstParser::label_contains_indexof(dfsan_label label) {\n  if (label < CONST_OFFSET) return false;\n\n  dfsan_label_info *info = get_label_info(label);\n  if (is_indexof_op(info->op)) return true;\n\n  // Recursively check dependencies\n  if (info->l1 >= CONST_OFFSET && label_contains_indexof(info->l1)) return true;\n  if (info->l2 >= CONST_OFFSET && label_contains_indexof(info->l2)) return true;\n\n  return false;\n}\n\n// Decode Z3's escaped string format (e.g., \"\\u{1}\\u{2}\" -> bytes 0x01, 0x02)\nstatic std::vector<uint8_t> decode_z3_string(const std::string &str) {\n  std::vector<uint8_t> result;\n  size_t i = 0;\n  while (i < str.size()) {\n    if (i + 3 < str.size() && str[i] == '\\\\' && str[i+1] == 'u' && str[i+2] == '{') {\n      // Parse \\u{XXXX} escape sequence\n      size_t end = str.find('}', i + 3);\n      if (end != std::string::npos) {\n        std::string hex_str = str.substr(i + 3, end - (i + 3));\n        uint32_t code_point = std::stoul(hex_str, nullptr, 16);\n        // For simplicity, assume code points fit in a byte (for ASCII/Latin-1)\n        result.push_back((uint8_t)(code_point & 0xFF));\n        i = end + 1;\n        
continue;\n      }\n    }\n    // Regular character\n    result.push_back((uint8_t)str[i]);\n    i++;\n  }\n  return result;\n}\n\nvoid Z3AstParser::dump_value_cache(dfsan_label label) {\n  if (label >= value_cache_.size()) {\n    throw z3::exception(\"invalid label for value cache\");\n  }\n  dfsan_label_info *info = get_label_info(label);\n  fprintf(stderr, \"label %u = l1: %u, l2: %u, op: %s, size: %u, op1: %lu, op2: %lu\\n\",\n          label, info->l1, info->l2, get_op_name(info->op).c_str(), info->size,\n          info->op1.i, info->op2.i);\n  fprintf(stderr, \"recalcuated value: %lu = op1: %lu, op2: %lu\\n\",\n          value_cache_[label], value_cache_[info->l1], value_cache_[info->l2]);\n  if (info->l1 != 0)\n    dump_value_cache(info->l1);\n  if (info->l2 != 0)\n    dump_value_cache(info->l2);\n}\n\nZ3AstParser::Z3AstParser(void *base, size_t size, z3::context &context)\n  : ASTParser(base, size), context_(context) {\n    input_name_format = \"input-%u-%u\";\n    atoi_name_format = \"atoi-%u-%u-%d-%lu\";       // input, offset, base, original_len\n    strlen_name_format = \"strlen-%u-%u-%lu-%u\";   // input, offset, original_len, null_from_input\n  }\n\nint Z3AstParser::restart(std::vector<input_t> &inputs) {\n\n  // reset caches\n  memcmp_cache_.clear();\n  string_ranges_.clear();\n  tsize_cache_.clear();\n  tsize_cache_.resize(1); // reserve for CONST_OFFSET\n  for (Z3_ast ast : expr_cache_) {\n    if (ast != nullptr) {\n      Z3_dec_ref(context_, ast); // decrement reference count\n    }\n  }\n  expr_cache_.clear();\n  expr_cache_.resize(1); // reserve for CONST_OFFSET\n  deps_cache_.clear();\n  deps_cache_.resize(1); // reserve for CONST_OFFSET\n#if FILTER_WRONG_AST\n  value_cache_.clear();\n  value_cache_.resize(1); // reserve for CONST_OFFSET\n#endif\n  string_info_cache_.clear();\n  branch_deps_.clear();\n  branch_deps_.resize(inputs.size());\n\n  for (size_t i = 0; i < inputs.size(); i++) {\n    auto &input = inputs[i];\n#if FILTER_WRONG_AST\n    
inputs_cache_.emplace_back(input.first, input.second);\n#endif\n    // resize branch_deps_\n    branch_deps_[i].resize(input.second);\n  }\n\n  return 0;\n}\n\nz3::expr Z3AstParser::read_concrete(dfsan_label label, uint16_t size) {\n  auto itr = memcmp_cache_.find(label);\n  if (itr == memcmp_cache_.end()) {\n    throw z3::exception(\"cannot find memcmp content\");\n  }\n\n  z3::expr val = context_.bv_val(itr->second[0], 8);\n  for (uint8_t i = 1; i < size; i++) {\n    val = z3::concat(context_.bv_val(itr->second[i], 8), val);\n  }\n  return val;\n}\n\nstatic z3::expr get_cmd(z3::expr const &lhs, z3::expr const &rhs, uint32_t predicate) {\n  switch (predicate) {\n    case __dfsan::bveq:  return lhs == rhs;\n    case __dfsan::bvneq: return lhs != rhs;\n    case __dfsan::bvugt: return z3::ugt(lhs, rhs);\n    case __dfsan::bvuge: return z3::uge(lhs, rhs);\n    case __dfsan::bvult: return z3::ult(lhs, rhs);\n    case __dfsan::bvule: return z3::ule(lhs, rhs);\n    case __dfsan::bvsgt: return lhs > rhs;\n    case __dfsan::bvsge: return lhs >= rhs;\n    case __dfsan::bvslt: return lhs < rhs;\n    case __dfsan::bvsle: return lhs <= rhs;\n    default:\n      throw z3::exception(\"unsupported predicate\");\n      break;\n  }\n  // should never reach here\n  // std::unreachable();\n}\n\nstatic bool eval_icmp(uint16_t predicate, uint64_t val1, uint64_t val2, uint8_t bits) {\n  switch (predicate) {\n    case __dfsan::bveq:  return val1 == val2;\n    case __dfsan::bvneq: return val1 != val2;\n    case __dfsan::bvugt: return val1 > val2;\n    case __dfsan::bvuge: return val1 >= val2;\n    case __dfsan::bvult: return val1 < val2;\n    case __dfsan::bvule: return val1 <= val2;\n    case __dfsan::bvsgt:\n      switch(bits) {\n        case 8:  return (int8_t)val1 > (int8_t)val2;\n        case 16: return (int16_t)val1 > (int16_t)val2;\n        case 32: return (int32_t)val1 > (int32_t)val2;\n        case 64: return (int64_t)val1 > (int64_t)val2;\n        default:\n          throw 
z3::exception(\"unsupported bits for signed comparison\");\n      }\n    case __dfsan::bvsge:\n      switch(bits) {\n        case 8:  return (int8_t)val1 >= (int8_t)val2;\n        case 16: return (int16_t)val1 >= (int16_t)val2;\n        case 32: return (int32_t)val1 >= (int32_t)val2;\n        case 64: return (int64_t)val1 >= (int64_t)val2;\n        default:\n          throw z3::exception(\"unsupported bits for signed comparison\");\n      }\n    case __dfsan::bvslt:\n      switch(bits) {\n        case 8:  return (int8_t)val1 < (int8_t)val2;\n        case 16: return (int16_t)val1 < (int16_t)val2;\n        case 32: return (int32_t)val1 < (int32_t)val2;\n        case 64: return (int64_t)val1 < (int64_t)val2;\n        default:\n          throw z3::exception(\"unsupported bits for signed comparison\");\n      }\n    case __dfsan::bvsle:\n      switch(bits) {\n        case 8:  return (int8_t)val1 <= (int8_t)val2;\n        case 16: return (int16_t)val1 <= (int16_t)val2;\n        case 32: return (int32_t)val1 <= (int32_t)val2;\n        case 64: return (int64_t)val1 <= (int64_t)val2;\n        default:\n          throw z3::exception(\"unsupported bits for signed comparison\");\n      }\n    default:\n      throw z3::exception(\"unsupported predicate\");\n      return false; // unsupported predicate\n  }\n  // should never reach here\n  // std::unreachable();\n}\n\nz3::expr Z3AstParser::serialize(dfsan_label label, input_dep_set_t &deps) {\n  if (label < CONST_OFFSET || label == __dfsan::kInitializingLabel) {\n    throw z3::exception(\"invalid label\");\n  }\n\n  dfsan_label last_label = expr_cache_.size() - 1;\n  if (label > expr_cache_.capacity()) {\n    // reserve more caches if needed\n    tsize_cache_.reserve(label + SIZE_INCREMENT);\n    expr_cache_.reserve(label + SIZE_INCREMENT);\n    deps_cache_.reserve(label + SIZE_INCREMENT);\n#if FILTER_WRONG_AST\n    value_cache_.reserve(label + SIZE_INCREMENT);\n#endif\n  }\n\n  for (dfsan_label l = last_label + 1; l <= label; 
l++) {\n\n#if FILTER_WRONG_AST\n#define RECORD_VALUE(value) \\\n  value_cache_.emplace_back((uint64_t)(value))\n#else\n#define RECORD_VALUE(value) \\\n  do { } while (0)\n#endif\n\n    dfsan_label_info *info = get_label_info(l);\n    // fprintf(stderr, \"%u = (l1:%u, l2:%u, op:%s, size:%u, op1:%lu, op2:%lu)\\n\",\n    //         l, info->l1, info->l2, get_op_name(info->op).c_str(),\n    //         info->size, info->op1.i, info->op2.i);\n    input_dep_set_t &input_deps = deps_cache_.emplace_back();\n\n    // special ops\n    char name[256];\n    if (info->op == 0) {\n      // input\n      uint32_t offset = info->op1.i; // legacy: offset in op1\n      uint32_t input = info->op2.i;\n      snprintf(name, sizeof(name), input_name_format, input, offset);\n      z3::symbol symbol = context_.str_symbol(name);\n      z3::sort sort = context_.bv_sort(8);\n      tsize_cache_.emplace_back(1);\n      input_deps.insert(std::make_pair(input, offset));\n      // caching is not super helpful\n      cache_expr(l, context_.constant(symbol, sort));\n      RECORD_VALUE(inputs_cache_[input].first[offset]);\n      continue;\n    } else if (info->op == __dfsan::Load) {\n      uint32_t offset = get_label_info(info->l1)->op1.i; // legacy: offset in op1\n      uint32_t input = get_label_info(info->l1)->op2.i;\n      snprintf(name, sizeof(name), input_name_format, input, offset);\n      z3::symbol symbol = context_.str_symbol(name);\n      z3::sort sort = context_.bv_sort(8);\n      z3::expr out = context_.constant(symbol, sort);\n      input_deps.insert(std::make_pair(input, offset));\n#if FILTER_WRONG_AST\n      uint64_t val = inputs_cache_[input].first[offset];\n#endif\n      for (uint32_t i = 1; i < info->l2; i++) {\n        snprintf(name, sizeof(name), input_name_format, input, offset + i);\n        symbol = context_.str_symbol(name);\n        out = z3::concat(context_.constant(symbol, sort), out);\n        input_deps.insert(std::make_pair(input, offset + i));\n#if FILTER_WRONG_AST\n     
   val |= (uint64_t)inputs_cache_[input].first[offset + i] << (i * 8);\n#endif\n      }\n      tsize_cache_.emplace_back(1);\n      cache_expr(l, out);\n      RECORD_VALUE(val);\n      continue;\n    } else if (info->op == __dfsan::ZExt) {\n      z3::expr base = get_cached_expr(info->l1, input_deps);\n      if (base.is_bool()) // dirty hack since llvm lacks bool\n        base = z3::ite(base, context_.bv_val(1, 1),\n                            context_.bv_val(0, 1));\n      uint32_t base_size = base.get_sort().bv_size();\n      tsize_cache_.emplace_back(tsize_cache_[info->l1]);\n      cache_expr(l, z3::zext(base, info->size - base_size));\n      RECORD_VALUE(value_cache_[info->l1] & ((1UL << base_size) - 1));\n      continue;\n    } else if (info->op == __dfsan::SExt) {\n      z3::expr base = get_cached_expr(info->l1, input_deps);\n      uint32_t base_size = base.get_sort().bv_size();\n      tsize_cache_.emplace_back(tsize_cache_[info->l1]);\n      cache_expr(l, z3::sext(base, info->size - base_size));\n      RECORD_VALUE((int64_t)(value_cache_[info->l1] & ((1UL << base_size) - 1)));\n      continue;\n    } else if (info->op == __dfsan::Trunc) {\n      z3::expr base = get_cached_expr(info->l1, input_deps);\n      tsize_cache_.emplace_back(tsize_cache_[info->l1]);\n      cache_expr(l, base.extract(info->size - 1, 0));\n      RECORD_VALUE(value_cache_[info->l1] & ((1UL << info->size) - 1));\n      continue;\n    } else if (info->op == __dfsan::IntToPtr) {\n      z3::expr e = get_cached_expr(info->l1, input_deps);\n      tsize_cache_.emplace_back(tsize_cache_[info->l1]);\n      cache_expr(l, e);\n      RECORD_VALUE(value_cache_[info->l1]);\n      continue;\n    } else if (info->op == __dfsan::PtrToInt) {\n      // PtrToInt converts a pointer to integer\n      // If the source is a string op result, convert the index to bitvector\n      if (info->l1 >= CONST_OFFSET) {\n        dfsan_label_info *src_info = get_label_info(info->l1);\n        if (src_info->op >= 
__dfsan::fstr_op_start && src_info->op < __dfsan::fstr_op_end) {\n          // String op result - the \"pointer\" is semantically the index\n          // Convert the Int expression to a bitvector for downstream ops\n          z3::expr idx = get_cached_expr(info->l1, input_deps);\n          z3::expr bv_idx = z3::int2bv(info->size, idx);\n          tsize_cache_.emplace_back(tsize_cache_[info->l1]);\n          cache_expr(l, bv_idx);\n          RECORD_VALUE(value_cache_[info->l1]);\n          continue;\n        }\n      }\n      // For other PtrToInt cases, pass through (shouldn't normally reach here)\n      z3::expr e = get_cached_expr(info->l1, input_deps);\n      tsize_cache_.emplace_back(tsize_cache_[info->l1]);\n      cache_expr(l, e);\n      RECORD_VALUE(value_cache_[info->l1]);\n      continue;\n    } //FIXME: other casting ops (BitCast)?\n    // symsan-defined\n    else if (info->op == __dfsan::Extract) {\n      z3::expr base = get_cached_expr(info->l1, input_deps);\n      tsize_cache_.emplace_back(tsize_cache_[info->l1]);\n      cache_expr(l, base.extract((info->op2.i + info->size) - 1, info->op2.i));\n      RECORD_VALUE((value_cache_[info->l1] >> info->op2.i) &\n                    ((1UL << info->size) - 1));\n      continue;\n    } else if (info->op == __dfsan::Not) {\n      if (info->l2 == 0 || info->size != 1) {\n        throw z3::exception(\"invalid Not operation\");\n      }\n      z3::expr e = get_cached_expr(info->l2, input_deps);\n      tsize_cache_.emplace_back(tsize_cache_[info->l2]);\n      if (!e.is_bool()) {\n        throw z3::exception(\"Only LNot should be recorded\");\n      }\n      cache_expr(l, !e);\n      RECORD_VALUE(!value_cache_[info->l2]);\n      continue;\n    } else if (info->op == __dfsan::Neg) {\n      if (info->l2 == 0) {\n        throw z3::exception(\"invalid Neg predicate\");\n      }\n      z3::expr e = get_cached_expr(info->l2, input_deps);\n      tsize_cache_.emplace_back(tsize_cache_[info->l2]);\n      cache_expr(l, -e);\n   
   RECORD_VALUE(-value_cache_[info->l2]);\n      continue;\n    }\n    // higher-order\n    else if (info->op == __dfsan::fmemcmp) {\n      z3::expr op1 = (info->l1 >= CONST_OFFSET) ?\n                     get_cached_expr(info->l1, input_deps) :\n                     read_concrete(l, info->size); // memcmp size in bytes\n      if (info->l2 < CONST_OFFSET) {\n        throw z3::exception(\"invalid memcmp operand2\");\n      }\n      z3::expr op2 = get_cached_expr(info->l2, input_deps);\n      tsize_cache_.emplace_back(1);\n      z3::expr e = z3::ite(op1 == op2, context_.bv_val(0, 32),\n                                       context_.bv_val(1, 32));\n      cache_expr(l, e);\n      RECORD_VALUE(0); // memcmp result is always 0 or 1\n      continue;\n    } else if (info->op == __dfsan::fsize) {\n      // file size\n      z3::symbol symbol = context_.str_symbol(\"fsize\");\n      z3::sort sort = context_.bv_sort(info->size);\n      z3::expr base = context_.constant(symbol, sort);\n      tsize_cache_.emplace_back(1);\n      has_fsize = true; // XXX: set a flag\n      // don't cache because of deps\n      if (info->op1.i) {\n        // minus the offset stored in op1\n        z3::expr offset = context_.bv_val((uint64_t)info->op1.i, info->size);\n        cache_expr(l, base - offset);\n      } else {\n        cache_expr(l, base);\n      }\n      RECORD_VALUE(0); // FIXME: map to input size\n      continue;\n    } else if (info->op == __dfsan::fatoi) {\n      // string to integer conversion\n      assert(info->l1 == 0 && info->l2 >= CONST_OFFSET);\n      dfsan_label_info *src = get_label_info(info->l2);\n      assert(src->op == __dfsan::Load);\n      uint32_t offset = get_label_info(src->l1)->op1.i; // legacy: offset in op1\n      uint32_t input = get_label_info(src->l1)->op2.i;\n      int base = info->op1.i;\n      uint64_t orig_len = info->op2.i;\n      // FIXME: dependencies?\n      tsize_cache_.emplace_back(1);\n      // XXX: hacky, avoid string theory\n      
snprintf(name, sizeof(name), atoi_name_format, input, offset, base, orig_len);\n      z3::symbol symbol = context_.str_symbol(name);\n      z3::sort sort = context_.bv_sort(info->size);\n      cache_expr(l, context_.constant(symbol, sort));\n      RECORD_VALUE(0); // FIXME: map to atoi result?\n      continue;\n    } else if (info->op == __dfsan::fstrlen) {\n      // Symbolic string length\n      // - l1 = 0 (following fsize/fatoi pattern)\n      // - l2 = content label (for input dependencies)\n      // - op1 = null_from_input flag (1 if null terminator is from input, 0 if programmatic)\n      // - op2 = actual length\n\n      // Extract offset and input_id from content label (l2)\n      uint32_t offset = 0;\n      uint32_t input_id = 0;\n      uint32_t null_from_input = info->op1.i;\n\n      if (info->l2 >= CONST_OFFSET) {\n        // Walk the content label to find base input offset\n        dfsan_label_info *str_info = get_label_info(info->l2);\n\n        // Handle Concat chain (common for multi-byte strings)\n        while (str_info->op == __dfsan::Concat && str_info->l1 >= CONST_OFFSET) {\n          str_info = get_label_info(str_info->l1);\n        }\n\n        // Base input labels have op=0, offset in op1\n        // (created by dfsan_create_label, not dfsan_union)\n        if (str_info->op == 0) {\n          // Direct input byte - offset stored in op1\n          offset = str_info->op1.i;\n          input_id = 0; // default input\n        } else if (str_info->op == __dfsan::Load) {\n          // Load from memory - get offset from pointer label\n          dfsan_label_info *ptr_info = get_label_info(str_info->l1);\n          offset = ptr_info->op1.i;\n          input_id = ptr_info->op2.i;\n        }\n      }\n\n      tsize_cache_.emplace_back(1);\n      // Create symbolic variable: strlen-input-offset-origlen-null_from_input\n      snprintf(name, sizeof(name), strlen_name_format, input_id, offset,\n               info->op2.i, null_from_input);\n      z3::symbol 
symbol = context_.str_symbol(name);\n      z3::sort sort = context_.bv_sort(info->size);\n      cache_expr(l, context_.constant(symbol, sort));\n      RECORD_VALUE(info->op2.i); // actual length for value cache\n      continue;\n    } else if (info->op == __dfsan::fstrchr) {\n      // strchr/memchr: find character in string\n      // l1 = source pointer label (content bytes, fsubstr, or previous strchr for chaining)\n      // l2 = c_label (target character - may be symbolic!)\n      // op1 = haystack pointer (for concrete content retrieval)\n      // op2 = char value\n      // size = haystack length if haystack concrete, else 0\n\n      // Build source string from l1 (content label)\n      z3::expr haystack_str = context_.string_val(\"\");\n      z3::expr start_offset = context_.int_val(0);\n\n      dfsan_label haystack_label = info->l1;\n      dfsan_label concrete_label = l;  // Track which label sent the concrete content\n      if (haystack_label >= CONST_OFFSET) {\n        // Symbolic haystack\n        dfsan_label_info *src_info = get_label_info(haystack_label);\n\n        if (is_content_string_op(src_info->op)) {\n          // l1 is a fsubstr/strcat - use the cached substr expression directly\n          haystack_str = get_cached_expr(haystack_label, input_deps);\n        } else if (is_indexof_op(src_info->op)) {\n          if (src_info->op == __dfsan::fstr_off) {\n            // Chained call via pointer arithmetic: strchr(t1 + N, c)\n            // Use build_string_from_label which handles fstr_off specially\n            // (creates insertion point if beyond end, or suffix if within bounds)\n            haystack_label = src_info->l1;\n            // start_offset stays 0 since we're searching from the start of the suffix/insertion point\n          } else {\n            // Chained call: search starts after previous match\n            z3::expr prev_idx = get_cached_expr(info->l1, input_deps);\n            start_offset = prev_idx + 1;\n            // Walk back to 
find original haystack content\n            haystack_label = info->l1;\n            dfsan_label_info *chain_info = src_info;\n            while (is_indexof_op(chain_info->op)) {\n              concrete_label = haystack_label;  // Save before updating\n              haystack_label = chain_info->l1;\n              if (haystack_label < CONST_OFFSET) break;\n              chain_info = get_label_info(haystack_label);\n            }\n          }\n          // Build string from original haystack label\n          if (haystack_label >= CONST_OFFSET) {\n              haystack_str = build_string_from_label(haystack_label, input_deps);\n          }\n        } else {\n          // Build string from byte content (Load, Concat, or single byte)\n          haystack_str = build_string_from_label(haystack_label, input_deps);\n        }\n      }\n\n      if (haystack_label < CONST_OFFSET) {\n        // Concrete haystack - retrieve from memcmp_cache using concrete_label\n        auto it = memcmp_cache_.find(concrete_label);\n        if (it != memcmp_cache_.end()) {\n          dfsan_label_info *concrete_info = get_label_info(concrete_label);\n          std::string haystack(reinterpret_cast<char*>(it->second.get()), concrete_info->size);\n          haystack_str = context_.string_val(haystack);\n        } else {\n          throw z3::exception(\"cannot find haystack content for strchr\");\n        }\n      }\n\n      // Get target character (concrete or symbolic)\n      // Use z3::unit to create single-char string from integer code point\n      z3::expr code(context_);\n      if (info->l2 == 0) {\n        // Concrete character\n        uint8_t c = (uint8_t)info->op2.i;\n        code = context_.int_val(c);\n      } else {\n        // Symbolic character - convert bitvector to int\n        z3::expr c_expr = get_cached_expr(info->l2, input_deps);\n        if (c_expr.get_sort().bv_size() != 8) {\n          c_expr = c_expr.extract(7, 0);\n        }\n        code = z3::bv2int(c_expr, false);\n    
  }\n      // Use Z3_mk_string_from_code to convert int to single-char String\n      z3::expr target_str(context_, Z3_mk_string_from_code(context_, code));\n\n      z3::expr idx = z3::indexof(haystack_str, target_str, start_offset);\n\n      tsize_cache_.emplace_back(1);\n      cache_expr(l, idx);  // cache the index expression (Int sort)\n      RECORD_VALUE(0);  // Placeholder - validation skipped for indexOf ops\n      continue;\n    } else if (info->op == __dfsan::fstrrchr) {\n      // strrchr/memrchr: find LAST occurrence of character\n      // l1 = source pointer label (content bytes or fsubstr)\n      // l2 = c_label (target character - may be symbolic!)\n      // op1 = haystack pointer (for concrete content retrieval)\n      // op2 = char value\n      // size = haystack length if haystack concrete, else 0\n\n      // Build source string from l1 (content label or fsubstr)\n      z3::expr haystack_str = context_.string_val(\"\");\n      if (info->l1 >= CONST_OFFSET) {\n        // Symbolic haystack\n        dfsan_label_info *src_info = get_label_info(info->l1);\n        if (is_content_string_op(src_info->op)) {\n          // l1 is a fsubstr/strcat - use the cached substr expression directly\n          haystack_str = get_cached_expr(info->l1, input_deps);\n        } else {\n          haystack_str = build_string_from_label(info->l1, input_deps);\n        }\n      } else {\n        // Concrete haystack - retrieve from memcmp_cache\n        auto it = memcmp_cache_.find(l);\n        if (it != memcmp_cache_.end()) {\n          // Use info->size for haystack length (set in runtime)\n          std::string haystack(reinterpret_cast<char*>(it->second.get()), info->size);\n          haystack_str = context_.string_val(haystack);\n        } else {\n          throw z3::exception(\"cannot find haystack content for strrchr\");\n        }\n      }\n\n      // Get target character (concrete or symbolic)\n      // Use z3::unit to create single-char string from integer code 
point\n      z3::expr code(context_);\n      if (info->l2 == 0) {\n        // Concrete character\n        uint8_t c = (uint8_t)info->op2.i;\n        code = context_.int_val(c);\n      } else {\n        // Symbolic character - convert bitvector to int\n        z3::expr c_expr = get_cached_expr(info->l2, input_deps);\n        if (c_expr.get_sort().bv_size() != 8) {\n          c_expr = c_expr.extract(7, 0);\n        }\n        code = z3::bv2int(c_expr, false);\n      }\n      // Use Z3_mk_string_from_code to convert int to single-char String\n      z3::expr target_str(context_, Z3_mk_string_from_code(context_, code));\n\n      // For reverse search, find the last occurrence\n      z3::expr idx = z3::last_indexof(haystack_str, target_str);\n\n      tsize_cache_.emplace_back(1);\n      cache_expr(l, idx);\n      RECORD_VALUE(0);  // Placeholder - validation skipped for indexOf ops\n      continue;\n    } else if (info->op == __dfsan::fstrstr) {\n      // strstr: find substring\n      // l1 = haystack content label (for chaining or byte content)\n      // l2 = needle_label (may be symbolic!)\n      // op1 = haystack pointer (for concrete content retrieval)\n      // op2 = needle pointer (for concrete content retrieval)\n      // size = haystack length if haystack concrete, else needle length if needle concrete, else 0\n\n      // Build haystack string from l1\n      z3::expr haystack_str = context_.string_val(\"\");\n      z3::expr start_offset = context_.int_val(0);\n\n      dfsan_label haystack_label = info->l1;\n      dfsan_label concrete_label = l;  // Track which label sent the concrete content\n      if (haystack_label >= CONST_OFFSET) {\n        // Symbolic haystack\n        dfsan_label_info *src_info = get_label_info(haystack_label);\n\n        if (is_content_string_op(src_info->op)) {\n          // l1 is a fsubstr/strcat - use the cached substr expression directly\n          haystack_str = get_cached_expr(haystack_label, input_deps);\n        } else if 
(is_indexof_op(src_info->op)) {\n          if (src_info->op == __dfsan::fstr_off) {\n            // Chained call via pointer arithmetic\n            haystack_label = src_info->l1;\n          } else {\n            // Chained call: search starts after previous match\n            z3::expr prev_idx = get_cached_expr(info->l1, input_deps);\n            start_offset = prev_idx + 1;\n            // Walk back to find original haystack content\n            haystack_label = info->l1;\n            dfsan_label_info *chain_info = src_info;\n            while (is_indexof_op(chain_info->op)) {\n              concrete_label = haystack_label;  // Save before updating\n              haystack_label = chain_info->l1;\n              if (haystack_label < CONST_OFFSET) break;\n              chain_info = get_label_info(haystack_label);\n            }\n          }\n          // Build string from original haystack label\n          if (haystack_label >= CONST_OFFSET) {\n            haystack_str = build_string_from_label(haystack_label, input_deps);\n          }\n        } else {\n          // Build string from byte content\n          haystack_str = build_string_from_label(haystack_label, input_deps);\n        }\n      }\n\n      if (haystack_label < CONST_OFFSET) {\n        // Concrete haystack - retrieve from memcmp_cache using concrete_label\n        auto it = memcmp_cache_.find(concrete_label);\n        if (it != memcmp_cache_.end()) {\n          dfsan_label_info *concrete_info = get_label_info(concrete_label);\n          std::string haystack(reinterpret_cast<char*>(it->second.get()), concrete_info->size);\n          haystack_str = context_.string_val(haystack);\n        } else {\n          throw z3::exception(\"cannot find haystack content for strstr\");\n        }\n      }\n\n      // Get needle (concrete or symbolic)\n      z3::expr needle_str(context_);\n      if (info->l2 == 0) {\n        // Concrete needle - get from cache\n        auto it = memcmp_cache_.find(l);\n        if (it != 
memcmp_cache_.end()) {\n          // Build string from cached bytes using info->size for length\n          std::string needle(reinterpret_cast<char*>(it->second.get()), info->size);\n          needle_str = context_.string_val(needle);\n        } else {\n          throw z3::exception(\"cannot find concrete needle content\");\n        }\n      } else {\n        // Symbolic needle - build string from l2 (Load of tainted buffer)\n        needle_str = build_string_from_label(info->l2, input_deps);\n      }\n\n      z3::expr idx = z3::indexof(haystack_str, needle_str, start_offset);\n\n      tsize_cache_.emplace_back(1);\n      cache_expr(l, idx);\n      RECORD_VALUE(0);  // Placeholder - validation skipped for indexOf ops\n      continue;\n    } else if (info->op == __dfsan::fstrpbrk) {\n      // strpbrk: find first character from accept set\n      // l1 = source content label\n      // l2 = accept_label (may be symbolic)\n      // op1 = haystack pointer (for concrete content retrieval)\n      // op2 = accept pointer (for concrete content retrieval)\n      // size = haystack length if haystack concrete, else accept length if accept concrete, else 0\n\n      // Build source string from l1\n      z3::expr haystack_str = context_.string_val(\"\");\n      z3::expr start_offset = context_.int_val(0);\n\n      dfsan_label haystack_label = info->l1;\n      dfsan_label concrete_label = l;  // Track which label sent the concrete content\n      if (haystack_label >= CONST_OFFSET) {\n        // Symbolic haystack\n        dfsan_label_info *src_info = get_label_info(haystack_label);\n\n        if (is_content_string_op(src_info->op)) {\n          haystack_str = get_cached_expr(haystack_label, input_deps);\n        } else if (is_indexof_op(src_info->op)) {\n          if (src_info->op == __dfsan::fstr_off) {\n            // Chained call via pointer arithmetic\n            haystack_label = src_info->l1;\n          } else {\n            // Chained call\n            z3::expr prev_idx = 
get_cached_expr(info->l1, input_deps);\n            start_offset = prev_idx + 1;\n            haystack_label = info->l1;\n            dfsan_label_info *chain_info = src_info;\n            while (is_indexof_op(chain_info->op)) {\n              concrete_label = haystack_label;  // Save before updating\n              haystack_label = chain_info->l1;\n              if (haystack_label < CONST_OFFSET) break;\n              chain_info = get_label_info(haystack_label);\n            }\n          }\n          // Build string from original haystack label\n          if (haystack_label >= CONST_OFFSET) {\n            haystack_str = build_string_from_label(haystack_label, input_deps);\n          }\n        } else {\n          haystack_str = build_string_from_label(haystack_label, input_deps);\n        }\n      }\n\n      if (haystack_label < CONST_OFFSET) {\n        // Concrete haystack - retrieve from memcmp_cache using concrete_label\n        auto it = memcmp_cache_.find(concrete_label);\n        if (it != memcmp_cache_.end()) {\n          dfsan_label_info *concrete_info = get_label_info(concrete_label);\n          std::string haystack(reinterpret_cast<char*>(it->second.get()), concrete_info->size);\n          haystack_str = context_.string_val(haystack);\n        } else {\n          throw z3::exception(\"cannot find haystack content for strpbrk\");\n        }\n      }\n\n      // Get accept character set\n      z3::expr idx(context_);\n      if (info->l2 == 0) {\n        // Concrete accept set - get from cache\n        auto it = memcmp_cache_.find(l);\n        if (it != memcmp_cache_.end() && info->size > 0) {\n          // Simplified approach: use first character's index as representative\n          // and add constraint that any character could be found\n          // This works well for NULL checks (if (strpbrk(s, accept)))\n          uint8_t first_c = it->second.get()[0];\n          z3::expr code = context_.int_val(first_c);\n          z3::expr char_str(context_, 
Z3_mk_string_from_code(context_, code));\n          idx = z3::indexof(haystack_str, char_str, start_offset);\n        } else {\n          throw z3::exception(\"cannot find concrete accept content\");\n        }\n      } else {\n        // Symbolic accept set - build string from label\n        z3::expr accept_str = build_string_from_label(info->l2, input_deps);\n        // Get the length of accept string\n        z3::expr accept_len(context_, Z3_mk_seq_length(context_, accept_str));\n        // strpbrk returns NULL if accept is empty, so: if (len > 0) indexOf else -1\n        z3::expr first_char_str(context_, Z3_mk_seq_extract(context_, accept_str, context_.int_val(0), context_.int_val(1)));\n        z3::expr idx_if_nonempty = z3::indexof(haystack_str, first_char_str, start_offset);\n        idx = z3::ite(accept_len > 0, idx_if_nonempty, context_.int_val(-1));\n      }\n\n      tsize_cache_.emplace_back(1);\n      cache_expr(l, idx.simplify());\n      RECORD_VALUE(0);  // Placeholder - validation skipped for indexOf ops\n      continue;\n    } else if (info->op == __dfsan::fsubstr) {\n      // fsubstr: substring with symbolic position/length\n      // l1 = original content label (full haystack from previous string op)\n      // l2 = string op label (position or length depending on mode)\n      // op1 = concrete length n\n      // op2 = 0 for prefix mode (from 0 to l2), 1 for suffix mode (from l2 to end)\n\n      // Build the full string from l1 (the original content)\n      z3::expr full_str = context_.string_val(\"\");\n      if (info->l1 >= CONST_OFFSET) {\n        full_str = build_string_from_label(info->l1, input_deps);\n      }\n\n      z3::expr substr_expr(context_);\n      bool suffix_mode = (info->op2.i == 1);\n\n      if (suffix_mode) {\n        // Suffix mode: substr(str, start_pos, remaining_len)\n        // l2 is fstr_off - need to extract the start position\n        z3::expr start_pos = context_.int_val(0);\n        if (info->l2 >= CONST_OFFSET) {\n     
     dfsan_label_info *l2_info = get_label_info(info->l2);\n          if (l2_info->op == __dfsan::fstr_off) {\n            // fstr_off: l1 = indexOf op, op2 = byte offset\n            // start_pos = indexOf_result + offset\n            z3::expr base_idx = get_cached_expr(l2_info->l1, input_deps);\n            start_pos = base_idx + context_.int_val((int64_t)l2_info->op2.i);\n          } else {\n            // Direct indexOf op\n            start_pos = get_cached_expr(info->l2, input_deps);\n          }\n        }\n        // Use large length to get \"rest of string\" - Z3 will clamp to actual length\n        z3::expr full_len(context_, Z3_mk_seq_length(context_, full_str));\n        z3::expr len_expr = full_len - start_pos;\n        substr_expr = z3::expr(context_, Z3_mk_seq_extract(context_,\n                                                           full_str,\n                                                           start_pos,\n                                                           len_expr));\n      } else {\n        // Prefix mode: substr(str, 0, len)\n        z3::expr len_expr = context_.int_val((int64_t)info->op1.i);\n        if (info->l2 >= CONST_OFFSET) {\n          // l2 is the string op label - its cached value is the index/length\n          len_expr = get_cached_expr(info->l2, input_deps);\n        }\n        substr_expr = z3::expr(context_, Z3_mk_seq_extract(context_,\n                                                           full_str,\n                                                           context_.int_val(0),\n                                                           len_expr));\n      }\n\n      tsize_cache_.emplace_back(1);\n      cache_expr(l, substr_expr);\n      // The substr itself doesn't have a numeric value, but downstream ops will use it\n      RECORD_VALUE(info->op1.i);\n      continue;\n    } else if (info->op == __dfsan::fstrcat) {\n      // strcat: string concatenation\n      // l1 = dest string label\n      // l2 = src 
string label\n      // op1 = dest pointer (for concrete content access)\n      // op2 = src pointer (for concrete content access)\n      // size = length of concrete operand (for memcmp_cache), 0 if both symbolic\n\n      z3::expr dest_str = context_.string_val(\"\");\n      z3::expr src_str = context_.string_val(\"\");\n\n      // Build dest string from l1\n      // Only fsubstr and fstrcat cache String expressions; other string ops cache Int (position)\n      if (info->l1 >= CONST_OFFSET) {\n        dfsan_label_info *l1_info = get_label_info(info->l1);\n        if (is_content_string_op(l1_info->op)) {\n          dest_str = get_cached_expr(info->l1, input_deps);\n        } else {\n          dest_str = build_string_from_label(info->l1, input_deps);\n        }\n      } else {\n        // Concrete dest - get from memcmp_cache\n        auto it = memcmp_cache_.find(l);\n        if (it != memcmp_cache_.end()) {\n          std::string s(reinterpret_cast<char*>(it->second.get()), info->size);\n          dest_str = context_.string_val(s);\n        } else {\n          throw z3::exception(\"cannot find strcat content\");\n        }\n      }\n\n      // Build src string from l2\n      if (info->l2 >= CONST_OFFSET) {\n        dfsan_label_info *l2_info = get_label_info(info->l2);\n        if (is_content_string_op(l2_info->op)) {\n          src_str = get_cached_expr(info->l2, input_deps);\n        } else {\n          src_str = build_string_from_label(info->l2, input_deps);\n        }\n      } else {\n        // Concrete src - get from memcmp_cache\n        auto it = memcmp_cache_.find(l);\n        if (it != memcmp_cache_.end()) {\n          std::string s(reinterpret_cast<char*>(it->second.get()), info->size);\n          src_str = context_.string_val(s);\n        } else {\n          throw z3::exception(\"cannot find strcat content\");\n        }\n      }\n\n      // Create Z3 string concatenation\n      z3::expr concat_result = z3::concat(dest_str, src_str);\n\n      
tsize_cache_.emplace_back(1);\n      cache_expr(l, concat_result);\n      RECORD_VALUE(0);\n      continue;\n    } else if (info->op == __dfsan::fstrcmp) {\n      // String comparison using Z3 string theory\n      // l1 = first string label (may be fsubstr or content)\n      // l2 = second string label (may be fsubstr or content)\n      // size = comparison length (in bytes, for memcmp_cache lookup)\n      // op1 = s1 pointer (for memcmp_cache lookup)\n      // op2 = s2 pointer (for memcmp_cache lookup)\n\n      z3::expr str1 = context_.string_val(\"\");\n      z3::expr str2 = context_.string_val(\"\");\n\n      // Build first string\n      if (info->l1 >= CONST_OFFSET) {\n        dfsan_label_info *l1_info = get_label_info(info->l1);\n        if (is_content_string_op(l1_info->op)) {\n          // fsubstr/fstrcat - get the cached String expression\n          str1 = get_cached_expr(info->l1, input_deps);\n        } else {\n          // Regular content - build string from labels\n          str1 = build_string_from_label(info->l1, input_deps);\n        }\n      } else {\n        // Concrete - get from memcmp_cache\n        auto it = memcmp_cache_.find(l);\n        if (it != memcmp_cache_.end()) {\n          std::string s(reinterpret_cast<char*>(it->second.get()), info->size);\n          str1 = context_.string_val(s);\n        } else {\n          throw z3::exception(\"cannot find strcmp content\");\n        }\n      }\n\n      // Build second string\n      if (info->l2 >= CONST_OFFSET) {\n        dfsan_label_info *l2_info = get_label_info(info->l2);\n        if (is_content_string_op(l2_info->op)) {\n          // fsubstr/fstrcat - get the cached String expression\n          str2 = get_cached_expr(info->l2, input_deps);\n        } else {\n          // Regular content - build string from labels\n          str2 = build_string_from_label(info->l2, input_deps);\n        }\n      } else {\n        // Concrete - get from memcmp_cache\n        auto it = memcmp_cache_.find(l);\n  
      if (it != memcmp_cache_.end()) {\n          std::string s(reinterpret_cast<char*>(it->second.get()), info->size);\n          str2 = context_.string_val(s);\n        } else {\n          throw z3::exception(\"cannot find strcmp content\");\n        }\n      }\n\n      // Create equality: strcmp returns 0 when equal, non-zero otherwise\n      z3::expr eq = z3::ite(str1 == str2,\n                             context_.bv_val(0, 32),\n                             context_.bv_val(1, 32));\n      tsize_cache_.emplace_back(1);\n      cache_expr(l, eq);\n      RECORD_VALUE(0);\n      continue;\n    } else if (info->op == __dfsan::fprefixof) {\n      // prefixof: check if str starts with prefix\n      // l1 = string label, l2 = prefix label\n      // size = comparison length, op1 = str ptr, op2 = prefix ptr\n\n      z3::expr str = context_.string_val(\"\");\n      z3::expr prefix = context_.string_val(\"\");\n\n      // Build first string (str)\n      if (info->l1 >= CONST_OFFSET) {\n        dfsan_label_info *l1_info = get_label_info(info->l1);\n        if (is_content_string_op(l1_info->op)) {\n          str = get_cached_expr(info->l1, input_deps);\n        } else {\n          str = build_string_from_label(info->l1, input_deps);\n        }\n      } else {\n        auto it = memcmp_cache_.find(l);\n        if (it != memcmp_cache_.end()) {\n          std::string s(reinterpret_cast<char*>(it->second.get()), info->size);\n          str = context_.string_val(s);\n        } else {\n          throw z3::exception(\"cannot find prefixof str content\");\n        }\n      }\n\n      // Build second string (prefix)\n      if (info->l2 >= CONST_OFFSET) {\n        dfsan_label_info *l2_info = get_label_info(info->l2);\n        if (is_content_string_op(l2_info->op)) {\n          prefix = get_cached_expr(info->l2, input_deps);\n        } else {\n          prefix = build_string_from_label(info->l2, input_deps);\n        }\n      } else {\n        auto it = memcmp_cache_.find(l);\n        
if (it != memcmp_cache_.end()) {\n          std::string s(reinterpret_cast<char*>(it->second.get()), info->size);\n          prefix = context_.string_val(s);\n        } else {\n          throw z3::exception(\"cannot find prefixof prefix content\");\n        }\n      }\n\n      // Use Z3's prefixof: returns 1 if str starts with prefix, else 0\n      z3::expr result = z3::ite(z3::prefixof(prefix, str),\n                                 context_.bv_val(1, 32),\n                                 context_.bv_val(0, 32));\n      tsize_cache_.emplace_back(1);\n      cache_expr(l, result);\n      RECORD_VALUE(0);\n      continue;\n    } else if (info->op == __dfsan::fsuffixof) {\n      // suffixof: check if str ends with suffix\n      // l1 = string label, l2 = suffix label\n      // size = comparison length, op1 = str ptr, op2 = suffix ptr\n\n      z3::expr str = context_.string_val(\"\");\n      z3::expr suffix = context_.string_val(\"\");\n\n      // Build first string (str) - same pattern as fprefixof\n      if (info->l1 >= CONST_OFFSET) {\n        dfsan_label_info *l1_info = get_label_info(info->l1);\n        if (is_content_string_op(l1_info->op)) {\n          str = get_cached_expr(info->l1, input_deps);\n        } else {\n          str = build_string_from_label(info->l1, input_deps);\n        }\n      } else {\n        auto it = memcmp_cache_.find(l);\n        if (it != memcmp_cache_.end()) {\n          std::string s(reinterpret_cast<char*>(it->second.get()), info->size);\n          str = context_.string_val(s);\n        } else {\n          throw z3::exception(\"cannot find suffixof str content\");\n        }\n      }\n\n      // Build second string (suffix)\n      if (info->l2 >= CONST_OFFSET) {\n        dfsan_label_info *l2_info = get_label_info(info->l2);\n        if (is_content_string_op(l2_info->op)) {\n          suffix = get_cached_expr(info->l2, input_deps);\n        } else {\n          suffix = build_string_from_label(info->l2, input_deps);\n        }\n      } 
else {\n        auto it = memcmp_cache_.find(l);\n        if (it != memcmp_cache_.end()) {\n          std::string s(reinterpret_cast<char*>(it->second.get()), info->size);\n          suffix = context_.string_val(s);\n        } else {\n          throw z3::exception(\"cannot find suffixof suffix content\");\n        }\n      }\n\n      // Use Z3's suffixof: returns 1 if str ends with suffix, else 0\n      z3::expr result = z3::ite(z3::suffixof(suffix, str),\n                                 context_.bv_val(1, 32),\n                                 context_.bv_val(0, 32));\n      tsize_cache_.emplace_back(1);\n      cache_expr(l, result);\n      RECORD_VALUE(0);\n      continue;\n    } else if (info->op == __dfsan::fstr_off) {\n      // fstr_off: string op pointer + constant offset (from GEP)\n      // l1 = string op label (fstrchr result)\n      // op2 = byte offset (e.g., 1 for sep + 1)\n      // The result is a pointer with the offset recorded for later substr calculation\n\n      if (info->l1 >= CONST_OFFSET) {\n        // Get the index expression for the base string op\n        z3::expr idx_expr = get_cached_expr(info->l1, input_deps);\n        int64_t gep_offset = (int64_t)info->op2.i;\n\n        // The fstr_off result is idx + offset (still an Int for string indexing)\n        z3::expr offset_idx = idx_expr + (int)gep_offset;\n\n        tsize_cache_.emplace_back(tsize_cache_[info->l1]);\n        cache_expr(l, offset_idx);\n        // Record the concrete position + offset\n        RECORD_VALUE(value_cache_[info->l1] + gep_offset);\n      } else {\n        // No base label, just return zero\n        tsize_cache_.emplace_back(0);\n        cache_expr(l, context_.int_val(0));\n        RECORD_VALUE(0);\n      }\n      continue;\n    } else if (info->op == __dfsan::Alloca || info->op == __dfsan::Free) {\n      // not expression, do nothing\n      tsize_cache_.emplace_back(0);\n      expr_cache_.emplace_back(nullptr);\n      RECORD_VALUE(0);\n      continue;\n    }\n\n 
   // common ops\n    uint16_t size = info->size;  // Must be uint16_t to handle sizes > 255 bits\n\n    // Early check for ICmp with string functions - handle before creating BVs\n    // because string functions use 'size' field for other purposes (e.g., needle length)\n    if ((info->op & 0xff) == __dfsan::ICmp) {\n      uint16_t l1_op = info->l1 >= CONST_OFFSET ? get_label_info(info->l1)->op : 0;\n      uint16_t l2_op = info->l2 >= CONST_OFFSET ? get_label_info(info->l2)->op : 0;\n      bool l1_is_strfunc = (l1_op >= __dfsan::fstr_op_start && l1_op < __dfsan::fstr_op_end);\n      bool l2_is_strfunc = (l2_op >= __dfsan::fstr_op_start && l2_op < __dfsan::fstr_op_end);\n\n      if (l1_is_strfunc || l2_is_strfunc) {\n        // String function comparison - convert index to found/not-found\n        // strchr returns -1 for not found, >= 0 for found\n        z3::expr cmp_expr(context_);\n        z3::expr zero = context_.int_val(0);\n        int64_t found_pos;\n        bool found;\n        uint16_t predicate = info->op >> 8;\n\n        if (l1_is_strfunc && info->l2 == 0 && info->op2.i == 0) {\n          // Comparing string result with NULL (0)\n          z3::expr idx = get_cached_expr(info->l1, input_deps);\n          found_pos = (int64_t)value_cache_[info->l1];\n          found = found_pos >= 0;\n          z3::expr found_expr = idx >= zero;\n          if (predicate == __dfsan::bvneq) {\n            cmp_expr = found_expr;  // != NULL means found\n          } else if (predicate == __dfsan::bveq) {\n            cmp_expr = !found_expr; // == NULL means not found\n          } else {\n            throw z3::exception(\"unsupported predicate for string search result\");\n          }\n        } else if (l2_is_strfunc && info->l1 == 0 && info->op1.i == 0) {\n          // NULL compared with string result\n          z3::expr idx = get_cached_expr(info->l2, input_deps);\n          found_pos = (int64_t)value_cache_[info->l2];\n          found = found_pos >= 0;\n          z3::expr 
found_expr = idx >= zero;\n          if (predicate == __dfsan::bvneq) {\n            cmp_expr = found_expr;  // != NULL means found\n          } else if (predicate == __dfsan::bveq) {\n            cmp_expr = !found_expr; // == NULL means not found\n          } else {\n            throw z3::exception(\"unsupported predicate for string search result\");\n          }\n        } else {\n          throw z3::exception(\"unsupported string comparison\");\n        }\n\n        tsize_cache_.emplace_back(tsize_cache_[info->l1] + tsize_cache_[info->l2]);\n        cache_expr(l, cmp_expr);\n#if FILTER_WRONG_AST\n        // For string ops, calculate value based on found/not-found semantics\n        bool cmp_result = (predicate == __dfsan::bvneq) ? found : !found;\n        value_cache_.emplace_back(cmp_result ? 1 : 0);\n#endif\n        continue;\n      }\n    }\n\n    uint64_t valmask = size < 64 ? (1UL << size) - 1 : ~0UL;\n    // size for concat is a bit complicated ...\n    if (info->op == __dfsan::Concat && info->l1 == 0) {\n      assert(info->l2 >= CONST_OFFSET);\n      size = info->size - get_label_info(info->l2)->size;\n      valmask = (1UL << size) - 1;\n    }\n    z3::expr op1 = context_.bv_val((uint64_t)info->op1.i, size);\n    uint64_t val1 = info->op1.i & valmask;\n    if (info->l1 >= CONST_OFFSET) {\n      op1 = get_cached_expr(info->l1, input_deps).simplify();\n      if (op1.is_bv() && info->op != __dfsan::Concat) {\n        // XXX: fix size mismatch, only for bv and not concat\n        uint8_t op_size = op1.get_sort().bv_size();\n        if (op_size > size) {\n          op1 = op1.extract(size - 1, 0);\n        } else if (op_size < size) {\n          op1 = z3::zext(op1, size - op_size);\n        }\n      }\n#if FILTER_WRONG_AST\n      val1 = value_cache_[info->l1] & valmask;\n#endif\n    } else if (info->size == 1) {\n      op1 = context_.bool_val(info->op1.i == 1);\n    }\n    // handle op2\n    if (info->op == __dfsan::Concat && info->l2 == 0) {\n      
assert(info->l1 >= CONST_OFFSET);\n      size = info->size - get_label_info(info->l1)->size;\n      valmask = (1UL << size) - 1;\n    }\n    z3::expr op2 = context_.bv_val((uint64_t)info->op2.i, size);\n    uint64_t val2 = info->op2.i & valmask;\n    if (info->l2 >= CONST_OFFSET) {\n      op2 = get_cached_expr(info->l2, input_deps).simplify();\n      if (op2.is_bv() && info->op != __dfsan::Concat) {\n        // XXX: fix size mismatch, only for bv and not concat\n        uint8_t op_size = op2.get_sort().bv_size();\n        if (op_size > size) {\n          op2 = op2.extract(size - 1, 0);\n        } else if (op_size < size) {\n          op2 = z3::zext(op2, size - op_size);\n        }\n      }\n#if FILTER_WRONG_AST\n      val2 = value_cache_[info->l2] & valmask;\n#endif\n    } else if (info->size == 1) {\n      op2 = context_.bool_val(info->op2.i == 1);\n    }\n    // update tree_size\n    tsize_cache_.emplace_back(tsize_cache_[info->l1] + tsize_cache_[info->l2]);\n\n    switch((info->op & 0xff)) {\n      // llvm doesn't distinguish between logical and bitwise and/or/xor\n      case __dfsan::And: {\n        cache_expr(l, info->size != 1 ? (op1 & op2) : (op1 && op2));\n        RECORD_VALUE((info->size != 1) ? (val1 & val2) : (val1 && val2));\n        break;\n      }\n      case __dfsan::Or: {\n        cache_expr(l, info->size != 1 ? (op1 | op2) : (op1 || op2));\n        RECORD_VALUE((info->size != 1) ? 
(val1 | val2) : (val1 || val2));\n        break;\n      }\n      case __dfsan::Xor: {\n        cache_expr(l, op1 ^ op2);\n        RECORD_VALUE(val1 ^ val2);\n        break;\n      }\n      case __dfsan::Shl: {\n        cache_expr(l, z3::shl(op1, op2));\n        RECORD_VALUE(val1 << (val2 % size));\n        break;\n      }\n      case __dfsan::LShr: {\n        cache_expr(l, z3::lshr(op1, op2));\n        RECORD_VALUE(val1 >> (val2 % size));\n        break;\n      }\n      case __dfsan::AShr: {\n        cache_expr(l, z3::ashr(op1, op2));\n        RECORD_VALUE((int64_t)val1 >> (val2 % size));\n        break;\n      }\n      case __dfsan::Add: {\n        cache_expr(l, op1 + op2);\n        RECORD_VALUE(val1 + val2);\n        break;\n      }\n      case __dfsan::Sub: {\n        // Check for pointer arithmetic pattern: (ptr_with_string_op) - base_addr\n        // When l1 is PtrToInt of a string op and l2 is constant (untainted base),\n        // the result is just the index (since ptr = base + index, so ptr - base = index)\n        if (info->l1 >= CONST_OFFSET && info->l2 == 0) {\n          dfsan_label_info *l1_info = get_label_info(info->l1);\n          if (l1_info->op == __dfsan::PtrToInt && l1_info->l1 >= CONST_OFFSET) {\n            dfsan_label_info *src_info = get_label_info(l1_info->l1);\n            if (src_info->op >= __dfsan::fstr_op_start &&\n                src_info->op < __dfsan::fstr_op_end) {\n              // This is (PtrToInt(string_op)) - base_addr = index\n              // The expression is just the index (op1 already contains int2bv(idx))\n              cache_expr(l, op1);\n              // The value is just the index, not idx - base_addr\n              RECORD_VALUE(val1);\n              break;\n            }\n          }\n        }\n        cache_expr(l, op1 - op2);\n        RECORD_VALUE(val1 - val2);\n        break;\n      }\n      case __dfsan::Mul: {\n        cache_expr(l, op1 * op2);\n        RECORD_VALUE(val1 * val2);\n        break;\n      }\n     
 case __dfsan::UDiv: {\n        cache_expr(l, z3::udiv(op1, op2));\n        if (val2 == 0) {\n          fprintf(stderr, \"WARNING: division by zero for label %u\\n\", l);\n          RECORD_VALUE(0);\n        } else\n          RECORD_VALUE(val1 / val2);\n        break;\n      }\n      case __dfsan::SDiv: {\n        cache_expr(l, op1 / op2);\n        if (val2 == 0) {\n          fprintf(stderr, \"WARNING: division by zero for label %u\\n\", l);\n          RECORD_VALUE(0);\n        } else\n          RECORD_VALUE((int64_t)val1 / (int64_t)val2);\n        break;\n      }\n      case __dfsan::URem: {\n        cache_expr(l, z3::urem(op1, op2));\n        if (val2 == 0) {\n          fprintf(stderr, \"WARNING: division by zero for label %u\\n\", l);\n          RECORD_VALUE(0);\n        } else\n          RECORD_VALUE(val1 % val2);\n        break;\n      }\n      case __dfsan::SRem: {\n        cache_expr(l, z3::srem(op1, op2));\n        if (val2 == 0) {\n          fprintf(stderr, \"WARNING: division by zero for label %u\\n\", l);\n          RECORD_VALUE(0);\n        } else\n          RECORD_VALUE((int64_t)val1 % (int64_t)val2);\n        break;\n      }\n      // relational\n      case __dfsan::ICmp: {\n        // Note: string function ICmps are handled early before BV creation\n        uint16_t l1_op = info->l1 >= CONST_OFFSET ? get_label_info(info->l1)->op : 0;\n        uint16_t l2_op = info->l2 >= CONST_OFFSET ? 
get_label_info(info->l2)->op : 0;\n\n        // fprintf(stderr, \"DEBUG serialize ICmp label %u: l1=%u (op=%u), l2=%u (op=%u), predicate=%u\\n\",\n        //         l, info->l1, l1_op, info->l2, l2_op, info->op >> 8);\n        // fprintf(stderr, \"DEBUG serialize ICmp: val1=%lu (cached), val2=%lu (cached), op1.i=%lu (runtime), op2.i=%lu (runtime)\\n\",\n        //         val1, val2, (uint64_t)info->op1.i, (uint64_t)info->op2.i);\n\n#if FILTER_WRONG_AST\n        // we have both operands recorded for ICmp\n        if ((info->op1.i & valmask) != val1 ||\n            (info->op2.i & valmask) != val2) {\n          fprintf(stderr, \"DEBUG serialize ICmp: VALUE MISMATCH detected\\n\");\n          // fprintf(stderr, \"WARNING: value mismatch for label %u:\"\n          //         \"expected op1 %lu, got %lu, expected op2 %lu, got %lu\\n\",\n          //         l, info->op1.i, val1, info->op2.i, val2);\n          // fprintf(stderr, \"cond: %s\\n\", get_cmd(op1, op2, info->op >> 8).to_string().c_str());\n          // dump_value_cache(info->l1);\n          // dump_value_cache(info->l2);\n\n          // Special cases where we don't have the actual value cached:\n          // - memcmp/atoi/strcmp: fix using runtime value from ICmp\n          // - indexOf operations: op1 repurposed for haystack pointer, skip validation\n          bool is_special = false;\n          if (l1_op == __dfsan::fmemcmp || l1_op == __dfsan::fatoi || l1_op == __dfsan::fstrcmp) {\n            fprintf(stderr, \"DEBUG serialize ICmp: fixing up value_cache_[%u] from %lu to %lu (op=%u)\\n\",\n                    info->l1, value_cache_[info->l1], (uint64_t)info->op1.i, l1_op);\n            value_cache_[info->l1] = val1 = info->op1.i;\n            is_special = true;\n          }\n          if (l2_op == __dfsan::fmemcmp || l2_op == __dfsan::fatoi || l2_op == __dfsan::fstrcmp) {\n            fprintf(stderr, \"DEBUG serialize ICmp: fixing up value_cache_[%u] from %lu to %lu (op=%u)\\n\",\n                    
info->l2, value_cache_[info->l2], (uint64_t)info->op2.i, l2_op);\n            value_cache_[info->l2] = val2 = info->op2.i;\n            is_special = true;\n          }\n          // Check if either operand contains indexOf operations\n          if ((info->l1 >= CONST_OFFSET && label_contains_indexof(info->l1)) ||\n              (info->l2 >= CONST_OFFSET && label_contains_indexof(info->l2))) {\n            is_special = true;\n          }\n          if (!is_special) {\n            throw z3::exception(\"value mismatch for ICmp\");\n          }\n        }\n        uint64_t icmp_result = eval_icmp(info->op >> 8, val1, val2, size) ? 1 : 0;\n        // fprintf(stderr, \"DEBUG serialize ICmp: recording value_cache_[%u] = %lu\\n\", l, icmp_result);\n        value_cache_.emplace_back(icmp_result);\n#endif\n        // Cache the expression AFTER updating value_cache to maintain consistency\n        // if an exception is thrown above\n        cache_expr(l, get_cmd(op1, op2, info->op >> 8));\n        break;\n      }\n      // concat\n      case __dfsan::Concat: {\n        // Check if either operand is a String (from fsubstr or string ops)\n        // We can't concat String with bitvector\n        if (!op1.is_bv() || !op2.is_bv()) {\n          // If one operand is String and the other is constant 0 (label 0),\n          // just use the String. 
The constant bytes don't contribute to constraints.\n          if (!op1.is_bv() && info->l2 == 0) {\n            cache_expr(l, op1);\n            RECORD_VALUE(val1);\n            break;\n          }\n          if (!op2.is_bv() && info->l1 == 0) {\n            cache_expr(l, op2);\n            RECORD_VALUE(val2);\n            break;\n          }\n          // fprintf(stderr, \"DEBUG Concat %u: l1=%u (sort=%s, is_bv=%d), l2=%u (sort=%s, is_bv=%d)\\n\",\n          //         l, info->l1, op1.get_sort().to_string().c_str(), op1.is_bv(),\n          //         info->l2, op2.get_sort().to_string().c_str(), op2.is_bv());\n          throw z3::exception(\"concat with non-bitvector operand (string op involved)\");\n        }\n        cache_expr(l, z3::concat(op2, op1)); // little endian\n        RECORD_VALUE((val2 << op1.get_sort().bv_size()) | (val1));\n        break;\n      }\n      default:\n        fprintf(stderr, \"WARNING: unsupported operator %u for label %u\\n\",\n                info->op & 0xff, l);\n        throw z3::exception(\"unsupported operator\");\n        break;\n    }\n  }\n\n  return get_cached_expr(label, deps);\n}\n\nint Z3AstParser::parse_cond(dfsan_label label, bool result, bool add_nested, std::vector<uint64_t> &tasks) {\n\n  if (label < CONST_OFFSET || label == __dfsan::kInitializingLabel || label >= size_) {\n    // invalid label\n    return -1;\n  }\n\n  // allocate a new task\n  auto task = std::make_shared<z3_task_t>();\n  try {\n    // reset has_fsize flag\n    has_fsize = false;\n\n    // parse last branch cond\n    input_dep_set_t inputs;\n    z3::expr cond = serialize(label, inputs);\n\n    // add negated last branch condition\n    z3::expr r = context_.bool_val(result);\n\n#if FILTER_WRONG_AST\n    // Skip validation for indexOf operations (op1 repurposed for haystack pointer)\n    bool contains_indexof = label_contains_indexof(label);\n\n    if (!contains_indexof && value_cache_[label] != result) {\n      // recalcuated value must match the 
recorded value\n      fprintf(stderr, \"WARNING: value mismatch for label %u: expected %lu, got %d\\n\",\n              label, value_cache_[label], result);\n      fprintf(stderr, \"cond: %s\\n\", cond.to_string().c_str());\n      dump_value_cache(label);\n      return -1;\n    }\n#endif\n\n    task->push_back((cond != r));\n\n    // collect additional input deps\n    collect_more_deps(inputs);\n\n    // add nested constraints\n    add_nested_constraints(inputs, task.get());\n\n    // save the task\n    tasks.push_back(save_task(task));\n\n    // save nested unless it's a fsize constraints\n    if (add_nested && !has_fsize) {\n      save_constraint(cond == r, inputs);\n    }\n\n    return 0; // success\n  } catch (z3::exception e) {\n    fprintf(stderr, \"WARNING: parsing error: %s\\n\", e.msg());\n  }\n\n  // exception happened, nothing added\n  return -1;\n}\n\nvoid Z3AstParser::construct_index_tasks(z3::expr &index, uint64_t curr,\n                                        uint64_t lb, uint64_t ub, uint64_t step,\n                                        z3_task_t &nested,\n                                        std::vector<uint64_t> &tasks) {\n\n  std::shared_ptr<z3_task_t> task = nullptr;\n\n  // enumerate indices\n  for (uint64_t i = lb; i < ub; i += step) {\n    if (i == curr) continue;\n    z3::expr idx = context_.bv_val(i, 64);\n    z3::expr e = (index == idx);\n    // allocate a new task\n    task = std::make_shared<z3_task_t>();\n    task->push_back(e);\n    // add nested constraints\n    task->insert(task->end(), nested.begin(), nested.end());\n    // save the task\n    tasks.push_back(save_task(task));\n  }\n}\n\nint Z3AstParser::parse_gep(dfsan_label ptr_label, uptr ptr, dfsan_label index_label, int64_t index,\n                           uint64_t num_elems, uint64_t elem_size, int64_t current_offset,\n                           bool enum_index, std::vector<uint64_t> &tasks) {\n\n  if (index_label < CONST_OFFSET ||\n      index_label == 
__dfsan::kInitializingLabel || index_label >= size_ ||\n      ptr_label == __dfsan::kInitializingLabel || ptr_label >= size_) {\n    // invalid label\n    return -1;\n  }\n\n  // early return if nothing to do\n  if (!enum_index || // if we are not enumerating the index\n      (num_elems == 0 && // if the GEP type is not an array,\n       // and we also don't have a pointer label\n       ptr_label == 0)) {\n    return 0;\n  }\n\n  try {\n    // prepare current index\n    uint16_t size = get_label_info(index_label)->size;\n    z3::expr r = context_.bv_val(index, size);\n\n    input_dep_set_t inputs;\n    z3::expr i = serialize(index_label, inputs);\n\n#if FILTER_WRONG_AST\n    if (value_cache_[index_label] != index) {\n      // recalculated value must match the recorded value\n      fprintf(stderr, \"WARNING: value mismatch for label %u: expected %ld, got %ld\\n\",\n              index_label, value_cache_[index_label], index);\n      return -1;\n    }\n#endif\n\n    // collect nested constraints\n    collect_more_deps(inputs);\n    z3_task_t nested_tasks;\n    add_nested_constraints(inputs, &nested_tasks);\n\n    // first, check against fixed array bounds if available\n    z3::expr idx = z3::zext(i, 64 - size);\n    if (num_elems > 0) {\n      construct_index_tasks(idx, index, 0, num_elems, 1, nested_tasks, tasks);\n    } else {\n      dfsan_label_info *bounds = get_label_info(ptr_label);\n      // fprintf(stderr, \"GEP bounds: lower=0x%lx, upper=0x%lx)\\n\",\n      //     bounds->op1.i, bounds->op2.i);\n      // if the array size is unknow, check bound info\n      if (bounds->op == __dfsan::Alloca ||\n          // due to async solving, we may have a Free op\n          bounds->op == __dfsan::Free) {\n        z3::expr es = context_.bv_val(elem_size, 64);\n        z3::expr co = context_.bv_val(current_offset, 64);\n        if (bounds->l2 == 0) {\n          // only perform index enumeration and bound check\n          // when the size of the buffer is fixed\n          
z3::expr p = context_.bv_val(ptr, 64);\n          z3::expr np = idx * es + co + p;\n          construct_index_tasks(np, index, (uint64_t)bounds->op1.i,\n              (uint64_t)bounds->op2.i, elem_size, nested_tasks, tasks);\n        }\n      }\n    }\n\n    // always preserve\n    save_constraint(i == r, inputs);\n\n    return 0; // success\n  } catch (z3::exception e) {\n    // logf(\"WARNING: solving error: %s\\n\", e.msg());\n  }\n\n  // exception happened, nothing added\n  return -1;\n}\n\nint Z3AstParser::add_constraints(dfsan_label label, uint64_t result) {\n  if (label < CONST_OFFSET || label == __dfsan::kInitializingLabel || label >= size_) {\n    // invalid label\n    return -1;\n  }\n\n  try {\n    input_dep_set_t inputs;\n    z3::expr expr = serialize(label, inputs);\n    collect_more_deps(inputs);\n    // prepare result\n    uint16_t size = get_label_info(label)->size;\n    z3::expr r = context_.bv_val(result, size);\n    // add constraint\n    if (expr.is_bool()) r = context_.bool_val(result);\n\n#if FILTER_WRONG_AST\n    // double check if label is valid\n    if (value_cache_[label] != result) {\n      // recalculated value must match the recorded value\n      fprintf(stderr, \"WARNING: value mismatch for label %u: expected %ld, got %ld\\n\",\n              label, value_cache_[label], result);\n      return -1;\n    }\n#endif\n\n    save_constraint(expr == r, inputs);\n  } catch (z3::exception e) {\n    return -1;\n  }\n\n  return 0;\n}\n\nvoid Z3AstParser::save_constraint(z3::expr expr, input_dep_set_t &inputs) {\n  for (auto off : inputs) {\n    auto c = get_branch_dep(off);\n    if (c == nullptr) {\n      auto nc = std::make_unique<struct branch_dependency>();\n      c = nc.get();\n      set_branch_dep(off, std::move(nc));\n    }\n    if (c == nullptr) {\n      throw z3::exception(\"out of memory\");\n    } else {\n      c->input_deps.insert(inputs.begin(), inputs.end());\n      c->expr_deps.insert(expr);\n    }\n  }\n}\n\nvoid 
Z3AstParser::collect_more_deps(input_dep_set_t &inputs) {\n  // collect additional input deps\n  std::vector<offset_t> worklist;\n  worklist.insert(worklist.begin(), inputs.begin(), inputs.end());\n  while (!worklist.empty()) {\n    auto off = worklist.back();\n    worklist.pop_back();\n\n    auto deps = get_branch_dep(off);\n    if (deps != nullptr) {\n      for (auto &i : deps->input_deps) {\n        if (inputs.insert(i).second)\n          worklist.push_back(i);\n      }\n    }\n  }\n}\n\nsize_t Z3AstParser::add_nested_constraints(input_dep_set_t &inputs, z3_task_t *task) {\n  expr_set_t added;\n  for (auto &off : inputs) {\n    // fprintf(stderr, \"adding offset %d\\n\", off.second);\n    auto deps = get_branch_dep(off);\n    if (deps != nullptr) {\n      for (auto &expr : deps->expr_deps) {\n        if (added.insert(expr).second) {\n          // fprintf(stderr, \"adding expr: %s\\n\", expr.to_string().c_str());\n          task->push_back(expr);\n        }\n      }\n    }\n  }\n  return added.size();\n}\n\nZ3ParserSolver::solving_status\nZ3ParserSolver::solve_task(uint64_t task_id, unsigned timeout, solution_t &solutions) {\n  solving_status ret = unknown_error;\n  auto task = retrieve_task(task_id);\n  if (task == nullptr) {\n    return invalid_task;\n  }\n\n  try {\n    // setup global solver\n    // Use default solver to auto-detect theory (needed for string constraints)\n    z3::solver solver(context_);\n    solver.set(\"timeout\", timeout);\n    // solve the first constraint (optimistic)\n    z3::expr e = task->at(0);\n    solver.add(e);\n    // fprintf(stderr, \"DEBUG solve_task[%lu]: checking first constraint: %s\\n\", task_id, e.to_string().c_str());\n    z3::check_result res = solver.check();\n    // fprintf(stderr, \"DEBUG solve_task[%lu]: result = %d (sat=1, unsat=0, unknown=2)\\n\", task_id, (int)res);\n    if (res == z3::sat) {\n      ret = opt_sat;\n      // optimistic sat, save a model\n      z3::model m = solver.get_model();\n      // 
fprintf(stderr, \"DEBUG solve_task[%lu]: optimistic SAT model:\\n%s\\n\", task_id, m.to_string().c_str());\n      // check nested, if any\n      if (task->size() > 1) {\n        solver.push();\n        // add nested constraints\n        // fprintf(stderr, \"DEBUG solve_task[%lu]: adding %zu nested constraints\\n\", task_id, task->size() - 1);\n        for (size_t i = 1; i < task->size(); i++) {\n          // fprintf(stderr, \"DEBUG solve_task[%lu]: nested[%zu]: %s\\n\", task_id, i, task->at(i).to_string().c_str());\n          solver.add(task->at(i));\n        }\n        res = solver.check();\n        // fprintf(stderr, \"DEBUG solve_task[%lu]: nested result = %d (sat=1, unsat=0, unknown=2)\\n\", task_id, (int)res);\n        if (res == z3::sat) {\n          ret = nested_sat;\n          m = solver.get_model();\n          // fprintf(stderr, \"DEBUG solve_task[%lu]: nested SAT model:\\n%s\\n\", task_id, m.to_string().c_str());\n        } else if (res == z3::unsat) {\n          fprintf(stderr, \"WARNING: nested unsat for task %lu: %s\\n\",\n              task_id, solver.to_smt2().c_str());\n          ret = opt_sat_nested_unsat;\n        } else {\n          ret = opt_sat_nested_timeout;\n        }\n      } else {\n        ret = nested_sat; // XXX: upgrade to nested_sat?\n      }\n\n      // Check if model contains strlen symbols and optimize if needed\n      std::vector<std::pair<z3::expr, uint64_t>> strlen_vars; // (var, max_len)\n      const uint64_t MAX_STRLEN_EXTEND = 4096; // Reasonable max extension\n\n      for (unsigned i = 0; i < m.num_consts(); ++i) {\n        z3::func_decl decl = m.get_const_decl(i);\n        if (decl.name().kind() == Z3_STRING_SYMBOL &&\n            decl.name().str().find(\"strlen\") == 0) {\n          uint32_t input, offset, null_from_input;\n          uint64_t orig_len;\n          if (sscanf(decl.name().str().c_str(), strlen_name_format,\n                     &input, &offset, &orig_len, &null_from_input) == 4) {\n            z3::expr 
strlen_var = context_.constant(decl.name(), decl.range());\n            uint64_t max_len = orig_len + MAX_STRLEN_EXTEND;\n            strlen_vars.emplace_back(strlen_var, max_len);\n          }\n        }\n      }\n\n      if (!strlen_vars.empty()) {\n        // fprintf(stderr, \"DEBUG solve_task[%lu]: found %zu strlen variables, optimizing...\\n\", task_id, strlen_vars.size());\n        // Step 1: Try optimizer to minimize strlen values (no hard bounds)\n        z3::optimize opt(context_);\n        z3::params p(context_);\n        p.set(\"timeout\", timeout);\n        opt.set(p);\n\n        for (const auto &expr : *task) {\n          opt.add(expr);\n        }\n        for (const auto &sv : strlen_vars) {\n          // fprintf(stderr, \"DEBUG solve_task[%lu]: minimizing %s (max=%lu)\\n\", task_id, sv.first.to_string().c_str(), sv.second);\n          opt.minimize(sv.first);\n        }\n\n        bool use_optimized = false;\n        if (opt.check() == z3::sat) {\n          z3::model opt_model = opt.get_model();\n          // fprintf(stderr, \"DEBUG solve_task[%lu]: optimized SAT model:\\n%s\\n\", task_id, opt_model.to_string().c_str());\n          // Check if all strlen values are within bounds\n          bool all_within_bounds = true;\n          for (const auto &sv : strlen_vars) {\n            z3::expr val = opt_model.eval(sv.first, true);\n            uint64_t strlen_val = val.get_numeral_uint64();\n            if (strlen_val > sv.second) {\n              all_within_bounds = false;\n              break;\n            }\n          }\n          if (all_within_bounds) {\n            m = opt_model;\n            use_optimized = true;\n            // fprintf(stderr, \"DEBUG solve_task[%lu]: using optimized model (all within bounds)\\n\", task_id);\n          }\n          // else: optimized model exceeds bounds, fall back to bounded solver\n        }\n\n        // Step 2: If optimization failed or exceeded bounds, try solver with bound constraints\n        if 
(!use_optimized) {\n          // fprintf(stderr, \"DEBUG solve_task[%lu]: adding bound constraints and re-checking\\n\", task_id);\n          solver.push();\n          for (const auto &sv : strlen_vars) {\n            solver.add(z3::ule(sv.first, context_.bv_val(sv.second, sv.first.get_sort().bv_size())));\n          }\n          if (solver.check() == z3::sat) {\n            m = solver.get_model();\n            // fprintf(stderr, \"DEBUG solve_task[%lu]: bounded SAT model:\\n%s\\n\", task_id, m.to_string().c_str());\n          } else {\n            // Step 3: Unsolvable within bounds, skip\n            solver.pop();\n            return ret;\n          }\n          solver.pop();\n        }\n      }\n\n      generate_solution(m, solutions);\n      // fprintf(stderr, \"DEBUG solve_task[%lu]: after generate_solution, solutions.size() = %zu\\n\", task_id, solutions.size());\n    } else if (res == z3::unsat) {\n      // fprintf(stderr, \"DEBUG solve_task[%lu]: UNSAT\\n\", task_id);\n      ret = opt_unsat;\n    } else {\n      // fprintf(stderr, \"DEBUG solve_task[%lu]: TIMEOUT\\n\", task_id);\n      ret = opt_timeout;\n    }\n  } catch (z3::exception ze) {\n    fprintf(stderr, \"WARNING: solve_task[%lu]: EXCEPTION: %s\\n\", task_id, ze.msg());\n    ret = unknown_error;\n  }\n\n  // fprintf(stderr, \"DEBUG solve_task[%lu]: returning with ret=%d, solutions.size() = %zu\\n\", task_id, ret, solutions.size());\n  return ret;\n}\n\nvoid Z3ParserSolver::generate_solution(z3::model &m, solution_t &solutions) {\n  // from qsym\n  unsigned num_constants = m.num_consts();\n  // fprintf(stderr, \"DEBUG generate_solution: model has %u constants\\n\", num_constants);\n  for (unsigned i = 0; i < num_constants; i++) {\n    z3::func_decl decl = m.get_const_decl(i);\n    z3::expr e = m.get_const_interp(decl);\n    z3::symbol name = decl.name();\n\n    if (name.kind() == Z3_STRING_SYMBOL) {\n      // fprintf(stderr, \"DEBUG generate_solution: processing symbol '%s' = %s\\n\",\n      //     
    name.str().c_str(), e.to_string().c_str());\n      if (name.str().find(\"input\") == 0) {\n        uint32_t input;\n        uint32_t offset;\n        sscanf(name.str().c_str(), input_name_format, &input, &offset);\n        uint8_t value = (uint8_t)e.get_numeral_int();\n        // fprintf(stderr, \"DEBUG input-%u-%u: SET offset %u = 0x%02x (individual byte)\\n\",\n        //         input, offset, offset, value);\n        solutions.emplace_back(input, offset, value);\n      } else if (!name.str().compare(\"fsize\")) {\n        // FIXME:\n        // off_t size = (off_t)e.get_numeral_int64();\n        // if (size > input_size) { // grow\n        //   lseek(fd, size, SEEK_SET);\n        //   uint8_t dummy = 0;\n        //   write(fd, &dummy, sizeof(dummy));\n        // } else {\n        //   AOUT(\"truncate file to %ld\\n\", size);\n        //   ftruncate(fd, size);\n        // }\n        throw z3::exception(\"skip fsize constraints\");\n      } else if (name.str().find(\"atoi\") == 0) {\n        uint32_t input;\n        uint32_t offset;\n        int base;\n        uint64_t orig_len;\n        char buf[64];\n        int parsed = sscanf(name.str().c_str(), atoi_name_format, &input, &offset, &base, &orig_len);\n        if (parsed != 4) {\n          continue;\n        }\n        const char *format = NULL;\n        switch (base) {\n          case 2: format = \"%lb\"; break;\n          case 8: format = \"%lo\"; break;\n          case 10: format = \"%ld\"; break;\n          case 16: format = \"%lx\"; break;\n          default: throw z3::exception(\"unsupported base\");\n        }\n        // XXX: assumed signed\n        int new_len = snprintf(buf, 64, format, (int)e.get_numeral_int());\n\n        if ((uint64_t)new_len > orig_len) {\n          // Extending: insert extra digits\n          std::vector<uint8_t> insert_bytes(buf + orig_len, buf + new_len);\n          solutions.emplace_back(input, offset + (uint32_t)orig_len, std::move(insert_bytes));\n          // Set the 
common prefix\n          for (uint64_t i = 0; i < orig_len; ++i) {\n            solutions.emplace_back(input, offset + (uint32_t)i, (uint8_t)buf[i]);\n          }\n        } else if ((uint64_t)new_len < orig_len) {\n          // Shrinking: delete extra bytes\n          solutions.emplace_back(solution_op_t::DELETE, input,\n                                 offset + (uint32_t)new_len,\n                                 (uint32_t)(orig_len - new_len));\n          // Set the new digits\n          for (int i = 0; i < new_len; ++i) {\n            solutions.emplace_back(input, offset + i, (uint8_t)buf[i]);\n          }\n        } else {\n          // Same length: just set the digits\n          for (int i = 0; i < new_len; ++i) {\n            solutions.emplace_back(input, offset + i, (uint8_t)buf[i]);\n          }\n        }\n        // Set null terminator at the new end\n        solutions.emplace_back(input, offset + new_len, (uint8_t)0);\n      } else if (name.str().find(\"strlen\") == 0) {\n        uint32_t input;\n        uint32_t offset;\n        uint64_t orig_len;\n        uint32_t null_from_input;\n        if (sscanf(name.str().c_str(), strlen_name_format,\n                   &input, &offset, &orig_len, &null_from_input) != 4) {\n          throw z3::exception(\"malformed strlen symbol name\");\n        }\n\n        uint64_t target_len = e.get_numeral_uint64();\n        // fprintf(stderr, \"DEBUG generate_solution: strlen-%u-%u: orig=%lu, target=%lu, null_from_input=%u\\n\",\n        //         input, offset, orig_len, target_len, null_from_input);\n\n        if (target_len > orig_len) {\n          // Extending: insert bytes to make the string longer\n          uint64_t extend_by = target_len - orig_len;\n          std::vector<uint8_t> fill_bytes(extend_by, 'A');\n          solutions.emplace_back(input, offset + (uint32_t)orig_len, std::move(fill_bytes));\n          // For plain strings (null_from_input=1), add null terminator at new end\n          // For structured 
formats (null_from_input=0), delimiter handles termination\n          if (null_from_input) {\n            solutions.emplace_back(input, offset + (uint32_t)target_len, (uint8_t)0);\n          }\n        } else if (target_len < orig_len) {\n          // Shrinking: delete bytes to make the string shorter\n          uint64_t shrink_by = orig_len - target_len;\n          solutions.emplace_back(solution_op_t::DELETE, input,\n                                 offset + (uint32_t)target_len,\n                                 (uint32_t)shrink_by);\n        }\n        // target_len == orig_len: no change needed\n      } else if (name.str().find(\"str-\") == 0) {\n        // String variable from strchr/strstr: str-input-offset-len\n        // Extract byte values from the string and generate solutions\n        // Handle length changes with INSERT/DELETE like strlen does\n        uint32_t input;\n        uint32_t offset;\n        uint32_t orig_len;\n        if (sscanf(name.str().c_str(), \"str-%u-%u-%u\", &input, &offset, &orig_len) != 3) {\n          continue;  // Skip malformed string variable\n        }\n\n        // Get the string value from Z3 and decode escape sequences\n        if (e.is_string_value()) {\n          std::string raw_str = e.get_string();\n          std::vector<uint8_t> bytes = decode_z3_string(raw_str);\n          uint32_t new_len = bytes.size();\n          fprintf(stderr, \"DEBUG generate_solution: str-%u-%u-%u: orig=%u, new=%u, raw='%s'\\n\",\n                  input, offset, orig_len, orig_len, new_len, raw_str.c_str());\n\n          if (new_len > orig_len) {\n            // Extending: set common prefix, then insert extra bytes\n            for (uint32_t j = 0; j < orig_len; j++) {\n              solutions.emplace_back(input, offset + j, bytes[j]);\n            }\n            // Insert the extra bytes after the original range\n            std::vector<uint8_t> insert_bytes(bytes.begin() + orig_len, bytes.end());\n            solutions.emplace_back(input, 
offset + orig_len, std::move(insert_bytes));\n          } else if (new_len < orig_len) {\n            // Shrinking: set new content, then delete extra bytes\n            for (uint32_t j = 0; j < new_len; j++) {\n              solutions.emplace_back(input, offset + j, bytes[j]);\n            }\n            // Delete the bytes we no longer need\n            solutions.emplace_back(solution_op_t::DELETE, input,\n                                   offset + new_len,\n                                   orig_len - new_len);\n          } else {\n            // Same length: just set all bytes\n            for (uint32_t j = 0; j < new_len; j++) {\n              solutions.emplace_back(input, offset + j, bytes[j]);\n            }\n          }\n        }\n      } else if (name.str().find(\"strrchr_idx_\") == 0 ||\n                 name.str().find(\"strchr_idx_\") == 0) {\n        // Index variables from strchr/strrchr - skip, they're intermediate\n        continue;\n      } else {\n        // Skip unknown symbols - Z3 string theory creates internal variables\n        continue;\n      }\n    }\n  }\n\n  // Post-process solutions: replace null bytes (0x00) with non-null placeholder ('A')\n  // for bytes within string ranges. 
Z3 doesn't model C null-termination so may put\n  // nulls before the target character position.\n\n  // // Debug: print string ranges\n  // fprintf(stderr, \"DEBUG generate_solution: string_ranges_ has %zu entries\\n\", string_ranges_.size());\n  // for (const auto &entry : string_ranges_) {\n  //   fprintf(stderr, \"DEBUG generate_solution: input %u has %zu ranges\\n\", entry.first, entry.second.size());\n  //   for (const auto &range : entry.second) {\n  //     fprintf(stderr, \"DEBUG generate_solution:   range [%u, %u)\\n\", range.first, range.second);\n  //   }\n  // }\n\n  // Replace null bytes within string ranges (tracked in string_ranges_)\n  for (auto &sol : solutions) {\n    if (sol.op == solution_op_t::SET && sol.val == 0x00) {\n      auto it = string_ranges_.find(sol.id);\n      if (it != string_ranges_.end()) {\n        for (const auto &range : it->second) {\n          // If this offset is within a string range (but not at the end), replace null\n          if (sol.offset >= range.first && sol.offset < range.second) {\n            // fprintf(stderr, \"DEBUG generate_solution: replacing null at offset %u (in range [%u,%u))\\n\",\n            //         sol.offset, range.first, range.second);\n            sol.val = 'A';  // Replace null with 'A'\n            break;\n          }\n        }\n      }\n    }\n  }\n\n  // fprintf(stderr, \"DEBUG generate_solution: finished with %zu solutions\\n\", solutions.size());\n}\n\n// Build Z3 string from a content label (Load or Concat of bytes)\n// Creates a symbolic string variable with naming convention: str-input-offset-len\nz3::expr Z3AstParser::build_string_from_label(dfsan_label label, input_dep_set_t &deps) {\n  if (label < CONST_OFFSET) {\n    throw z3::exception(\"Invalid string label\");  // No tainted content\n  }\n\n  dfsan_label_info *info = get_label_info(label);\n\n  // Handle Load: multi-byte load from input - create a single symbolic string\n  if (info->op == __dfsan::Load) {\n    uint32_t offset = 
get_label_info(info->l1)->op1.i;\n    uint32_t input = get_label_info(info->l1)->op2.i;\n    uint32_t len = info->l2;  // number of bytes loaded\n\n    // Track string range for null-byte post-processing\n    string_ranges_[input].emplace_back(offset, offset + len);\n\n    // Add dependencies for all bytes in the range\n    for (uint32_t i = 0; i < len; i++) {\n      deps.insert(std::make_pair(input, offset + i));\n    }\n\n    // Create a single symbolic string variable: str-input-offset-len\n    char name[256];\n    snprintf(name, sizeof(name), \"str-%u-%u-%u\", input, offset, len);\n    z3::symbol symbol = context_.str_symbol(name);\n    z3::expr str_var = context_.constant(symbol, context_.string_sort());\n\n    // Cache string info for this label\n    string_info_cache_[label] = {input, offset, len};\n\n    return str_var;\n  }\n\n  // Handle fsubstr and fstrcat: these ops cache String expressions\n  if (is_content_string_op(info->op)) {\n    // Should be cached from earlier processing\n    return get_cached_expr(label, deps);\n  }\n\n  // Handle fstr_off: string op pointer + constant offset (from GEP)\n  // l1 = string op label (fstrchr, etc.), op2 = byte offset\n  if (info->op == __dfsan::fstr_off) {\n    if (info->l1 == 0) {\n      throw z3::exception(\"fstr_off with constant l1\");\n    }\n    dfsan_label_info *str_op_info = get_label_info(info->l1);\n\n    // The string op's l1 is the base string content\n    if (str_op_info->l1 >= CONST_OFFSET) {\n      int64_t gep_offset = (int64_t)info->op2.i;\n\n      // Get concrete values to check if we're beyond the end\n      // Bounds check: value_cache_ is indexed by label\n      if (info->l1 >= value_cache_.size()) {\n        throw z3::exception(\"fstr_off label out of value cache bounds\");\n      }\n      int64_t str_op_pos = (int64_t)value_cache_[info->l1];  // position of found char\n      int64_t concrete_start = str_op_pos + gep_offset;\n\n      // Build haystack string FIRST - this populates 
string_info_cache_\n      dfsan_label content_label = str_op_info->l1;\n      z3::expr haystack = build_string_from_label(content_label, deps);\n\n      // Get string info directly from cache (populated by build_string_from_label)\n      auto it = string_info_cache_.find(content_label);\n      if (it == string_info_cache_.end()) {\n        throw z3::exception(\"string info not found in cache for fstr_off\");\n      }\n      uint32_t input_id = it->second.input_id;\n      uint32_t base_offset = it->second.offset;\n      uint32_t haystack_len = it->second.length;\n\n      // fprintf(stderr, \"DEBUG build_string_from_label fstr_off: content_label=%u, haystack_len=%u, input_id=%u, base_offset=%u, concrete_start=%ld\\n\",\n      //         content_label, haystack_len, input_id, base_offset, concrete_start);\n\n      // Check if start is beyond the end of the haystack\n      if (concrete_start >= (int64_t)(base_offset + haystack_len)) {\n        // Beyond end: create a new insertion point variable\n        // str-<input>-<offset>-0 means \"string at offset with no original content\"\n        // fprintf(stderr, \"DEBUG build_string_from_label: fstr_off beyond end, creating insertion point %s\\\n           -n\", name);\n        char name[256];\n        snprintf(name, sizeof(name), \"str-%u-%ld-0\", input_id, concrete_start);\n        z3::symbol symbol = context_.str_symbol(name);\n        z3::expr str_var = context_.constant(symbol, context_.string_sort());\n\n        // Don't add dependency for insertion point - it's beyond file bounds\n        return str_var;\n      }\n\n      // Within bounds: use original suffix extraction\n      z3::expr idx_expr = get_cached_expr(info->l1, deps);\n      z3::expr suffix_start = idx_expr + (int)gep_offset;\n      z3::expr haystack_len_expr(context_, Z3_mk_seq_length(context_, haystack));\n      z3::expr suffix_len = haystack_len_expr - idx_expr - (int)gep_offset;\n\n      return z3::to_expr(context_, Z3_mk_seq_extract(context_,\n       
                                              haystack,\n                                                     suffix_start,\n                                                     suffix_len));\n    }\n    throw z3::exception(\"invalid str_op for fstr_off\");\n  }\n\n  // Handle string search ops (fstrchr, fstrrchr, fstrstr, fstrpbrk):\n  // These labels represent pointer results. When used as content directly\n  // (without GEP offset), we build content at the found position.\n  if (is_indexof_op(info->op)) {\n    if (info->l1 >= CONST_OFFSET) {\n      // Get the index expression for this string op (if already cached)\n      z3::expr idx_expr = get_cached_expr(label, deps);\n\n      // Build the full haystack string\n      z3::expr haystack = build_string_from_label(info->l1, deps);\n\n      // Create suffix starting at the found position (no offset)\n      // substr(haystack, idx, len-idx) - content from found position to end\n      z3::expr haystack_len(context_, Z3_mk_seq_length(context_, haystack));\n      z3::expr suffix_len = haystack_len - idx_expr;\n\n      return z3::to_expr(context_, Z3_mk_seq_extract(context_,\n                                                      haystack,\n                                                      idx_expr,\n                                                      suffix_len));\n    }\n    throw z3::exception(\"invalid l1 for fstr_op\");\n  }\n\n  // Handle Concat: check if it's a chain of consecutive input bytes\n  // If so, create a single string variable for the whole range\n  if (info->op == __dfsan::Concat) {\n    // Try to find the range of consecutive input bytes\n    uint32_t min_offset = UINT32_MAX;\n    uint32_t max_offset = 0;\n    uint32_t input_id = UINT32_MAX;\n    bool is_consecutive = true;\n    std::vector<uint32_t> offsets;\n\n    // Helper lambda to collect offsets from a label\n    std::function<void(dfsan_label)> collect_offsets = [&](dfsan_label label) {\n      if (!is_consecutive) return;\n      if (label 
< CONST_OFFSET) {\n        // Concrete byte in the chain - mark as non-consecutive\n        is_consecutive = false;\n        return;\n      }\n      dfsan_label_info *linfo = get_label_info(label);\n      if (linfo->op == 0) {\n        // Single input byte\n        uint32_t off = linfo->op1.i;\n        uint32_t inp = linfo->op2.i;\n        if (input_id == UINT32_MAX) input_id = inp;\n        else if (input_id != inp) { is_consecutive = false; return; }\n        offsets.push_back(off);\n        if (off < min_offset) min_offset = off;\n        if (off > max_offset) max_offset = off;\n      } else if (linfo->op == __dfsan::Concat) {\n        collect_offsets(linfo->l1);\n        collect_offsets(linfo->l2);\n      } else {\n        // Other operation - not a simple byte chain\n        is_consecutive = false;\n      }\n    };\n\n    collect_offsets(label);\n\n    // Check if offsets are truly consecutive\n    if (is_consecutive && !offsets.empty() && input_id != UINT32_MAX) {\n      std::sort(offsets.begin(), offsets.end());\n      for (size_t i = 1; i < offsets.size(); i++) {\n        if (offsets[i] != offsets[i-1] + 1) {\n          is_consecutive = false;\n          break;\n        }\n      }\n    }\n\n    if (is_consecutive && !offsets.empty()) {\n      // Create a single string variable for the whole range\n      uint32_t len = offsets.size();\n      uint32_t start_offset = offsets[0];\n\n      // Track string range for null-byte post-processing\n      string_ranges_[input_id].emplace_back(start_offset, start_offset + len);\n\n      // Add dependencies for all bytes\n      for (uint32_t off : offsets) {\n        deps.insert(std::make_pair(input_id, off));\n      }\n\n      // Create single symbolic string variable\n      char name[256];\n      snprintf(name, sizeof(name), \"str-%u-%u-%u\", input_id, start_offset, len);\n      z3::symbol symbol = context_.str_symbol(name);\n\n      // Cache string info for this label\n      string_info_cache_[label] = {input_id, 
start_offset, len};\n\n      return context_.constant(symbol, context_.string_sort());\n    }\n\n    // Fall back to recursive concatenation if not consecutive\n    z3::expr left(context_);\n    z3::expr right(context_);\n\n    if (info->l1 >= CONST_OFFSET) {\n      left = build_string_from_label(info->l1, deps);\n    } else {\n      char c = (char)(info->op1.i & 0xff);\n      left = context_.string_val(std::string(1, c));\n    }\n\n    if (info->l2 >= CONST_OFFSET) {\n      right = build_string_from_label(info->l2, deps);\n    } else {\n      char c = (char)(info->op2.i & 0xff);\n      right = context_.string_val(std::string(1, c));\n    }\n\n    // Try to cache combined string info from children for downstream lookups\n    // Use left child's info as base (it comes first in the concat)\n    if (info->l1 >= CONST_OFFSET) {\n      auto it = string_info_cache_.find(info->l1);\n      if (it != string_info_cache_.end()) {\n        uint32_t combined_len = it->second.length;\n        // Add right child's length if available\n        if (info->l2 >= CONST_OFFSET) {\n          auto it2 = string_info_cache_.find(info->l2);\n          if (it2 != string_info_cache_.end()) {\n            combined_len += it2->second.length;\n          }\n        } else {\n          combined_len += 1; // concrete byte\n        }\n        string_info_cache_[label] = {it->second.input_id, it->second.offset, combined_len};\n      }\n    }\n\n    return z3::concat(left, right);\n  }\n\n  // Handle single input byte (op == 0)\n  if (info->op == 0) {\n    uint32_t offset = info->op1.i;\n    uint32_t input = info->op2.i;\n\n    // Track string range for null-byte post-processing (single byte)\n    string_ranges_[input].emplace_back(offset, offset + 1);\n    deps.insert(std::make_pair(input, offset));\n\n    // Create a single-char symbolic string\n    char name[256];\n    snprintf(name, sizeof(name), \"str-%u-%u-%u\", input, offset, 1);\n    z3::symbol symbol = context_.str_symbol(name);\n\n    // 
Cache string info for this label\n    string_info_cache_[label] = {input, offset, 1};\n\n    return context_.constant(symbol, context_.string_sort());\n  }\n\n  // Last resort: empty string\n  return context_.string_val(\"\");\n}\n\n// Get byte expression for a specific input offset\nz3::expr Z3AstParser::get_byte_expr(uint32_t input, uint32_t offset, input_dep_set_t &deps) {\n  deps.insert(std::make_pair(input, offset));\n  char name[256];\n  snprintf(name, sizeof(name), input_name_format, input, offset);\n  z3::symbol symbol = context_.str_symbol(name);\n  return context_.constant(symbol, context_.bv_sort(8));\n}\n"
  },
  {
    "path": "solvers/z3.cpp",
    "content": "#include \"sanitizer_common/sanitizer_common.h\"\n#include \"sanitizer_common/sanitizer_file.h\"\n#include \"sanitizer_common/sanitizer_posix.h\"\n#include \"dfsan/dfsan.h\"\n\n#include \"parse-z3.h\"\n\n#include <z3++.h>\n\n#include <unordered_map>\n#include <unordered_set>\n#include <utility>\n#include <vector>\n\n#define OPTIMISTIC 1\n\nusing namespace __dfsan;\n\n// for output\nstatic const char* __output_dir;\nstatic uint32_t __instance_id;\nstatic uint32_t __session_id;\nstatic uint32_t __current_index = 0;\nstatic z3::context __z3_context;\nstatic z3::solver __z3_solver(__z3_context, \"QF_BV\");\nstatic symsan::Z3ParserSolver *__z3_parser = nullptr;\n\n// filter?\nSANITIZER_INTERFACE_ATTRIBUTE THREADLOCAL uint32_t __taint_trace_callstack;\n\nstatic std::unordered_set<dfsan_label> __solved_labels;\ntypedef std::pair<uint32_t, void*> trace_context;\nstruct context_hash {\n  std::size_t operator()(const trace_context &context) const {\n    return std::hash<uint32_t>{}(context.first) ^ std::hash<void*>{}(context.second);\n  }\n};\nstatic std::unordered_map<trace_context, uint16_t, context_hash> __branches;\nstatic const uint16_t MAX_BRANCH_COUNT = 16;\nstatic const uint64_t MAX_GEP_INDEX = 0x10000;\nstatic std::unordered_set<uptr> __buffers;\n\n\nstatic void generate_input(symsan::Z3ParserSolver::solution_t &solutions) {\n  using op_t = symsan::Z3ParserSolver::solution_op_t;\n\n  if (tainted.is_stdin) {\n    // FIXME: input is stdin\n    AOUT(\"WARNING: original input is stdin\");\n    return;\n  }\n\n  // Build the new input in memory to handle INSERT/DELETE properly\n  std::vector<uint8_t> new_input((uint8_t*)tainted.buf,\n                                  (uint8_t*)tainted.buf + tainted.size);\n\n  // Sort solutions by offset in descending order so INSERT/DELETE don't\n  // invalidate subsequent offsets\n  std::vector<uptr> order(solutions.size());\n  for (uptr i = 0; i < order.size(); ++i) order[i] = i;\n  Sort(order.data(), order.size(), 
[&solutions](uptr a, uptr b) {\n    return solutions[a].offset > solutions[b].offset;\n  });\n\n  for (uptr idx : order) {\n    const auto& sol = solutions[idx];\n    switch (sol.op) {\n      case op_t::SET:\n        if (sol.offset < new_input.size()) {\n          AOUT(\"SET offset %d = %x\\n\", sol.offset, sol.val);\n          new_input[sol.offset] = sol.val;\n        }\n        break;\n\n      case op_t::INSERT:\n        if (sol.offset <= new_input.size()) {\n          AOUT(\"INSERT %zu bytes at offset %d\\n\", sol.data.size(), sol.offset);\n          new_input.insert(new_input.begin() + sol.offset,\n                          sol.data.begin(), sol.data.end());\n        }\n        break;\n\n      case op_t::DELETE:\n        if (sol.offset < new_input.size()) {\n          uptr del_len = sol.len;\n          if (sol.offset + del_len > new_input.size())\n            del_len = new_input.size() - sol.offset;\n          AOUT(\"DELETE %zu bytes at offset %d\\n\", del_len, sol.offset);\n          new_input.erase(new_input.begin() + sol.offset,\n                         new_input.begin() + sol.offset + del_len);\n        }\n        break;\n    }\n  }\n\n  // Write the new input to file\n  char path[PATH_MAX];\n  internal_snprintf(path, PATH_MAX, \"%s/id-%d-%d-%d\", __output_dir,\n                    __instance_id, __session_id, __current_index++);\n  fd_t fd = OpenFile(path, WrOnly);\n  if (fd == kInvalidFd) {\n    AOUT(\"WARNING: failed to open new input file for write\");\n    return;\n  }\n\n  AOUT(\"generate #%d output (size: %zu -> %zu)\\n\",\n       __current_index - 1, tainted.size, new_input.size());\n\n  if (!WriteToFile(fd, new_input.data(), new_input.size())) {\n    AOUT(\"WARNING: failed to write new input\\n\");\n  }\n\n  CloseFile(fd);\n}\n\nstatic inline bool __solve_task(uint64_t task_id) {\n  symsan::Z3ParserSolver::solution_t solutions;\n  auto status = __z3_parser->solve_task(task_id, 5000U, solutions);\n  if (solutions.size() != 0) {\n    
generate_input(solutions);\n    return true;\n  } else {\n    return false;\n  }\n}\n\nstatic struct switch_true_case {\n  dfsan_label label;\n  uint32_t cid;\n} __switch_true_case = {0};\n\nextern \"C\" SANITIZER_INTERFACE_ATTRIBUTE void\n__taint_trace_cmp(dfsan_label op1, dfsan_label op2, uint32_t size, uint32_t predicate,\n                  uint64_t c1, uint64_t c2, uint32_t cid) {\n  if ((op1 == 0 && op2 == 0))\n    return;\n\n  void *addr = __builtin_return_address(0);\n  auto itr = __branches.find({__taint_trace_callstack, addr});\n  if (itr == __branches.end()) {\n    itr = __branches.insert({{__taint_trace_callstack, addr}, 1}).first;\n  } else if (itr->second < MAX_BRANCH_COUNT) {\n    itr->second += 1;\n  } else {\n    return;\n  }\n\n  AOUT(\"solving cmp: %u %u %u %d %lu %lu 0x%x @%p\\n\",\n       op1, op2, size, predicate, c1, c2, cid, addr);\n\n  dfsan_label temp = dfsan_union(op1, op2, (predicate << 8) | ICmp, size, c1, c2);\n  uint8_t r = get_const_result(c1, c2, predicate);\n\n  if (__solved_labels.count(temp) != 0)\n    return;\n\n  if (r) {\n    // for the true case, we want to save it to solve the last,\n    // so the nested constraint will not affect other cases\n    __switch_true_case.label = temp;\n    __switch_true_case.cid = cid;\n    return;\n  }\n\n  std::vector<uint64_t> tasks;\n  if (__z3_parser->parse_cond(temp, r, 0, tasks)) {\n    AOUT(\"WARNING: failed to parse cmp %d @%p\\n\", temp, addr);\n    return;\n  }\n\n  for (auto id : tasks) {\n    // solve\n    if (__solve_task(id)) {\n      AOUT(\"cmp solved\\n\");\n    } else {\n      AOUT(\"cmp not solvable @%p\\n\", addr);\n    }\n  }\n\n  // mark as flipped\n  __solved_labels.insert(temp);\n}\n\nextern \"C\" SANITIZER_INTERFACE_ATTRIBUTE void\n__taint_trace_switch_end(uint32_t cid) {\n  if (__switch_true_case.label == 0) {\n    // filtering should have been done before\n    return;\n  } else if (__switch_true_case.cid != cid) {\n    AOUT(\"WARNING: switch end cid mismatch %u vs 
%u\\n\",\n         __switch_true_case.cid, cid);\n    return;\n  }\n\n  void *addr = __builtin_return_address(0);\n  dfsan_label label = __switch_true_case.label;\n\n  AOUT(\"solving switch end: %u 0x%x @%p\\n\", label, cid, addr);\n\n  std::vector<uint64_t> tasks;\n  if (__z3_parser->parse_cond(label, 1, 1, tasks)) {\n    AOUT(\"WARNING: failed to parse cmp %d @%p\\n\", label, addr);\n    return;\n  }\n\n  for (auto id : tasks) {\n    // solve\n    if (__solve_task(id)) {\n      AOUT(\"cmp solved\\n\");\n    } else {\n      AOUT(\"cmp not solvable @%p\\n\", addr);\n    }\n  }\n\n  // mark as flipped\n  __solved_labels.insert(label);\n  // reset the switch label\n  __switch_true_case.label = 0;\n}\n\nextern \"C\" SANITIZER_INTERFACE_ATTRIBUTE void\n__taint_trace_cond(dfsan_label label, bool r, uint8_t flag, uint32_t cid) {\n  if (label == 0) {\n    // check for real loop exit\n    if (!(((flag & FalseBranchLoopExit) && !r) ||\n          ((flag & TrueBranchLoopExit) && r)))\n      return;\n  }\n\n  void *addr = __builtin_return_address(0);\n  auto itr = __branches.find({__taint_trace_callstack, addr});\n  if (itr == __branches.end()) {\n    itr = __branches.insert({{__taint_trace_callstack, addr}, 1}).first;\n  } else if (itr->second < MAX_BRANCH_COUNT) {\n    itr->second += 1;\n  } else {\n    return;\n  }\n\n  AOUT(\"solving cond: %u %u 0x%x 0x%x %p %u\\n\",\n       label, r, __taint_trace_callstack, cid, addr, itr->second);\n\n  if (__solved_labels.count(label) != 0)\n    return;\n\n  std::vector<uint64_t> tasks;\n  if (__z3_parser->parse_cond(label, r, true, tasks)) {\n    AOUT(\"WARNING: failed to parse condition %d @%p\\n\", label, addr);\n    return;\n  }\n\n  for (auto id : tasks) {\n    // solve\n    if (__solve_task(id)) {\n      AOUT(\"branch solved\\n\");\n    } else {\n      AOUT(\"branch not solvable @%p\\n\", addr);\n    }\n  }\n\n  // mark as flipped\n  __solved_labels.insert(label);\n}\n\nextern \"C\" SANITIZER_INTERFACE_ATTRIBUTE 
dfsan_label\n__taint_trace_select(dfsan_label cond_label, dfsan_label true_label,\n                     dfsan_label false_label, uint8_t r, uint8_t true_op,\n                     uint8_t false_op, uint32_t cid) {\n  if (cond_label == 0)\n    return r ? true_label : false_label;\n\n  void *addr = __builtin_return_address(0);\n  auto itr = __branches.find({__taint_trace_callstack, addr});\n  if (itr == __branches.end()) {\n    itr = __branches.insert({{__taint_trace_callstack, addr}, 1}).first;\n  } else if (itr->second < MAX_BRANCH_COUNT) {\n    itr->second += 1;\n  } else {\n    return r ? true_label : false_label;\n  }\n\n  AOUT(\"solving select: %u %u %u %u %u %u 0x%x @%p\\n\",\n       cond_label, true_label, false_label, r, true_op, false_op, cid, addr);\n\n  // check if it's actually a logical AND: select cond, label, false\n  dfsan_label solving_label = 0, ret_label = 0;\n  uint8_t solving_r = 0;\n  if (true_label != 0 && false_op == 0) {\n    solving_label = dfsan_union(cond_label, true_label, And, 1, r, true_op);\n    solving_r = (r && true_op) ? 1 : 0;\n    ret_label = solving_label;\n  } else if (false_label != 0 && true_op == 1) {\n    // logical OR: select cond, true, label\n    solving_label = dfsan_union(cond_label, false_label, Or, 1, r, false_op);\n    solving_r = (r || false_op) ? 1 : 0;\n    ret_label = solving_label;\n  } else {\n    // normal select?\n    AOUT(\"normal select?!\\n\");\n    solving_label = cond_label;\n    solving_r = r;\n    ret_label = r ? 
true_label : false_label;\n  }\n\n  if (__solved_labels.count(solving_label) != 0)\n    return ret_label;\n\n  std::vector<uint64_t> tasks;\n  if (__z3_parser->parse_cond(solving_label, solving_r, true, tasks)) {\n    AOUT(\"WARNING: failed to parse condition %d @%p\\n\", solving_label, addr);\n    return ret_label;\n  }\n\n  for (auto id : tasks) {\n    // solve\n    if (__solve_task(id)) {\n      AOUT(\"branch solved\\n\");\n    } else {\n      AOUT(\"branch not solvable @%p\\n\", addr);\n    }\n  }\n\n  // mark as flipped\n  __solved_labels.insert(solving_label);\n\n  return ret_label;\n}\n\nextern \"C\" SANITIZER_INTERFACE_ATTRIBUTE void\n__taint_trace_indcall(dfsan_label label) {\n  if (label == 0)\n    return;\n\n  AOUT(\"tainted indirect call target: %d\\n\", label);\n}\n\nextern \"C\" SANITIZER_INTERFACE_ATTRIBUTE void\n__taint_trace_gep(dfsan_label ptr_label, uint64_t ptr, dfsan_label index_label,\n                  int64_t index, uint64_t num_elems, uint64_t elem_size,\n                  int64_t current_offset, uint32_t cid) {\n  if (index_label == 0)\n    return;\n\n  if (__solved_labels.count(index_label) != 0)\n    return;\n\n  if (__buffers.count(ptr) != 0)\n    return;\n\n  AOUT(\"tainted GEP index: %ld = %d, ne: %ld, es: %ld, offset: %ld\\n\",\n      index, index_label, num_elems, elem_size, current_offset);\n\n  void *addr = __builtin_return_address(0);\n  std::vector<uint64_t> tasks;\n  if (__z3_parser->parse_gep(ptr_label, ptr, index_label, index, num_elems,\n                             elem_size, current_offset, true, tasks)) {\n    AOUT(\"WARNING: failed to parse gep %d @%p\\n\", index_label, addr);\n    return;\n  }\n\n  for (auto id : tasks) {\n    // solve\n    if (__solve_task(id)) {\n      AOUT(\"gep solved\\n\");\n    } else {\n      AOUT(\"gep not solvable @%p\\n\", addr);\n    }\n  }\n\n  // mark as visited\n  __solved_labels.insert(index_label);\n  __buffers.insert(ptr);\n\n}\n\nextern \"C\" SANITIZER_INTERFACE_ATTRIBUTE 
void\n__taint_trace_offset(dfsan_label offset_label, int64_t offset, unsigned size) {\n  if (offset_label == 0)\n    return;\n\n  if (__solved_labels.count(offset_label) != 0)\n    return;\n\n  if (__z3_parser->add_constraints(offset_label, offset) != 0) {\n    Report(\"WARNING: adding constraints error\\n\");\n  }\n\n  __solved_labels.insert(offset_label);\n}\n\nextern \"C\" SANITIZER_INTERFACE_ATTRIBUTE void\n__taint_trace_memcmp(dfsan_label label) {\n  if (label == 0)\n    return;\n\n  dfsan_label_info *info = dfsan_get_label_info(label);\n\n  AOUT(\"tainted memcmp: %d, size: %d\\n\", label, info->size);\n\n  // If both operands are symbolic, no concrete content to cache\n  if ((info->l1 != CONST_LABEL && info->l2 != CONST_LABEL) || info->size == 0)\n    return;\n\n  uint8_t *content_ptr = (info->l1 == CONST_LABEL) ? (uint8_t*)info->op1.i\n                                                   : (uint8_t*)info->op2.i;\n  // Cache the concrete content for later solving; take it from whichever operand is concrete\n  __z3_parser->record_memcmp(label, content_ptr, info->size);\n}\n\nextern \"C\" void InitializeSolver() {\n  __output_dir = flags().output_dir;\n  __instance_id = flags().instance_id;\n  __session_id = flags().session_id;\n  __z3_parser = new symsan::Z3ParserSolver((void*)UnionTableAddr(), uniontable_size, __z3_context);\n  std::vector<symsan::input_t> inputs;\n  inputs.push_back({(u8*)tainted.buf, tainted.size});\n  __z3_parser->restart(inputs);\n}\n"
  },
  {
    "path": "tests/CMakeLists.txt",
    "content": "configure_file(lit.site.cfg.in\n  ${CMAKE_CURRENT_BINARY_DIR}/lit.site.cfg\n  @ONLY)\n\n"
  },
  {
    "path": "tests/aggregate.c",
    "content": "// RUN: rm -rf %t.out\n// RUN: mkdir -p %t.out\n// RUN: python -c'print(\"A\"*20)' > %t.bin\n// RUN: clang -o %t.uninstrumented %s\n// RUN: %t.uninstrumented %t.bin | FileCheck --check-prefix=CHECK-ORIG %s\n// RUN: env KO_USE_FASTGEN=1 %ko-clang -o %t.fg %s\n// RUN: env TAINT_OPTIONS=\"taint_file=%t.bin output_dir=%t.out\" %fgtest %t.fg %t.bin\n// RUN: %t.uninstrumented %t.out/id-0-0-0 | FileCheck --check-prefix=CHECK-GEN %s\n// RUN: env KO_USE_Z3=1 %ko-clang -o %t.z3 %s\n// RUN: env TAINT_OPTIONS=\"taint_file=%t.bin output_dir=%t.out\" %t.z3 %t.bin\n// RUN: %t.uninstrumented %t.out/id-0-0-0 | FileCheck --check-prefix=CHECK-GEN %s\n\n#include <stdint.h>\n#include <stdio.h>\n#include <stdlib.h>\n#include <string.h>\n#include \"lib.h\"\n\ntypedef struct {\n  int64_t x;\n  int64_t y;\n} point_struct;\n\npoint_struct __attribute__((noinline)) bar(int32_t x, int32_t y) {\n    point_struct p;\n    p.x = x;\n    p.y = y;\n    return p;\n}\n\nint __attribute__((noinline)) foo(int32_t y) {\n    point_struct p = bar(y, y + 1);\n    return p.x * p.y == 2;\n}\n\nint main (int argc, char** argv) {\n  if (argc < 2) {\n    fprintf(stderr, \"Usage: %s [file]\\n\", argv[0]);\n    return -1;\n  }\n\n  char buf[20];\n  FILE* fp = chk_fopen(argv[1], \"rb\");\n  chk_fread(buf, 1, sizeof(buf), fp);\n  fclose(fp);\n\n  int32_t x = 0;\n\n  memcpy(&x, buf + 1, 4); // x 0 - 1\n\n  if (foo(x) == 1) {\n    // CHECK-GEN: Good\n    printf(\"Good\\n\");\n  } else {\n    // CHECK-ORIG: Bad\n    printf(\"Bad\\n\");\n  }\n}\n"
  },
  {
    "path": "tests/atoi_test.c",
    "content": "// Test: atoi constraints for extending and shrinking digit strings\n// RUN: rm -rf %t.out\n// RUN: mkdir -p %t.out\n// RUN: printf '999' > %t.bin\n// RUN: clang -o %t.uninstrumented %s\n// RUN: %t.uninstrumented %t.bin | FileCheck --check-prefix=CHECK-ORIG %s\n// RUN: env KO_USE_FASTGEN=1 KO_DONT_OPTIMIZE=1 %ko-clang -o %t.fg %s\n// RUN: env TAINT_OPTIONS=\"taint_file=%t.bin output_dir=%t.out\" %fgtest %t.fg %t.bin\n// RUN: %t.uninstrumented %t.out/id-0-0-0 | FileCheck --check-prefix=CHECK-GEN1 %s\n// RUN: %t.uninstrumented %t.out/id-0-0-1 | FileCheck --check-prefix=CHECK-GEN2 %s\n\n#include <stdio.h>\n#include <stdlib.h>\n#include <string.h>\n\nint main(int argc, char **argv) {\n  if (argc < 2) {\n    fprintf(stderr, \"Usage: %s <input_file>\\n\", argv[0]);\n    return 1;\n  }\n\n  char buf[64];\n  FILE *f = fopen(argv[1], \"r\");\n  if (!f) {\n    perror(\"fopen\");\n    return 1;\n  }\n  size_t n = fread(buf, 1, sizeof(buf) - 1, f);\n  buf[n] = '\\0';\n  fclose(f);\n\n  int val = atoi(buf);\n  printf(\"atoi returned: %d\\n\", val);\n\n  // Test shrinking: 999 -> 42 (need fewer digits)\n  if (val == 42) {\n    // CHECK-GEN1: SHRINK-SUCCESS\n    printf(\"SHRINK-SUCCESS: val == 42\\n\");\n  }\n\n  // Test extending: 999 -> 12345 (need more digits)\n  if (val == 12345) {\n    // CHECK-GEN2: EXTEND-SUCCESS\n    printf(\"EXTEND-SUCCESS: val == 12345\\n\");\n  }\n\n  // Original input (999) hits neither branch\n  if (val != 42 && val != 12345) {\n    // CHECK-ORIG: NEITHER\n    printf(\"NEITHER: val = %d\\n\", val);\n  }\n\n  return 0;\n}\n"
  },
  {
    "path": "tests/atomicrmw.c",
    "content": "// RUN: rm -rf %t.out\n// RUN: mkdir -p %t.out\n// RUN: python -c'print(\"A\"*20)' > %t.bin\n// RUN: clang -o %t.uninstrumented %s\n// RUN: %t.uninstrumented %t.bin | FileCheck --check-prefix=CHECK-ORIG %s\n// RUN: env KO_USE_FASTGEN=1 %ko-clang -o %t.fg %s\n// RUN: env TAINT_OPTIONS=\"taint_file=%t.bin output_dir=%t.out\" %fgtest %t.fg %t.bin\n// RUN: %t.uninstrumented %t.out/id-0-0-0 | FileCheck --check-prefix=CHECK-GEN %s\n// RUN: env KO_USE_Z3=1 %ko-clang -o %t.z3 %s\n// RUN: env TAINT_OPTIONS=\"taint_file=%t.bin output_dir=%t.out\" %t.z3 %t.bin\n// RUN: %t.uninstrumented %t.out/id-0-0-0 | FileCheck --check-prefix=CHECK-GEN %s\n\n#include <stdatomic.h>\n#include <stdint.h>\n#include <stdio.h>\n#include <stdlib.h>\n#include <string.h>\n#include \"lib.h\"\n\nint main (int argc, char** argv) {\n  if (argc < 2) {\n    fprintf(stderr, \"Usage: %s [file]\\n\", argv[0]);\n    return -1;\n  }\n\n  char buf[20];\n  size_t ret;\n\n  FILE* fp = chk_fopen(argv[1], \"rb\");\n  chk_fread(buf, 1, sizeof(buf), fp);\n  fclose(fp);\n\n  uint32_t x = 0;\n  uint32_t y = 0;\n  memcpy(&x, buf, 4);\n  memcpy(&y, buf + 4, 4);\n\n  int orig = __atomic_fetch_add(&x, y, __ATOMIC_RELAXED);\n  if(orig == 2 && y == 1) {\n    // CHECK-GEN: Good\n    printf(\"Good\\n\");\n  }\n  else {\n    // CHECK-ORIG: Bad\n    printf(\"Bad\\n\");\n  }\n\n  return 0;\n}\n"
  },
  {
    "path": "tests/bitflip.c",
    "content": "// RUN: rm -rf %t.out\n// RUN: mkdir -p %t.out\n// RUN: python -c'print(\"A\"*20)' > %t.bin\n// RUN: clang -o %t.uninstrumented %s\n// RUN: %t.uninstrumented %t.bin | FileCheck --check-prefix=CHECK-ORIG %s\n// RUN: env KO_USE_FASTGEN=1 %ko-clang -o %t.fg %s\n// RUN: env TAINT_OPTIONS=\"taint_file=%t.bin output_dir=%t.out\" %fgtest %t.fg %t.bin\n// RUN: %t.uninstrumented %t.out/id-0-0-0 | FileCheck --check-prefix=CHECK-GEN %s\n// RUN: env KO_USE_Z3=1 %ko-clang -o %t.z3 %s\n// RUN: env TAINT_OPTIONS=\"taint_file=%t.bin output_dir=%t.out\" %t.z3 %t.bin\n// RUN: %t.uninstrumented %t.out/id-0-0-0 | FileCheck --check-prefix=CHECK-GEN %s\n\n#include <stdint.h>\n#include <stdio.h>\n#include <stdlib.h>\n#include <string.h>\n#include \"lib.h\"\n\nint main (int argc, char** argv) {\n  if (argc < 2) {\n    fprintf(stderr, \"Usage: %s [file]\\n\", argv[0]);\n    return -1;\n  }\n\n  char buf[20];\n  size_t ret;\n\n  FILE* fp = chk_fopen(argv[1], \"rb\");\n  chk_fread(buf, 1, sizeof(buf), fp);\n  fclose(fp);\n\n  uint32_t x = 0;\n  memcpy(&x, buf, 4);\n\n  if ((1 & (x >> 29)) &&\n      !(1 & (x >> 28)) &&\n      (1 & (x >> 27)) &&\n      (1 & (x >> 26)) &&\n      !(1 & (x >> 25)) &&\n      (1 & (x >> 24)) &&\n      (1 & (x >> 23)) &&\n      (1 & (x >> 22)) &&\n      !(1 & (x >> 21)) &&\n      !(1 & (x >> 20)) &&\n      (1 & (x >> 19)) &&\n      (1 & (x >> 18))) {\n    // CHECK-GEN: Good\n    printf(\"Good\\n\");\n  }\n  else {\n    // CHECK-ORIG: Bad\n    printf(\"Bad\\n\");\n  }\n\n  return 0;\n}\n"
  },
  {
    "path": "tests/bool.c",
    "content": "// RUN: rm -rf %t.out\n// RUN: mkdir -p %t.out\n// RUN: python -c'print(\"AABB\"*10)' > %t.bin\n// RUN: clang -o %t.uninstrumented %s\n// RUN: %t.uninstrumented %t.bin | FileCheck --check-prefix=CHECK-ORIG %s\n// RUN: env KO_USE_FASTGEN=1 %ko-clang -o %t.fg %s\n// RUN: env TAINT_OPTIONS=\"taint_file=%t.bin output_dir=%t.out\" %fgtest %t.fg %t.bin\n// RUN: %t.uninstrumented %t.out/id-0-0-0 | FileCheck --check-prefix=CHECK-GEN %s\n// RUN: env KO_USE_Z3=1 %ko-clang -o %t.z3 %s\n// RUN: env TAINT_OPTIONS=\"taint_file=%t.bin output_dir=%t.out\" %t.z3 %t.bin\n// RUN: %t.uninstrumented %t.out/id-0-0-0 | FileCheck --check-prefix=CHECK-GEN %s\n\n#include <stdint.h>\n#include <stdio.h>\n#include <stdlib.h>\n#include <string.h>\n#include \"lib.h\"\n\nint __attribute__((noinline)) foo(int32_t y) {\n  return y * y  - 6 * y  == -8;\n}\n\nint main (int argc, char** argv) {\n  if (argc < 2) {\n    fprintf(stderr, \"Usage: %s [file]\\n\", argv[0]);\n    return -1;\n  }\n\n  char buf[20];\n  FILE* fp = chk_fopen(argv[1], \"rb\");\n  chk_fread(buf, 1, sizeof(buf), fp);\n  fclose(fp);\n\n  int32_t x = 0;\n\n  memcpy(&x, buf + 1, 4); // x 0 - 1\n\n  if (foo(x) == 1) {\n    // CHECK-GEN: Good\n    printf(\"Good\\n\");\n  } else {\n    // CHECK-ORIG: Bad\n    printf(\"Bad\\n\");\n  }\n}\n"
  },
  {
    "path": "tests/boundary.c",
    "content": "// RUN: rm -rf %t.out\n// RUN: mkdir -p %t.out\n// RUN: python -c\"import sys; sys.stdout.buffer.write(b'\\x00\\x00\\x00\\x00')\" > %t.bin\n// RUN: clang -fsanitize=bounds -o %t.ubsan %s\n// RUN: env KO_USE_FASTGEN=1 KO_SOLVE_UB=1 %ko-clang -o %t.fg %s\n// RUN: env TAINT_OPTIONS=\"taint_file=%t.bin output_dir=%t.out solve_ub=1\" %fgtest %t.fg %t.bin\n// RUN: not env UBSAN_OPTIONS=\"halt_on_error=1\" %t.ubsan %t.out/id-0-0-1 2>&1 | FileCheck %s\n// CHECK: runtime error: index {{.*}} out of bounds for type 'char[26]'\n\n#include <stdint.h>\n#include <stdio.h>\n#include <stdlib.h>\n#include <string.h>\n#include \"lib.h\"\n\nint main (int argc, char** argv) {\n  if (argc < 2) {\n    fprintf(stderr, \"Usage: %s [file]\\n\", argv[0]);\n    return 0;\n  }\n\n  int index = 0;\n  char alphabet[26] = {0};\n\n  for (int i = 0; i < 26; i++) {\n    alphabet[i] = 'A' + i;\n  }\n\n  FILE* fp = chk_fopen(argv[1], \"rb\");\n  chk_fread(&index, sizeof(index), 1, fp);\n  fclose(fp);\n\n  // BUG: out-of-boundary\n  printf(\"%c\\n\", alphabet[index]);\n}\n"
  },
  {
    "path": "tests/boundary2.c",
    "content": "// RUN: rm -rf %t.out\n// RUN: mkdir -p %t.out\n// RUN: python -c\"import sys; sys.stdout.buffer.write(b'\\x00\\x00\\x00\\x00')\" > %t.bin\n// RUN: clang -fsanitize=bounds -o %t.ubsan %s\n// RUN: env KO_USE_FASTGEN=1 KO_SOLVE_UB=1 %ko-clang -o %t.fg %s\n// RUN: env TAINT_OPTIONS=\"taint_file=%t.bin output_dir=%t.out solve_ub=1\" %fgtest %t.fg %t.bin\n// RUN: not env UBSAN_OPTIONS=\"halt_on_error=1\" %t.ubsan %t.out/id-0-0-1 2>&1 | FileCheck %s\n// CHECK: runtime error: index {{.*}} out of bounds for type 'char[26]'\n\n#include <stdint.h>\n#include <stdio.h>\n#include <stdlib.h>\n#include <string.h>\n#include \"lib.h\"\n\nint main (int argc, char** argv) {\n  if (argc < 2) {\n    fprintf(stderr, \"Usage: %s [file]\\n\", argv[0]);\n    return 0;\n  }\n\n  int index = 0;\n  char alphabet[26] = {0};\n\n  for (int i = 0; i < 26; i++) {\n    alphabet[i] = 'A' + i;\n  }\n\n  FILE* fp = chk_fopen(argv[1], \"rb\");\n  chk_fread(&index, sizeof(index), 1, fp);\n  fclose(fp);\n\n  // BUG: off-by-one if index == 26\n  if (index > sizeof(alphabet)) {\n    printf(\"Bad\\n\");\n    return 0;\n  }\n\n  printf(\"%c\\n\", alphabet[index]);\n}\n"
  },
  {
    "path": "tests/boundary3.c",
    "content": "// RUN: rm -rf %t.out\n// RUN: mkdir -p %t.out\n// RUN: python -c\"import sys; sys.stdout.buffer.write(b'\\x00\\x00\\x00\\x00')\" > %t.bin\n// RUN: clang -fsanitize=bounds -o %t.ubsan %s\n// RUN: env KO_USE_FASTGEN=1 KO_SOLVE_UB=1 %ko-clang -o %t.fg %s\n// RUN: env TAINT_OPTIONS=\"taint_file=%t.bin output_dir=%t.out solve_ub=1\" %fgtest %t.fg %t.bin\n// RUN: not env UBSAN_OPTIONS=\"halt_on_error=1\" %t.ubsan %t.out/id-0-0-1 2>&1 | FileCheck %s\n// CHECK: runtime error: index {{.*}} out of bounds for type 'char[26]'\n\n#include <stdint.h>\n#include <stdio.h>\n#include <stdlib.h>\n#include <string.h>\n#include \"lib.h\"\n\nint main (int argc, char** argv) {\n  if (argc < 2) {\n    fprintf(stderr, \"Usage: %s [file]\\n\", argv[0]);\n    return 0;\n  }\n\n  int index = 0;\n  char alphabet[26] = {0};\n\n  for (int i = 0; i < 26; i++) {\n    alphabet[i] = 'A' + i;\n  }\n\n  FILE* fp = chk_fopen(argv[1], \"rb\");\n  chk_fread(&index, sizeof(index), 1, fp);\n  fclose(fp);\n\n  // BUG: index can be negative\n  if (index >= 26) {\n    printf(\"Bad\\n\");\n    return 0;\n  }\n\n  printf(\"%c\\n\", alphabet[index]);\n}\n"
  },
  {
    "path": "tests/boundary4.c",
    "content": "// RUN: rm -rf %t.out\n// RUN: mkdir -p %t.out\n// RUN: python -c\"import sys; sys.stdout.buffer.write(b'\\x00\\x00\\x00\\x00')\" > %t.bin\n// RUN: env KO_USE_FASTGEN=1 KO_SOLVE_UB=1 %ko-clang -o %t.fg %s\n// RUN: env TAINT_OPTIONS=\"taint_file=%t.bin output_dir=%t.out solve_ub=1\" %fgtest %t.fg %t.bin\n// RUN: not env TAINT_OPTIONS=\"debug=1 trace_bounds=1\" %t.fg %t.out/id-0-0-1 2>&1 | FileCheck %s\n// CHECK: ERROR: OOB overflow\n\n#include <stdint.h>\n#include <stdio.h>\n#include <stdlib.h>\n#include <string.h>\n#include \"lib.h\"\n\nint main (int argc, char** argv) {\n  if (argc < 2) {\n    fprintf(stderr, \"Usage: %s [file]\\n\", argv[0]);\n    return 0;\n  }\n\n  int index = 0;\n  char *alphabet = malloc(26);\n\n  for (int i = 0; i < 26; i++) {\n    alphabet[i] = 'A' + i;\n  }\n\n  FILE* fp = chk_fopen(argv[1], \"rb\");\n  chk_fread(&index, sizeof(index), 1, fp);\n  fclose(fp);\n\n  // BUG: off-by-one if index == 26\n  if (index > 26U) {\n    printf(\"Bad\\n\");\n    return 0;\n  }\n\n  printf(\"%c\\n\", alphabet[index]);\n  free(alphabet);\n}\n"
  },
  {
    "path": "tests/boundary5.c",
    "content": "// RUN: rm -rf %t.out\n// RUN: mkdir -p %t.out\n// RUN: python -c\"import sys; sys.stdout.buffer.write(b'\\x00\\x00\\x00\\x00')\" > %t.bin\n// RUN: env KO_DONT_OPTIMIZE=1 KO_USE_FASTGEN=1 KO_SOLVE_UB=1 %ko-clang -o %t.fg %s\n// RUN: env TAINT_OPTIONS=\"taint_file=%t.bin output_dir=%t.out solve_ub=1\" %fgtest %t.fg %t.bin\n// RUN: not env TAINT_OPTIONS=\"debug=1 trace_bounds=1\" %t.fg %t.out/id-0-0-1 2>&1 | FileCheck %s\n// CHECK: ERROR: OOB underflow\n\n#include <stdint.h>\n#include <stdio.h>\n#include <stdlib.h>\n#include <string.h>\n#include \"lib.h\"\n\nint main (int argc, char** argv) {\n  if (argc < 2) {\n    fprintf(stderr, \"Usage: %s [file]\\n\", argv[0]);\n    return 0;\n  }\n\n  int index = 0;\n  char *alphabet = malloc(26);;\n\n  for (int i = 0; i < 26; i++) {\n    alphabet[i] = 'A' + i;\n  }\n\n  FILE* fp = chk_fopen(argv[1], \"rb\");\n  chk_fread(&index, sizeof(index), 1, fp);\n  fclose(fp);\n\n  // BUG: index can be negative\n  if (index >= 26) {\n    printf(\"Bad\\n\");\n    return 0;\n  }\n\n  // oob write\n  alphabet[index] = 'A';\n  free(alphabet);\n}\n"
  },
  {
    "path": "tests/boundary6.c",
    "content": "// RUN: rm -rf %t.out\n// RUN: mkdir -p %t.out\n// RUN: python -c\"import sys; sys.stdout.buffer.write(b'\\x01\\x00\\x00\\x00')\" > %t.bin\n// RUN: env KO_DONT_OPTIMIZE=1 KO_USE_FASTGEN=1 KO_SOLVE_UB=1 %ko-clang -o %t.fg %s\n// RUN: env TAINT_OPTIONS=\"taint_file=%t.bin output_dir=%t.out solve_ub=1\" %fgtest %t.fg %t.bin\n// RUN: not env TAINT_OPTIONS=\"debug=1 trace_bounds=1\" %t.fg %t.out/id-0-0-0 2>&1 | FileCheck %s\n// CHECK: ERROR: OOB overflow\n\n#include <stdint.h>\n#include <stdio.h>\n#include <stdlib.h>\n#include <string.h>\n#include \"lib.h\"\n\nint main (int argc, char** argv) {\n  if (argc < 2) {\n    fprintf(stderr, \"Usage: %s [file]\\n\", argv[0]);\n    return 0;\n  }\n\n  int size = 0;\n  char alphabet[26] = {0};\n  char buf[26] = {0};\n\n  for (int i = 0; i < 26; i++) {\n    alphabet[i] = 'A' + i;\n  }\n\n  FILE* fp = chk_fopen(argv[1], \"rb\");\n  chk_fread(&size, sizeof(size), 1, fp);\n  fclose(fp);\n\n  // BUG: out-of-boundary\n  memcpy(buf, alphabet, size);\n}\n"
  },
  {
    "path": "tests/boundary7.c",
    "content": "// RUN: rm -rf %t.out\n// RUN: mkdir -p %t.out\n// RUN: python -c\"import sys; sys.stdout.buffer.write(b'\\x01\\x00\\x00\\x00')\" > %t.bin\n// RUN: env KO_DONT_OPTIMIZE=1 KO_USE_FASTGEN=1 KO_SOLVE_UB=1 %ko-clang -o %t.fg %s\n// RUN: env TAINT_OPTIONS=\"taint_file=%t.bin output_dir=%t.out solve_ub=1\" %fgtest %t.fg %t.bin\n// RUN: not env TAINT_OPTIONS=\"debug=1 trace_bounds=1\" %t.fg %t.out/id-0-0-0 2>&1 | FileCheck %s\n// CHECK: ERROR: OOB overflow\n\n#include <stdint.h>\n#include <stdio.h>\n#include <stdlib.h>\n#include <string.h>\n#include \"lib.h\"\n\nint main (int argc, char** argv) {\n  if (argc < 2) {\n    fprintf(stderr, \"Usage: %s [file]\\n\", argv[0]);\n    return 0;\n  }\n\n  int size = 0;\n  char alphabet[26] = {0};\n  char *buffer = malloc(26);\n\n  for (int i = 0; i < 26; i++) {\n    alphabet[i] = 'A' + i;\n  }\n\n  FILE* fp = chk_fopen(argv[1], \"rb\");\n  chk_fread(&size, sizeof(size), 1, fp);\n  fclose(fp);\n\n  // oob write\n  memcpy(buffer, alphabet, size);\n  free(alphabet);\n}\n"
  },
  {
    "path": "tests/bounds.cpp",
    "content": "// RUN: rm -rf %t.out\n// RUN: mkdir -p %t.out\n// RUN: python -c\"import sys; sys.stdout.buffer.write(b'\\x00\\x00\\x00\\x00'*3)\" > %t.bin\n// RUN: clang++ -fsanitize=bounds -o %t.ubsan %s\n// RUN: env KO_USE_FASTGEN=1 KO_SOLVE_UB=1 %ko-clang++ -o %t.fg %s\n// RUN: env TAINT_OPTIONS=\"taint_file=%t.bin output_dir=%t.out solve_ub=1\" %fgtest %t.fg %t.bin\n// RUN: not env UBSAN_OPTIONS=\"halt_on_error=1\" %t.ubsan %t.out/id-0-0-0 2>&1 FileCheck %s --check-prefix=CHECK-A2\n// RUN: not env UBSAN_OPTIONS=\"halt_on_error=1\" %t.ubsan %t.out/id-0-0-1 2>&1 FileCheck %s --check-prefix=CHECK-A2\n// RUN: not env UBSAN_OPTIONS=\"halt_on_error=1\" %t.ubsan %t.out/id-0-0-2 2>&1 FileCheck %s --check-prefix=CHECK-B-3\n// RUN: not env UBSAN_OPTIONS=\"halt_on_error=1\" %t.ubsan %t.out/id-0-0-3 2>&1 FileCheck %s --check-prefix=CHECK-B-3\n// RUN: not env UBSAN_OPTIONS=\"halt_on_error=1\" %t.ubsan %t.out/id-0-0-4 2>&1 FileCheck %s --check-prefix=CHECK-C-4\n// RUN: not env UBSAN_OPTIONS=\"halt_on_error=1\" %t.ubsan %t.out/id-0-0-5 2>&1 FileCheck %s --check-prefix=CHECK-C-4\n\n#include <stdint.h>\n#include <stdio.h>\n#include <stdlib.h>\n#include <string.h>\n#include \"lib.h\"\n\nint main (int argc, char** argv) {\n  if (argc < 2) {\n    fprintf(stderr, \"Usage: %s [file]\\n\", argv[0]);\n    return 0;\n  }\n\n  int i = 0, j = 0, k = 0;\n\n  FILE* fp = chk_fopen(argv[1], \"rb\");\n  chk_fread(&i, sizeof(i), 1, fp);\n  chk_fread(&j, sizeof(j), 1, fp);\n  chk_fread(&k, sizeof(k), 1, fp);\n  fclose(fp);\n\n  int arr[2][3][4] = {};\n\n  return arr[i][j][k];\n  // CHECK-A-2: bounds.cpp:[[@LINE-1]]:10: runtime error: index {{.*}} out of bounds for type 'int[2][3][4]'\n  // CHECK-B-3: bounds.cpp:[[@LINE-2]]:10: runtime error: index {{.*}} out of bounds for type 'int[3][4]'\n  // CHECK-C-4: bounds.cpp:[[@LINE-3]]:10: runtime error: index {{.*}} out of bounds for type 'int[4]'\n}\n"
  },
  {
    "path": "tests/call_fn.c",
    "content": "// RUN: rm -rf %t.out\n// RUN: mkdir -p %t.out\n// RUN: python -c'print(\"A\"*20)' > %t.bin\n// RUN: clang -o %t.uninstrumented %s\n// RUN: %t.uninstrumented %t.bin | FileCheck --check-prefix=CHECK-ORIG %s\n// RUN: env KO_USE_FASTGEN=1 %ko-clang -o %t.fg %s\n// RUN: env TAINT_OPTIONS=\"taint_file=%t.bin output_dir=%t.out\" %fgtest %t.fg %t.bin\n// RUN: %t.uninstrumented %t.out/id-0-0-0 | FileCheck --check-prefix=CHECK-GEN1 %s\n// RUN: %t.uninstrumented %t.out/id-0-0-1 | FileCheck --check-prefix=CHECK-GEN2 %s\n// RUN: env KO_USE_Z3=1 %ko-clang -o %t.z3 %s\n// RUN: env TAINT_OPTIONS=\"taint_file=%t.bin output_dir=%t.out\" %t.z3 %t.bin\n// RUN: %t.uninstrumented %t.out/id-0-0-0 | FileCheck --check-prefix=CHECK-GEN1 %s\n// RUN: %t.uninstrumented %t.out/id-0-0-1 | FileCheck --check-prefix=CHECK-GEN2 %s\n\n#include <stdint.h>\n#include <stdio.h>\n#include <stdlib.h>\n#include <string.h>\n#include \"lib.h\"\n\nvoid  __attribute__ ((noinline))  bar(int y) {\n  if (y == 12334) {\n    // CHECK-GEN1: Good1\n    printf(\"Good1\");\n  }\n  else {\n    // CHECK-ORIG: Bad\n    printf(\"Bad\");\n  }\n}\n\nvoid  __attribute__ ((noinline))  foo(int y) {\n  bar(y - 1);\n}\n\nint main (int argc, char** argv) {\n  if (argc < 2) {\n    fprintf(stderr, \"Usage: %s [file]\\n\", argv[0]);\n    return -1;\n  }\n\n  char buf[20];\n  FILE* fp = chk_fopen(argv[1], \"rb\");\n  chk_fread(buf, 1, sizeof(buf), fp);\n  fclose(fp);\n\n  int32_t y = 0;\n  memcpy(&y, buf + 4, 4); // y 4 - 7\n  foo(y);\n\n  int x = y;\n  if (x == 123) {\n    // CHECK-GEN2: Good2\n    printf(\"Good2\");\n  }\n  return 0;\n}\n"
  },
  {
    "path": "tests/call_fn2.c",
    "content": "// RUN: rm -rf %t.out\n// RUN: mkdir -p %t.out\n// RUN: python -c'print(\"A\"*20)' > %t.bin\n// RUN: clang -o %t.uninstrumented %s\n// RUN: %t.uninstrumented %t.bin | FileCheck --check-prefix=CHECK-ORIG %s\n// RUN: env KO_USE_FASTGEN=1 %ko-clang -o %t.fg %s\n// RUN: env TAINT_OPTIONS=\"taint_file=%t.bin output_dir=%t.out\" %fgtest %t.fg %t.bin\n// RUN: %t.uninstrumented %t.out/id-0-0-0 | FileCheck --check-prefix=CHECK-GEN %s\n// RUN: env KO_USE_Z3=1 %ko-clang -o %t.z3 %s\n// RUN: env TAINT_OPTIONS=\"taint_file=%t.bin output_dir=%t.out\" %t.z3 %t.bin\n// RUN: %t.uninstrumented %t.out/id-0-0-0 | FileCheck --check-prefix=CHECK-GEN %s\n\n#include <stdint.h>\n#include <stdio.h>\n#include <stdlib.h>\n#include <string.h>\n#include \"lib.h\"\n\nint  __attribute__ ((noinline))  foo(int y) {\n  return y + 1024;\n}\n\nint main(int argc, char **argv) {\n  if (argc < 2) {\n    fprintf(stderr, \"Usage: %s [file]\\n\", argv[0]);\n    return -1;\n  }\n\n  char buf[20];\n  size_t ret;\n\n  FILE* fp = chk_fopen(argv[1], \"rb\");\n  chk_fread(buf, 1, sizeof(buf), fp);\n  fclose(fp);\n\n  int32_t  x = 0;\n\n  memcpy(&x, buf + 4, 4);\n\n  if (foo(x) == 8731)  {\n    // CHECK-GEN: Good\n    printf(\"Good\");\n  }\n  else {\n    // CHECK-ORIG: Bad\n    printf(\"Bad\");\n  }\n}\n"
  },
  {
    "path": "tests/call_fn3.c",
    "content": "// RUN: rm -rf %t.out\n// RUN: mkdir -p %t.out\n// RUN: python -c'print(\"A\"*20)' > %t.bin\n// RUN: clang -o %t.uninstrumented %s\n// RUN: %t.uninstrumented %t.bin | FileCheck --check-prefix=CHECK-ORIG %s\n// RUN: env KO_USE_FASTGEN=1 %ko-clang -o %t.fg %s\n// RUN: env TAINT_OPTIONS=\"taint_file=%t.bin output_dir=%t.out\" %fgtest %t.fg %t.bin\n// RUN: %t.uninstrumented %t.out/id-0-0-0 | FileCheck --check-prefix=CHECK-GEN %s\n// RUN: env KO_USE_Z3=1 %ko-clang -o %t.z3 %s\n// RUN: env TAINT_OPTIONS=\"taint_file=%t.bin output_dir=%t.out\" %t.z3 %t.bin\n// RUN: %t.uninstrumented %t.out/id-0-0-0 | FileCheck --check-prefix=CHECK-GEN %s\n\n#include <stdint.h>\n#include <stdio.h>\n#include <stdlib.h>\n#include <string.h>\n#include \"lib.h\"\n\nint  __attribute__ ((noinline))  foo(int y) {\n  return y == 39123;\n}\n\nint main(int argc, char **argv) {\n  if (argc < 2) {\n    fprintf(stderr, \"Usage: %s [file]\\n\", argv[0]);\n    return -1;\n  }\n\n  char buf[20];\n  FILE* fp = chk_fopen(argv[1], \"rb\");\n  chk_fread(buf, 1, sizeof(buf), fp);\n  fclose(fp);\n\n  int32_t y = 0;\n  memcpy(&y, buf + 4, 4); // y 4 - 7\n\n  if (foo(y) && buf[0] == 12) {\n    // CHECK-GEN: Good\n    printf(\"Good\");\n  }\n  else {\n    // CHECK-ORIG: Bad\n    printf(\"Bad\");\n  }\n}\n"
  },
  {
    "path": "tests/cf1.c",
    "content": "// RUN: rm -rf %t.out\n// RUN: mkdir -p %t.out\n// RUN: python -c'print(\"A\"*20)' > %t.bin\n// RUN: clang -o %t.uninstrumented %s\n// RUN: %t.uninstrumented %t.bin | FileCheck --check-prefix=CHECK-ORIG %s\n// RUN: env KO_USE_FASTGEN=1 %ko-clang -o %t.fg %s\n// RUN: env TAINT_OPTIONS=\"taint_file=%t.bin output_dir=%t.out\" %fgtest %t.fg %t.bin\n// RUN: %t.uninstrumented %t.out/id-0-0-0 | FileCheck --check-prefix=CHECK-GEN %s\n// RUN: env KO_USE_Z3=1 %ko-clang -o %t.z3 %s\n// RUN: env TAINT_OPTIONS=\"taint_file=%t.bin output_dir=%t.out\" %t.z3 %t.bin\n// RUN: %t.uninstrumented %t.out/id-0-0-0 | FileCheck --check-prefix=CHECK-GEN %s\n\n#include <stdint.h>\n#include <stdio.h>\n#include <stdlib.h>\n#include <string.h>\n#include \"lib.h\"\n\nint main(int argc, char **argv) {\n  if (argc < 2) {\n    fprintf(stderr, \"Usage: %s [file]\\n\", argv[0]);\n    return -1;\n  }\n\n  char buf[20];\n  FILE* fp = chk_fopen(argv[1], \"rb\");\n  chk_fread(buf, 1, sizeof(buf), fp);\n  fclose(fp);\n\n  int8_t x = 0;\n  int32_t y = 0;\n  int32_t m = 0;\n\n  memcpy(&x, buf, 1);\n  memcpy(&y, buf + 8, 4);\n  memcpy(&m, buf + 15, 4);\n  int z = 0;\n  if (x == 1) {\n    z = 123;\n  } else {\n    z = 998;\n  }\n\n  if (z == 123) {\n    // CHECK-GEN: Good\n    printf(\"Good\\n\");\n  }\n  else {\n    // CHECK-ORIG: Bad\n    printf(\"Bad\\n\");\n  }\n}\n"
  },
  {
    "path": "tests/concrete_haystack.c",
    "content": "// RUN: rm -rf %t.out\n// RUN: mkdir -p %t.out\n// RUN: python -c'print(\"AAA:BB\")' > %t.bin\n// RUN: clang -o %t.uninstrumented %s\n// RUN: %t.uninstrumented %t.bin | FileCheck --check-prefix=CHECK-ORIG %s\n// RUN: env KO_USE_FASTGEN=1 %ko-clang -o %t.fg %s\n// RUN: env TAINT_OPTIONS=\"taint_file=%t.bin output_dir=%t.out\" %fgtest %t.fg %t.bin\n// RUN: %t.uninstrumented %t.out/id-0-0-0 | FileCheck --check-prefix=CHECK-GEN1 %s\n// RUN: %t.uninstrumented %t.out/id-0-0-1 | FileCheck --check-prefix=CHECK-GEN2 %s\n// RUN: %t.uninstrumented %t.out/id-0-0-3 | FileCheck --check-prefix=CHECK-GEN3 %s\n// RUN: %t.uninstrumented %t.out/id-0-0-4 | FileCheck --check-prefix=CHECK-GEN4 %s\n\n#include <stdio.h>\n#include <stdlib.h>\n#include <string.h>\n\nint main(int argc, char **argv) {\n    if (argc < 2) {\n        fprintf(stderr, \"Usage: %s [file]\\n\", argv[0]);\n        return -1;\n    }\n\n    char *haystack = \"deadbeef\\0\";  // Concrete haystack\n    char input[256] = {0};\n\n    FILE* fp = fopen(argv[1], \"rb\");\n    if (!fp) {\n        fprintf(stderr, \"Failed to open\\n\");\n        return -1;\n    }\n    size_t n = fread(input, 1, sizeof(input) - 1, fp);\n    fclose(fp);\n    input[n] = '\\0';\n\n    // Test 1: strchr with concrete haystack + symbolic needle\n    char *pos = strchr(haystack, input[0]);\n    if (pos) {\n        // CHECK-GEN1: strchr: Found\n        printf(\"strchr: Found '%c' at position %ld\\n\", input[0], pos - haystack);\n        exit(0);\n    }\n\n    // Test 2: strrchr with concrete haystack + symbolic needle\n    char *rpos = strrchr(haystack, input[1]);\n    if (rpos) {\n        // CHECK-GEN2: strrchr: Found\n        printf(\"strrchr: Found '%c' at position %ld\\n\", input[1], rpos - haystack);\n        exit(0);\n    }\n\n    char *sep = strchr(&input[2], ':');\n    if (sep) {\n        *sep = '\\0'; // split input for strstr/strpbrk tests\n    } else {\n        printf(\"Missing ':' separator\\n\");\n        exit(1);\n    
}\n\n    // Test 3: strstr\n    char *spos = strstr(haystack, &input[2]);\n    if (spos) {\n        // CHECK-GEN3: strstr: Found\n        printf(\"strstr: Found substring at position %ld\\n\", spos - haystack);\n        exit(0);\n    }\n\n    // Test 4: strpbrk\n    char *pbrk_pos = strpbrk(haystack, sep + 1);\n    if (pbrk_pos) {\n        // CHECK-GEN4: strpbrk: Found\n        printf(\"strpbrk: Found character '%c' at position %ld\\n\", *pbrk_pos, pbrk_pos - haystack);\n        exit(0);\n    }\n\n    // CHECK-ORIG: Not found\n    printf(\"Not found\\n\");\n\n    return 0;\n}\n"
  },
  {
    "path": "tests/context.c",
    "content": "// RUN: rm -rf %t.out\n// RUN: mkdir -p %t.out\n// RUN: python -c'print(\"A\"*20)' > %t.bin\n// RUN: clang -o %t.uninstrumented %s\n// RUN: %t.uninstrumented %t.bin | FileCheck --check-prefix=CHECK-ORIG %s\n// RUN: env KO_USE_FASTGEN=1 %ko-clang -o %t.fg %s\n// RUN: env TAINT_OPTIONS=\"taint_file=%t.bin output_dir=%t.out\" %fgtest %t.fg %t.bin\n// RUN: %t.uninstrumented %t.out/id-0-0-0 | FileCheck --check-prefix=CHECK-GEN1 %s\n// RUN: %t.uninstrumented %t.out/id-0-0-1 | FileCheck --check-prefix=CHECK-GEN2 %s\n// RUN: cp %t.out/id-0-0-0 %t.bin1\n// RUN: cp %t.out/id-0-0-1 %t.bin2\n// RUN: env TAINT_OPTIONS=\"taint_file=%t.bin1 output_dir=%t.out\" %fgtest %t.fg %t.bin1\n// RUN: %t.uninstrumented %t.out/id-0-0-1 | FileCheck --check-prefix=CHECK-GEN12 %s\n// RUN: env TAINT_OPTIONS=\"taint_file=%t.bin2 output_dir=%t.out\" %fgtest %t.fg %t.bin2\n// RUN: %t.uninstrumented %t.out/id-0-0-2 | FileCheck --check-prefix=CHECK-GEN23 %s\n// RUN: env KO_USE_Z3=1 %ko-clang -o %t.z3 %s\n// RUN: env TAINT_OPTIONS=\"taint_file=%t.bin output_dir=%t.out\" %t.z3 %t.bin\n// RUN: %t.uninstrumented %t.out/id-0-0-0 | FileCheck --check-prefix=CHECK-GEN1 %s\n// RUN: %t.uninstrumented %t.out/id-0-0-1 | FileCheck --check-prefix=CHECK-GEN2 %s\n// RUN: cp %t.out/id-0-0-0 %t.bin1\n// RUN: cp %t.out/id-0-0-1 %t.bin2\n// RUN: env TAINT_OPTIONS=\"taint_file=%t.bin1 output_dir=%t.out\" %t.z3 %t.bin1\n// RUN: %t.uninstrumented %t.out/id-0-0-1 | FileCheck --check-prefix=CHECK-GEN12 %s\n// RUN: env TAINT_OPTIONS=\"taint_file=%t.bin2 output_dir=%t.out\" %t.z3 %t.bin2\n// RUN: %t.uninstrumented %t.out/id-0-0-2 | FileCheck --check-prefix=CHECK-GEN23 %s\n\n#include <stdio.h>\n#include <stdint.h>\n#include <stdlib.h>\n#include <string.h>\n#include \"lib.h\"\n\nint __attribute__ ((noinline)) foo(uint64_t x, uint64_t y) {\n  //int z = x - y + 10;\n  if (x + y == 3122) return 1;\n  return 0;\n}\n\nint main (int argc, char** argv) {\n  if (argc < 2) {\n    fprintf(stderr, \"Usage: %s [file]\\n\", 
argv[0]);\n    return -1;\n  }\n\n  char buf[20];\n  size_t ret;\n\n  FILE *fp = chk_fopen(argv[1], \"rb\");\n  chk_fread(buf, 1, sizeof(buf), fp);\n  fclose(fp);\n\n  uint32_t x = 0;\n  uint32_t y = 0;\n\n  memcpy(&x, buf, 4);\n  memcpy(&y, buf + 8, 4);\n\n  if (x > 41) {\n    if (foo(y, 570)) {\n      if (x == 12345) {\n        // CHECK-GEN23: GOOD4\n        printf(\"GOOD4\\n\");\n      } else {\n        // CHECK-GEN2: GOOD2\n        printf(\"GOOD2\\n\");\n      }\n    } else {\n      // CHECK-ORIG: BAD\n      printf(\"BAD\\n\");\n    }\n  } else {\n    if (foo(y, 312)) {\n      // CHECK-GEN12: GOOD3\n      printf(\"GOOD3\\n\");\n    } else {\n      // CHECK-GEN1: GOOD1\n      printf(\"GOOD1\\n\");\n    }\n  }\n\n  return 0;\n}\n"
  },
  {
    "path": "tests/cpp_fstream.cpp",
    "content": "// RUN: rm -rf %t.out\n// RUN: mkdir -p %t.out\n// RUN: python -c'print(\"A\"*20)' > %t.bin\n// RUN: clang++ -o %t.uninstrumented %s\n// RUN: %t.uninstrumented %t.bin | FileCheck --check-prefix=CHECK-ORIG %s\n// RUN: env KO_USE_FASTGEN=1 %ko-clang++ -o %t.fg %s\n// RUN: env TAINT_OPTIONS=\"taint_file=%t.bin output_dir=%t.out\" %fgtest %t.fg %t.bin\n// RUN: cp %t.out/id-0-0-0 %t.bin\n// RUN: env TAINT_OPTIONS=\"taint_file=%t.bin output_dir=%t.out\" %fgtest %t.fg %t.bin\n// RUN: cp %t.out/id-0-0-1 %t.bin\n// RUN: env TAINT_OPTIONS=\"taint_file=%t.bin output_dir=%t.out\" %fgtest %t.fg %t.bin\n// RUN: %t.uninstrumented %t.out/id-0-0-2 | FileCheck --check-prefix=CHECK-GEN %s\n\n// doesn't work with in-process z3 solver\n\n#include <string>\n#include <cstdio>\n#include <cerrno>\n#include <iostream>\n#include <fstream>\n#include <cstdlib>\n#include <cstring>\n\nint main (int argc, char** argv) {\n  if (argc < 2) {\n    std::cerr << \"Usage: \" << argv[0] << \"[file]\\n\";\n    return -1;\n  }\n\n  std::fstream in_file;\n  in_file.open(argv[1], std::ios::in | std::ios::binary);\n  if (!in_file.is_open()) return 0;\n\n  in_file.seekg (0, in_file.end);\n  int length = in_file.tellg();\n  in_file.seekg (0, in_file.beg);\n\n  if (length <= 3) {\n    std::cerr << \"Input too short\\n\";\n    return 0;\n  }\n\n  char *val = new char[length];\n  in_file.read(val, length);\n\n  if (val[0] == 'z' && val[1] == 'a' && val[2] == 'c') {\n    // CHECK-GEN: Good\n    std::cout << \"Good\\n\";\n  } else {\n    // CHECK-ORIG: Bad\n    std::cout << \"Bad\\n\";\n  }\n\n  return 0;\n}\n"
  },
  {
    "path": "tests/cpp_map.cpp",
    "content": "// RUN: rm -rf %t.out\n// RUN: mkdir -p %t.out\n// RUN: clang++ -o %t.uninstrumented %s\n// RUN: %t.uninstrumented | FileCheck --check-prefix=CHECK-BUG %s\n// RUN: env KO_USE_FASTGEN=1 %ko-clang++ -o %t.fg %s\n// RUN: %t.fg | FileCheck --check-prefix=CHECK-BUG %s\n\n#include <map>\n#include <string>\n#include <iostream>\n#include <cassert>\n\nint main() {\n  std::map<std::string, std::string> mymap;\n  std::string k1(\"key1\");\n  std::string k2(\"key2\");\n\n  mymap[k1] = \"xx1\";\n  mymap[k2] = \"xx2\";\n  \n  if (mymap[\"key1\"] == \"xx1\" && mymap[k2] == \"xx2\" && mymap[k1] != mymap[\"k2\"]) {\n    // CHECK-BUG: Good\n    std::cout << \"Good\\n\";\n  } else {\n    std::cout << \"Bad\\n\";\n  }\n\n  return 0;\n}\n"
  },
  {
    "path": "tests/cpp_string.cpp",
    "content": "// RUN: rm -rf %t.out\n// RUN: mkdir -p %t.out\n// RUN: python -c'print(\"A\"*20)' > %t.bin\n// RUN: clang++ -o %t.uninstrumented %s\n// RUN: %t.uninstrumented %t.bin | FileCheck --check-prefix=CHECK-ORIG %s\n// RUN: env KO_USE_FASTGEN=1 %ko-clang++ -o %t.fg %s\n// RUN: env TAINT_OPTIONS=\"taint_file=%t.bin output_dir=%t.out\" %fgtest %t.fg %t.bin\n// RUN: %t.uninstrumented %t.out/id-0-0-2 | FileCheck --check-prefix=CHECK-GEN %s\n\n// doesn't work with in-process z3 solver\n\n#include <cerrno>\n#include <cstdio>\n#include <cstdlib>\n#include <cstring>\n#include <iostream>\n#include <sstream>\n#include <string>\nint main(int argc, char **argv) {\n  if (argc < 2) {\n    fprintf(stderr, \"Usage: %s [file]\\n\", argv[0]);\n    return -1;\n  }\n\n  char buf[256] = {0};\n  FILE* fp = fopen(argv[1], \"rb\");\n  if (!fp) {\n    fprintf(stderr, \"Failed to open\\n\");\n    return -1;\n  }\n  size_t n = fread(buf, 1, sizeof(buf) - 1, fp);\n  fclose(fp);\n  buf[n] = '\\0';\n\n  // if (contents.substr(0, 7) == \"iamback\") {\n  //   std::cout <<\" hhe\\n\";\n  //   abort();\n  // }\n\n  // if (contents[1] == 'y' && contents[2] == 'x') {\n  //   abort();\n  // }\n\n  std::string val(buf);\n  \n  // if (val.compare(\"deadbeef\") == 0) {\n  // if (val == \"deadbeef\") {\n  if (strcmp(val.c_str(), \"deadbeef\") == 0) {\n    // CHECK-GEN: Good\n    std::cout << \"Good\\n\";\n  } else {\n    // CHECK-ORIG: Bad\n    std::cout << \"Bad\\n\";\n  }\n\n  return 0;\n}\n"
  },
  {
    "path": "tests/gep.c",
    "content": "// RUN: rm -rf %t.out\n// RUN: mkdir -p %t.out\n// RUN: python -c'print(\"\\x00\\x00\\x00\\x00\")' > %t.bin\n// RUN: clang -o %t.uninstrumented %s\n// RUN: %t.uninstrumented %t.bin | FileCheck --check-prefix=CHECK-ORIG %s\n// RUN: env KO_USE_FASTGEN=1 %ko-clang -o %t.fg %s\n// RUN: env TAINT_OPTIONS=\"taint_file=%t.bin output_dir=%t.out\" %fgtest %t.fg %t.bin\n// RUN: %t.uninstrumented %t.out/id-0-0-0 | FileCheck --check-prefix=CHECK-GEN1 %s\n// RUN: %t.uninstrumented %t.out/id-0-0-1 | FileCheck --check-prefix=CHECK-GEN2 %s\n// RUN: %t.uninstrumented %t.out/id-0-0-2 | FileCheck --check-prefix=CHECK-GEN3 %s\n// RUN: %t.uninstrumented %t.out/id-0-0-3 | FileCheck --check-prefix=CHECK-GEN4 %s\n// RUN: env KO_USE_Z3=1 %ko-clang -o %t.z3 %s\n// RUN: env TAINT_OPTIONS=\"taint_file=%t.bin output_dir=%t.out\" %t.z3 %t.bin\n// RUN: %t.uninstrumented %t.out/id-0-0-0 | FileCheck --check-prefix=CHECK-GEN1 %s\n// RUN: %t.uninstrumented %t.out/id-0-0-1 | FileCheck --check-prefix=CHECK-GEN2 %s\n// RUN: %t.uninstrumented %t.out/id-0-0-2 | FileCheck --check-prefix=CHECK-GEN3 %s\n// RUN: %t.uninstrumented %t.out/id-0-0-3 | FileCheck --check-prefix=CHECK-GEN4 %s\n\n#include <stdint.h>\n#include <stdio.h>\n#include <stdlib.h>\n#include <string.h>\n#include \"lib.h\"\n\nstruct point_t {\n  int x;\n  int y;\n};\n\nstruct graph_t {\n  struct point_t points[0x100];\n};\n\nstruct set_t {\n  struct graph_t graph[0x100];\n};\n\nint main (int argc, char** argv) {\n  if (argc < 2) {\n    fprintf(stderr, \"Usage: %s [file]\\n\", argv[0]);\n    return -1;\n  }\n\n  int index = 0;\n  char buf[0x100];\n  struct point_t points[0x100];\n  struct graph_t graph;\n  struct set_t set;\n\n  FILE* fp = chk_fopen(argv[1], \"rb\");\n  chk_fread(&index, 1, sizeof(index), fp);\n\n  if (&buf[index] == &buf[1]) {\n    // CHECK-GEN1: Good1\n    printf(\"Good1\\n\");\n  }\n  else {\n    // CHECK-ORIG: Bad\n    printf(\"Bad\\n\");\n  }\n\n  if (&points[index] == &points[2]) {\n    // CHECK-GEN2: 
Good2\n    printf(\"Good2\\n\");\n  }\n  else {\n    // CHECK-ORIG: Bad\n    printf(\"Bad\\n\");\n  }\n\n  if (&graph.points[index] == &graph.points[3]) {\n    // CHECK-GEN3: Good3\n    printf(\"Good3\\n\");\n  }\n  else {\n    // CHECK-ORIG: Bad\n    printf(\"Bad\\n\");\n  }\n\n  if (&set.graph[index].points[4] == &set.graph[4].points[4]) {\n    // CHECK-GEN4: Good4\n    printf(\"Good4\\n\");\n  }\n  else {\n    // CHECK-ORIG: Bad\n    printf(\"Bad\\n\");\n  }\n\n  fclose(fp);\n}\n"
  },
  {
    "path": "tests/if_eq.c",
    "content": "// RUN: rm -rf %t.out\n// RUN: mkdir -p %t.out\n// RUN: python -c'print(\"AABB\"*10)' > %t.bin\n// RUN: clang -o %t.uninstrumented %s\n// RUN: %t.uninstrumented %t.bin | FileCheck --check-prefix=CHECK-ORIG %s\n// RUN: env KO_USE_FASTGEN=1 %ko-clang -o %t.fg %s\n// RUN: env TAINT_OPTIONS=\"taint_file=%t.bin output_dir=%t.out\" %fgtest %t.fg %t.bin\n// RUN: %t.uninstrumented %t.out/id-0-0-0 | FileCheck --check-prefix=CHECK-GEN %s\n// RUN: env KO_USE_Z3=1 %ko-clang -o %t.z3 %s\n// RUN: env TAINT_OPTIONS=\"taint_file=%t.bin output_dir=%t.out\" %t.z3 %t.bin\n// RUN: %t.uninstrumented %t.out/id-0-0-0 | FileCheck --check-prefix=CHECK-GEN %s\n\n#include <stdint.h>\n#include <stdio.h>\n#include <stdlib.h>\n#include <string.h>\n#include \"lib.h\"\n\nint main (int argc, char** argv) {\n  if (argc < 2) {\n    fprintf(stderr, \"Usage: %s [file]\\n\", argv[0]);\n    return -1;\n  }\n\n  char buf[20];\n  FILE* fp = chk_fopen(argv[1], \"rb\");\n  chk_fread(buf, 1, sizeof(buf), fp);\n  fclose(fp);\n\n  uint16_t x = 0;\n  uint16_t y = 0;\n\n  memcpy(&x, buf + 1, 2); // x 0 - 1\n  memcpy(&y, buf + 4, 2); // y 4 - 7\n\n  if (x == y) {\n    // CHECK-GEN: Good\n    printf(\"Good\\n\");\n  }\n  else {\n    // CHECK-ORIG: Bad\n    printf(\"Bad\\n\");\n  }\n}\n"
  },
  {
    "path": "tests/infer_type.c",
    "content": "// RUN: rm -rf %t.out\n// RUN: mkdir -p %t.out\n// RUN: python -c'print(\"A\"*20)' > %t.bin\n// RUN: clang -o %t.uninstrumented %s\n// RUN: %t.uninstrumented %t.bin | FileCheck --check-prefix=CHECK-ORIG %s\n// RUN: env KO_USE_FASTGEN=1 %ko-clang -o %t.fg %s\n// RUN: env TAINT_OPTIONS=\"taint_file=%t.bin output_dir=%t.out\" %fgtest %t.fg %t.bin\n// RUN: %t.uninstrumented %t.out/id-0-0-0 | FileCheck --check-prefix=CHECK-GEN %s\n// RUN: env KO_USE_Z3=1 %ko-clang -o %t.z3 %s\n// RUN: env TAINT_OPTIONS=\"taint_file=%t.bin output_dir=%t.out\" %t.z3 %t.bin\n// RUN: %t.uninstrumented %t.out/id-0-0-0 | FileCheck --check-prefix=CHECK-GEN %s\n\n#include <stdint.h>\n#include <stdio.h>\n#include <stdlib.h>\n#include <string.h>\n#include \"lib.h\"\n\nint main(int argc, char **argv) {\n  if (argc < 2) {\n    fprintf(stderr, \"Usage: %s [file]\\n\", argv[0]);\n    return -1;\n  }\n\n  char buf[20];\n  size_t ret;\n\n  FILE* fp = chk_fopen(argv[1], \"rb\");\n  chk_fread(buf, 1, sizeof(buf), fp);\n  fclose(fp);\n\n  uint16_t x = 0;\n  int32_t y = 0;\n  int32_t z = 0;\n  uint32_t a = 0;\n\n  memcpy(&x, buf + 1, 2);  // x 0 - 1\n  memcpy(&y, buf + 4, 4);  // y 4 - 7\n  memcpy(&z, buf + 10, 4); // 10 - 13\n  memcpy(&a, buf + 14, 4); // 14 - 17\n\n  uint32_t bb = x + y + z + a;\n  if (bb == 213) {\n    // CHECK-GEN: Good\n    printf(\"Good\\n\");\n  }\n  else {\n    // CHECK-ORIG: Bad\n    printf(\"Bad\\n\");\n  }\n}\n"
  },
  {
    "path": "tests/lib.h",
    "content": "#ifndef LIB_H\n#define LIB_H\n\n#include <stdio.h>\n#include <stdlib.h>\n\nFILE *chk_fopen(const char *pathname, const char *mode) {\n  FILE* fp = fopen(pathname, mode);\n  if (!fp) {\n    fprintf(stderr, \"Failed to open\\n\");\n    exit(0);\n  }\n  return fp;\n}\n\nvoid chk_fread(void *ptr, size_t size, size_t nmemb, FILE *stream) {\n  if (fread(ptr, size, nmemb, stream) != nmemb) {\n    fprintf(stderr, \"Failed to read\\n\");\n    exit(0);\n  }\n}\n\n\n#endif /* LIB_H */\n"
  },
  {
    "path": "tests/lit.cfg",
    "content": "import os\nimport sys\nimport re\nimport platform\n\ntry:\n  import lit.util\n  import lit.formats\nexcept ImportError:\n  pass\n\nconfig.name = \"SymSan\"\nconfig.test_format = lit.formats.ShTest(execute_external=False)\n\nconfig.suffixes = ['.c', '.cpp']\nconfig.test_source_root = os.path.join(config.source_dir, \"tests\")\n\n\nbin_dir = os.path.join(config.install_dir, \"bin\")\nif not os.path.exists(bin_dir):\n  lit_config.fatal(\"Cannot find install directory: {}\".format(bin_dir))\n\nif not os.path.exists(config.llvm_bin_dir):\n  lit_config.fatal(\"Cannot find llvm tool directory: {}\".format(config.llvm_bin_dir))\n\npath = os.path.pathsep.join([\n        config.llvm_bin_dir,\n        config.environment['PATH']\n      ])\n\nconfig.environment['PATH'] = path\n# config.environment['KO_CC'] = 'clang-14'\n# config.environment['KO_CXX'] = 'clang++-14'\n\n# Register the longer '%ko-clang++' token before '%ko-clang' so lit matches it\n# first; the tests invoke %ko-clang++ directly.\nconfig.substitutions.append(('%ko-clang++', os.path.join(bin_dir, \"ko-clang++\")))\nconfig.substitutions.append(('%ko-clangxx', os.path.join(bin_dir, \"ko-clang++\")))\nconfig.substitutions.append(('%ko-clang', os.path.join(bin_dir, \"ko-clang\")))\nconfig.substitutions.append(('%fgtest', os.path.join(bin_dir, \"fgtest\")))\n"
  },
  {
    "path": "tests/lit.site.cfg.in",
    "content": "config.build_dir = \"@CMAKE_BINARY_DIR@\"\nconfig.source_dir = \"@CMAKE_SOURCE_DIR@\"\nconfig.install_dir = \"@CMAKE_INSTALL_PREFIX@\"\nconfig.llvm_bin_dir = \"@LLVM_TOOLS_BINARY_DIR@\"\n\nlit_config.load_config(config, \"@CMAKE_SOURCE_DIR@/tests/lit.cfg\")\n"
  },
  {
    "path": "tests/memchr.c",
    "content": "// RUN: rm -rf %t.out\n// RUN: mkdir -p %t.out\n// RUN: python -c'print(\"A\"*20)' > %t.bin\n// RUN: clang -o %t.uninstrumented %s\n// RUN: %t.uninstrumented %t.bin | FileCheck --check-prefix=CHECK-ORIG %s\n// RUN: env KO_USE_FASTGEN=1 %ko-clang -o %t.fg %s\n// RUN: env TAINT_OPTIONS=\"taint_file=%t.bin output_dir=%t.out\" %fgtest %t.fg %t.bin\n// RUN: %t.uninstrumented %t.out/id-0-0-0 | FileCheck --check-prefix=CHECK-GEN %s\n\n#include <stdint.h>\n#include <stdio.h>\n#include <stdlib.h>\n#include <string.h>\n\nint main(int argc, char **argv) {\n  if (argc < 2) {\n    fprintf(stderr, \"Usage: %s [file]\\n\", argv[0]);\n    return -1;\n  }\n\n  char buf[256] = {0};\n  FILE* fp = fopen(argv[1], \"rb\");\n  if (!fp) {\n    fprintf(stderr, \"Failed to open\\n\");\n    return -1;\n  }\n  size_t n = fread(buf, 1, sizeof(buf) - 1, fp);\n  fclose(fp);\n\n  void *p = memchr(buf, 0x7f, n);\n  if (p != NULL) {\n    // CHECK-GEN: Found byte\n    printf(\"Found byte\\n\");\n  } else {\n    // CHECK-ORIG: No byte\n    printf(\"No byte\\n\");\n  }\n  return 0;\n}\n"
  },
  {
    "path": "tests/memchr_chain.c",
    "content": "// RUN: rm -rf %t.out\n// RUN: mkdir -p %t.out\n// RUN: python -c'print(\"A\"*20)' > %t.bin\n// RUN: clang -o %t.uninstrumented %s\n// RUN: %t.uninstrumented %t.bin | FileCheck --check-prefix=CHECK-ORIG %s\n// RUN: env KO_USE_FASTGEN=1 %ko-clang -o %t.fg %s\n// First iteration: finds first colon\n// RUN: env TAINT_OPTIONS=\"taint_file=%t.bin output_dir=%t.out\" %fgtest %t.fg %t.bin\n// Second iteration: finds second colon using output from first\n// RUN: env TAINT_OPTIONS=\"taint_file=%t.out/id-0-0-0 output_dir=%t.out session_id=1 enum_gep=0\" %fgtest %t.fg %t.out/id-0-0-0\n// RUN: %t.uninstrumented %t.out/id-0-1-1 | FileCheck --check-prefix=CHECK-GEN %s\n\n// Test chained memchr with bounded length from previous result:\n// t1 = memchr(buf, c1, len); t2 = memchr(buf, c2, t1-buf);\n// This tests forward search (indexof) with substr constraint\n\n#define _GNU_SOURCE\n#include <stdint.h>\n#include <stdio.h>\n#include <stdlib.h>\n#include <string.h>\n\nint main(int argc, char **argv) {\n  if (argc < 2) {\n    fprintf(stderr, \"Usage: %s [file]\\n\", argv[0]);\n    return -1;\n  }\n\n  char buf[256] = {0};\n  FILE* fp = fopen(argv[1], \"rb\");\n  if (!fp) {\n    fprintf(stderr, \"Failed to open\\n\");\n    return -1;\n  }\n  size_t n = fread(buf, 1, sizeof(buf) - 1, fp);\n  fclose(fp);\n  buf[n] = '\\0';\n\n  // First memchr: find first ';' in the buffer\n  char *t1 = (char *)memchr(buf, ';', n);\n  if (t1) {\n    // Second memchr: find ':' that appears after the ';'\n    // This uses the bounded search pattern: memchr(t1, ':', n - (t1 - buf))\n    size_t len_after_t1 = n - (t1 - buf);\n    char *t2 = (char *)memchr(t1, ':', len_after_t1);\n    if (t2) {\n      // CHECK-GEN: Found colon before semicolon\n      printf(\"Found colon before semicolon (colon at %ld, semicolon at %ld)\\n\",\n             (long)(t2 - buf), (long)(t1 - buf));\n    } else {\n      printf(\"Found semicolon but no colon before it (semicolon at %ld)\\n\",\n             (long)(t1 - 
buf));\n    }\n  } else {\n    // CHECK-ORIG: No semicolon\n    printf(\"No semicolon\\n\");\n  }\n  return 0;\n}\n"
  },
  {
    "path": "tests/memchr_mixed_chain.c",
    "content": "// RUN: rm -rf %t.out\n// RUN: mkdir -p %t.out\n// RUN: python -c'print(\"A\"*20)' > %t.bin\n// RUN: clang -o %t.uninstrumented %s\n// RUN: %t.uninstrumented %t.bin | FileCheck --check-prefix=CHECK-ORIG %s\n// RUN: env KO_USE_FASTGEN=1 %ko-clang -o %t.fg %s\n// First iteration: finds last semicolon (backward search)\n// RUN: env TAINT_OPTIONS=\"taint_file=%t.bin output_dir=%t.out\" %fgtest %t.fg %t.bin\n// Second iteration: finds colon before the semicolon (forward search with bound)\n// RUN: env TAINT_OPTIONS=\"taint_file=%t.out/id-0-0-0 output_dir=%t.out session_id=1 enum_gep=0\" %fgtest %t.fg %t.out/id-0-0-0\n// RUN: %t.uninstrumented %t.out/id-0-1-1 | FileCheck --check-prefix=CHECK-GEN %s\n\n// Test mixed chain: memrchr (backward) followed by memchr (forward with bound)\n// t1 = memrchr(buf, ';', len);  // find LAST semicolon\n// t2 = memchr(buf, ':', t1-buf); // find first colon BEFORE the semicolon\n// This tests combining last_indexof and indexof with substr constraint\n\n#define _GNU_SOURCE\n#include <stdint.h>\n#include <stdio.h>\n#include <stdlib.h>\n#include <string.h>\n\nint main(int argc, char **argv) {\n  if (argc < 2) {\n    fprintf(stderr, \"Usage: %s [file]\\n\", argv[0]);\n    return -1;\n  }\n\n  char buf[256] = {0};\n  FILE* fp = fopen(argv[1], \"rb\");\n  if (!fp) {\n    fprintf(stderr, \"Failed to open\\n\");\n    return -1;\n  }\n  size_t n = fread(buf, 1, sizeof(buf) - 1, fp);\n  fclose(fp);\n  buf[n] = '\\0';\n\n  // First: memrchr finds LAST ';' (backward search / last_indexof)\n  char *t1 = (char *)memrchr(buf, ';', n);\n  if (t1) {\n    // Second: memchr finds first ':' BEFORE the ';' (forward search with bound)\n    size_t len_before_t1 = t1 - buf;\n    char *t2 = (char *)memchr(buf, ':', len_before_t1);\n    if (t2) {\n      // CHECK-GEN: Found colon before last semicolon\n      printf(\"Found colon before last semicolon (colon at %ld, semicolon at %ld)\\n\",\n             (long)(t2 - buf), (long)(t1 - buf));\n    } 
else {\n      printf(\"Found semicolon but no colon before it (semicolon at %ld)\\n\",\n             (long)(t1 - buf));\n    }\n  } else {\n    // CHECK-ORIG: No semicolon\n    printf(\"No semicolon\\n\");\n  }\n  return 0;\n}\n"
  },
  {
    "path": "tests/memcmp.c",
    "content": "// RUN: rm -rf %t.out\n// RUN: mkdir -p %t.out\n// RUN: python -c'print(\"A\"*20)' > %t.bin\n// RUN: clang -o %t.uninstrumented %s\n// RUN: %t.uninstrumented %t.bin | FileCheck --check-prefix=CHECK-ORIG %s\n// RUN: env KO_USE_FASTGEN=1 %ko-clang -o %t.fg %s\n// RUN: env TAINT_OPTIONS=\"taint_file=%t.bin output_dir=%t.out\" %fgtest %t.fg %t.bin\n// RUN: %t.uninstrumented %t.out/id-0-0-0 | FileCheck --check-prefix=CHECK-GEN %s\n// RUN: env KO_USE_Z3=1 %ko-clang -o %t.z3 %s\n// RUN: env TAINT_OPTIONS=\"taint_file=%t.bin output_dir=%t.out\" %t.z3 %t.bin\n// RUN: %t.uninstrumented %t.out/id-0-0-0 | FileCheck --check-prefix=CHECK-GEN %s\n\n#include <stdint.h>\n#include <stdio.h>\n#include <stdlib.h>\n#include <string.h>\n#include \"lib.h\"\n\nint main(int argc, char **argv) {\n  if (argc < 2) {\n    fprintf(stderr, \"Usage: %s [file]\\n\", argv[0]);\n    return -1;\n  }\n\n  char buf[20];\n  FILE* fp = chk_fopen(argv[1], \"rb\");\n  chk_fread(buf, 1, sizeof(buf), fp);\n  fclose(fp);\n\n  char b[10] = {1, 1, 1, 1, 1, 2, 3, 4, 5, 0};\n\n  if (memcmp(b, buf, 9) == 0) {\n    // CHECK-GEN: Good\n    printf(\"Good\\n\");\n  }\n  else {\n    // CHECK-ORIG: Bad\n    printf(\"Bad\\n\");\n  }\n}\n"
  },
  {
    "path": "tests/memmem.c",
    "content": "// RUN: rm -rf %t.out\n// RUN: mkdir -p %t.out\n// RUN: python -c'print(\"A\"*20)' > %t.bin\n// RUN: clang -o %t.uninstrumented %s\n// RUN: %t.uninstrumented %t.bin | FileCheck --check-prefix=CHECK-ORIG %s\n// RUN: env KO_USE_FASTGEN=1 %ko-clang -o %t.fg %s\n// RUN: env TAINT_OPTIONS=\"taint_file=%t.bin output_dir=%t.out\" %fgtest %t.fg %t.bin\n// RUN: %t.uninstrumented %t.out/id-0-0-0 | FileCheck --check-prefix=CHECK-GEN %s\n\n// Test memmem: find substring with explicit lengths\n\n#define _GNU_SOURCE\n#include <stdint.h>\n#include <stdio.h>\n#include <stdlib.h>\n#include <string.h>\n\nint main(int argc, char **argv) {\n  if (argc < 2) {\n    fprintf(stderr, \"Usage: %s [file]\\n\", argv[0]);\n    return -1;\n  }\n\n  char buf[256] = {0};\n  FILE* fp = fopen(argv[1], \"rb\");\n  if (!fp) {\n    fprintf(stderr, \"Failed to open\\n\");\n    return -1;\n  }\n  size_t n = fread(buf, 1, sizeof(buf) - 1, fp);\n  fclose(fp);\n\n  // memmem with explicit lengths (needle is \"ab\\0c\" including null byte)\n  const char needle[] = \"ABCD\";\n  void *t1 = memmem(buf, n, needle, 4);\n  if (t1) {\n    // CHECK-GEN: Found pattern\n    printf(\"Found pattern at position %ld\\n\", (long)((char*)t1 - buf));\n  } else {\n    // CHECK-ORIG: Pattern not found\n    printf(\"Pattern not found\\n\");\n  }\n  return 0;\n}\n"
  },
  {
    "path": "tests/memrchr.c",
    "content": "// RUN: rm -rf %t.out\n// RUN: mkdir -p %t.out\n// RUN: python -c'print(\"A\"*20)' > %t.bin\n// RUN: clang -o %t.uninstrumented %s\n// RUN: %t.uninstrumented %t.bin | FileCheck --check-prefix=CHECK-ORIG %s\n// RUN: env KO_USE_FASTGEN=1 %ko-clang -o %t.fg %s\n// RUN: env TAINT_OPTIONS=\"taint_file=%t.bin output_dir=%t.out\" %fgtest %t.fg %t.bin\n// RUN: %t.uninstrumented %t.out/id-0-0-0 | FileCheck --check-prefix=CHECK-GEN %s\n\n#define _GNU_SOURCE\n#include <stdint.h>\n#include <stdio.h>\n#include <stdlib.h>\n#include <string.h>\n\nint main(int argc, char **argv) {\n  if (argc < 2) {\n    fprintf(stderr, \"Usage: %s [file]\\n\", argv[0]);\n    return -1;\n  }\n\n  char buf[256] = {0};\n  FILE* fp = fopen(argv[1], \"rb\");\n  if (!fp) {\n    fprintf(stderr, \"Failed to open\\n\");\n    return -1;\n  }\n  size_t n = fread(buf, 1, sizeof(buf) - 1, fp);\n  fclose(fp);\n\n  void *p = memrchr(buf, 0x7f, n);\n  if (p != NULL) {\n    // CHECK-GEN: Found byte\n    printf(\"Found byte\\n\");\n  } else {\n    // CHECK-ORIG: No byte\n    printf(\"No byte\\n\");\n  }\n  return 0;\n}\n"
  },
  {
    "path": "tests/memrchr_chain.c",
    "content": "// RUN: rm -rf %t.out\n// RUN: mkdir -p %t.out\n// RUN: python -c'print(\"A\"*20)' > %t.bin\n// RUN: clang -o %t.uninstrumented %s\n// RUN: %t.uninstrumented %t.bin | FileCheck --check-prefix=CHECK-ORIG %s\n// RUN: env KO_USE_FASTGEN=1 %ko-clang -o %t.fg %s\n// First iteration: finds last colon (searching from end)\n// RUN: env TAINT_OPTIONS=\"taint_file=%t.bin output_dir=%t.out\" %fgtest %t.fg %t.bin\n// Second iteration: finds second-to-last colon using output from first\n// RUN: env TAINT_OPTIONS=\"taint_file=%t.out/id-0-0-0 output_dir=%t.out session_id=1 enum_gep=0\" %fgtest %t.fg %t.out/id-0-0-0\n// RUN: %t.uninstrumented %t.out/id-0-1-1 | FileCheck --check-prefix=CHECK-GEN %s\n\n// Test chained memrchr: t1 = memrchr(h, c, len); t2 = memrchr(h, c, t1-h);\n// This tests reverse search (last_indexof) and pointer arithmetic\n\n#define _GNU_SOURCE\n#include <stdint.h>\n#include <stdio.h>\n#include <stdlib.h>\n#include <string.h>\n\nint main(int argc, char **argv) {\n  if (argc < 2) {\n    fprintf(stderr, \"Usage: %s [file]\\n\", argv[0]);\n    return -1;\n  }\n\n  char buf[256] = {0};\n  FILE* fp = fopen(argv[1], \"rb\");\n  if (!fp) {\n    fprintf(stderr, \"Failed to open\\n\");\n    return -1;\n  }\n  size_t n = fread(buf, 1, sizeof(buf) - 1, fp);\n  fclose(fp);\n  buf[n] = '\\0';\n\n  // First memrchr: find LAST colon (searching from end)\n  char *t1 = (char *)memrchr(buf, ':', n);\n  if (t1) {\n    // Second memrchr: find second-to-last colon (search from start up to t1)\n    size_t len_before_t1 = t1 - buf;\n    char *t2 = (char *)memrchr(buf, ':', len_before_t1);\n    if (t2) {\n      // CHECK-GEN: Found two colons (last at\n      printf(\"Found two colons (last at %ld, second-to-last at %ld)\\n\",\n             (long)(t1 - buf), (long)(t2 - buf));\n    } else {\n      printf(\"Found one colon (at %ld)\\n\", (long)(t1 - buf));\n    }\n  } else {\n    // CHECK-ORIG: No colons\n    printf(\"No colons\\n\");\n  }\n  return 0;\n}\n"
  },
  {
    "path": "tests/mini.c",
    "content": "// RUN: rm -rf %t.out\n// RUN: mkdir -p %t.out\n// RUN: python -c'print(\"A\"*20)' > %t.bin\n// RUN: clang -o %t.uninstrumented %s\n// RUN: %t.uninstrumented %t.bin | FileCheck --check-prefix=CHECK-ORIG %s\n// RUN: env KO_USE_FASTGEN=1 %ko-clang -o %t.fg %s\n// RUN: env TAINT_OPTIONS=\"taint_file=%t.bin output_dir=%t.out\" %fgtest %t.fg %t.bin\n// RUN: %t.uninstrumented %t.out/id-0-0-5 | FileCheck --check-prefix=CHECK-GEN %s\n// RUN: env KO_USE_Z3=1 %ko-clang -o %t.z3 %s\n// RUN: env TAINT_OPTIONS=\"taint_file=%t.bin output_dir=%t.out debug=1\" %t.z3 %t.bin\n// RUN: %t.uninstrumented %t.out/id-0-0-5 | FileCheck --check-prefix=CHECK-GEN %s\n\n#include <stdint.h>\n#include <stdio.h>\n#include <stdlib.h>\n#include <string.h>\n#include \"lib.h\"\n\nint main(int argc, char **argv) {\n  if (argc < 2) {\n    fprintf(stderr, \"Usage: %s [file]\\n\", argv[0]);\n    return -1;\n  }\n\n  char buf[20];\n  FILE* fp = chk_fopen(argv[1], \"rb\");\n  fread(buf, 1, sizeof(buf), fp);\n  fclose(fp);\n\n  uint16_t x = 0;\n  int32_t y = 0;\n  int32_t z = 0;\n  uint32_t a = 0;\n\n  memcpy(&x, buf + 1, 2);  // x 1 - 2\n  memcpy(&y, buf + 4, 4);  // y 4 - 7\n  memcpy(&z, buf + 10, 4); // 10 - 13\n  memcpy(&a, buf + 14, 4); // 14 - 17\n  if (x > 12300 && x < 12350 && z < -100000000 && z > -100000005 &&\n      z != -100000003 && y >= 987654321 && y <= 987654325 && a == 123456789) {\n    // CHECK-GEN: Good\n    printf(\"Good\\n\");\n  }\n  else {\n    // CHECK-ORIG: Bad\n    printf(\"Bad\\n\");\n  }\n}\n"
  },
  {
    "path": "tests/mini2.c",
    "content": "// RUN: rm -rf %t.out\n// RUN: mkdir -p %t.out\n// RUN: python -c'print(\"A\"*32)' > %t.bin\n// RUN: clang -o %t.uninstrumented %s\n// RUN: env KO_USE_FASTGEN=1 %ko-clang -o %t.fg %s\n// RUN: env TAINT_OPTIONS=\"taint_file=%t.bin output_dir=%t.out\" %fgtest %t.fg %t.bin\n// RUN: %t.uninstrumented %t.out/id-0-0-0 | FileCheck --check-prefix=CHECK-GEN1 %s\n// RUN: %t.uninstrumented %t.out/id-0-0-1 | FileCheck --check-prefix=CHECK-GEN2 %s\n// RUN: %t.uninstrumented %t.out/id-0-0-2 | FileCheck --check-prefix=CHECK-GEN3 %s\n// RUN: %t.uninstrumented %t.out/id-0-0-3 | FileCheck --check-prefix=CHECK-GEN4 %s\n// RUN: %t.uninstrumented %t.out/id-0-0-4 | FileCheck --check-prefix=CHECK-GEN5 %s\n// RUN: %t.uninstrumented %t.out/id-0-0-5 | FileCheck --check-prefix=CHECK-GEN6 %s\n// RUN: env KO_USE_Z3=1 %ko-clang -o %t.z3 %s\n// RUN: env TAINT_OPTIONS=\"taint_file=%t.bin output_dir=%t.out\" %t.z3 %t.bin\n// RUN: %t.uninstrumented %t.out/id-0-0-0 | FileCheck --check-prefix=CHECK-GEN1 %s\n// RUN: %t.uninstrumented %t.out/id-0-0-1 | FileCheck --check-prefix=CHECK-GEN2 %s\n// RUN: %t.uninstrumented %t.out/id-0-0-2 | FileCheck --check-prefix=CHECK-GEN3 %s\n// RUN: %t.uninstrumented %t.out/id-0-0-3 | FileCheck --check-prefix=CHECK-GEN4 %s\n// RUN: %t.uninstrumented %t.out/id-0-0-4 | FileCheck --check-prefix=CHECK-GEN5 %s\n// RUN: %t.uninstrumented %t.out/id-0-0-5 | FileCheck --check-prefix=CHECK-GEN6 %s\n\n#include <stdint.h>\n#include <stdio.h>\n#include <stdlib.h>\n#include <string.h>\n#include \"lib.h\"\n\nint main (int argc, char** argv) {\n  if (argc < 2) {\n    fprintf(stderr, \"Usage: %s [file]\\n\", argv[0]);\n    return -1;\n  }\n\n  char buf[32];\n  FILE* fp = chk_fopen(argv[1], \"rb\");\n\n  chk_fread(buf, 1, sizeof(buf), fp);\n  fclose(fp);\n\n  int32_t a = 0;\n  int32_t b = 0;\n  int32_t c = 0;\n  int32_t d = 0;\n  int32_t e = 0;\n  int32_t f = 0;\n\n  memcpy(&a, buf, 4);\n  memcpy(&b, buf + 4, 4);\n  memcpy(&c, buf + 8, 4);\n  memcpy(&d, buf + 12, 4);\n  
memcpy(&e, buf + 16, 4);\n  memcpy(&f, buf + 20, 4);\n\n  if (a == 0xff) {\n    // CHECK-GEN1: Good1\n    printf(\"Good1\\n\");\n  }\n\n  if (a < b) {\n    // CHECK-GEN2: Good2\n    printf(\"Good2\\n\");\n  }\n\n  if (c + b == 10) {\n    // CHECK-GEN3: Good3\n    printf(\"Good3\\n\");\n  }\n\n  if (d  == 0xcc) {\n    // CHECK-GEN4: Good4\n    printf(\"Good4\\n\");\n  }\n\n  if (e - f == 0xdeadbeef) {\n    // CHECK-GEN5: Good5\n    printf(\"Good5\\n\");\n  }\n\n  if (f != b) {\n    // CHECK-GEN6: Good6\n    printf(\"Good6\\n\");\n  }\n\n  return 0;\n}\n"
  },
  {
    "path": "tests/optimistic.c",
    "content": "// RUN: rm -rf %t.out\n// RUN: mkdir -p %t.out\n// RUN: python -c\"import sys; sys.stdout.buffer.write(b'\\xef\\xbe\\xad\\xde')\" > %t.bin\n// RUN: clang -o %t.uninstrumented %s\n// RUN: env KO_DONT_OPTIMIZE=1 KO_USE_FASTGEN=1 %ko-clang -o %t.fg %s\n// RUN: env TAINT_OPTIONS=\"taint_file=%t.bin output_dir=%t.out\" %fgtest %t.fg %t.bin\n// RUN: %t.uninstrumented %t.out/id-0-0-1 | FileCheck --check-prefix=CHECK-GEN %s\n// RUN: env KO_DONT_OPTIMIZE=1 KO_USE_Z3=1 %ko-clang -o %t.z3 %s\n// RUN: env TAINT_OPTIONS=\"taint_file=%t.bin output_dir=%t.out\" %t.z3 %t.bin\n// RUN: %t.uninstrumented %t.out/id-0-0-1 | FileCheck --check-prefix=CHECK-GEN %s\n\n#include <stdint.h>\n#include <stdio.h>\n#include <stdlib.h>\n#include <string.h>\n#include \"lib.h\"\n\nint main (int argc, char** argv) {\n  if (argc < 2) {\n    fprintf(stderr, \"Usage: %s [file]\\n\", argv[0]);\n    return -1;\n  }\n\n  uint32_t x = 0;\n  FILE* fp = chk_fopen(argv[1], \"rb\");\n  chk_fread(&x, sizeof(x), 1, fp);\n  fclose(fp);\n\n  if (x == 0xdeadbeef) {\n    // CHECK-ORIG: Bad\n    printf(\"Bad\\n\");\n  }\n\n  if (x == 0xbadf00d) {\n    // CHECK-GEN: Good\n    printf(\"Good\\n\");\n  }\n}\n"
  },
  {
    "path": "tests/partial_concrete.c",
    "content": "// RUN: rm -rf %t.out\n// RUN: mkdir -p %t.out\n// RUN: python -c'print(\"A\"*20)' > %t.bin\n// RUN: clang -o %t.uninstrumented %s\n// RUN: %t.uninstrumented %t.bin | FileCheck --check-prefix=CHECK-ORIG %s\n// RUN: env KO_USE_FASTGEN=1 %ko-clang -o %t.fg %s\n// RUN: env TAINT_OPTIONS=\"taint_file=%t.bin output_dir=%t.out\" %fgtest %t.fg %t.bin\n// RUN: %t.uninstrumented %t.out/id-0-0-0 | FileCheck --check-prefix=CHECK-GEN %s\n// RUN: env KO_USE_Z3=1 %ko-clang -o %t.z3 %s\n// RUN: env TAINT_OPTIONS=\"taint_file=%t.bin output_dir=%t.out\" %t.z3 %t.bin\n// RUN: %t.uninstrumented %t.out/id-0-0-0 | FileCheck --check-prefix=CHECK-GEN %s\n\n#include <stdint.h>\n#include <stdio.h>\n#include <stdlib.h>\n#include <string.h>\n#include \"lib.h\"\n\nint main (int argc, char** argv) {\n  if (argc < 2) {\n    fprintf(stderr, \"Usage: %s [file]\\n\", argv[0]);\n    return -1;\n  }\n\n  char buf[20], copy[20];\n  FILE* fp = chk_fopen(argv[1], \"rb\");\n  chk_fread(buf, 1, sizeof(buf), fp);\n  fclose(fp);\n\n  memcpy(copy, buf, sizeof(buf));\n  copy[3] = 0x80;\n\n  if (buf[0] != 0x80\n      && *(int*)copy == 0x80adbeef) {\n    // CHECK-GEN: Good\n    printf(\"Good\\n\");\n  }\n  else {\n    // CHECK-ORIG: Bad\n    printf(\"Bad\\n\");\n  }\n}\n"
  },
  {
    "path": "tests/partial_concrete2.c",
    "content": "// RUN: rm -rf %t.out\n// RUN: mkdir -p %t.out\n// RUN: python -c'print(\"A\"*20)' > %t.bin\n// RUN: clang -o %t.uninstrumented %s\n// RUN: %t.uninstrumented %t.bin | FileCheck --check-prefix=CHECK-ORIG %s\n// RUN: env KO_DONT_OPTIMIZE=1 KO_USE_FASTGEN=1 %ko-clang -o %t.fg %s\n// RUN: env TAINT_OPTIONS=\"taint_file=%t.bin output_dir=%t.out\" %fgtest %t.fg %t.bin\n// RUN: %t.uninstrumented %t.out/id-0-0-0 | FileCheck --check-prefix=CHECK-GEN %s\n// RUN: env KO_DONT_OPTIMIZE=1 KO_USE_Z3=1 %ko-clang -o %t.z3 %s\n// RUN: env TAINT_OPTIONS=\"taint_file=%t.bin output_dir=%t.out\" %t.z3 %t.bin\n// RUN: %t.uninstrumented %t.out/id-0-0-0 | FileCheck --check-prefix=CHECK-GEN %s\n\n#include <stdint.h>\n#include <stdio.h>\n#include <stdlib.h>\n#include <string.h>\n#include \"lib.h\"\n\nint main (int argc, char** argv) {\n  if (argc < 2) {\n    fprintf(stderr, \"Usage: %s [file]\\n\", argv[0]);\n    return -1;\n  }\n\n  char buf[20], copy[20];\n  FILE* fp = chk_fopen(argv[1], \"rb\");\n  chk_fread(buf, 1, sizeof(buf), fp);\n  fclose(fp);\n\n  memset(copy, 0, sizeof(copy));\n  copy[2] = buf[0];\n\n  if (*(int*)copy == 0x00ee0000) {\n    // CHECK-GEN: Good\n    printf(\"Good\\n\");\n  }\n  else {\n    // CHECK-ORIG: Bad\n    printf(\"Bad\\n\");\n  }\n}\n"
  },
  {
    "path": "tests/partial_concrete3.c",
    "content": "// RUN: rm -rf %t.out\n// RUN: mkdir -p %t.out\n// RUN: python -c'print(\"A\"*20)' > %t.bin\n// RUN: clang -o %t.uninstrumented %s\n// RUN: %t.uninstrumented %t.bin | FileCheck --check-prefix=CHECK-ORIG %s\n// RUN: env KO_DONT_OPTIMIZE=1 KO_USE_FASTGEN=1 %ko-clang -o %t.fg %s\n// RUN: env TAINT_OPTIONS=\"taint_file=%t.bin output_dir=%t.out\" %fgtest %t.fg %t.bin\n// RUN: %t.uninstrumented %t.out/id-0-0-0 | FileCheck --check-prefix=CHECK-GEN %s\n// RUN: env KO_DONT_OPTIMIZE=1 KO_USE_Z3=1 %ko-clang -o %t.z3 %s\n// RUN: env TAINT_OPTIONS=\"taint_file=%t.bin output_dir=%t.out\" %t.z3 %t.bin\n// RUN: %t.uninstrumented %t.out/id-0-0-0 | FileCheck --check-prefix=CHECK-GEN %s\n\n#include <stdint.h>\n#include <stdio.h>\n#include <stdlib.h>\n#include <string.h>\n#include \"lib.h\"\n\nint main (int argc, char** argv) {\n  if (argc < 2) {\n    fprintf(stderr, \"Usage: %s [file]\\n\", argv[0]);\n    return -1;\n  }\n\n  char buf[20], copy[20];\n  FILE* fp = chk_fopen(argv[1], \"rb\");\n  chk_fread(buf, 1, sizeof(buf), fp);\n  fclose(fp);\n\n  memset(copy, 0, sizeof(copy));\n  copy[1] = buf[0];\n\n  if (*(int*)copy == 0x0000ee00) {\n    // CHECK-GEN: Good\n    printf(\"Good\\n\");\n  }\n  else {\n    // CHECK-ORIG: Bad\n    printf(\"Bad\\n\");\n  }\n}\n"
  },
  {
    "path": "tests/pointer.c",
    "content": "// RUN: rm -rf %t.out\n// RUN: mkdir -p %t.out\n// RUN: python -c'print(\"A\"*20)' > %t.bin\n// RUN: clang -o %t.uninstrumented %s\n// RUN: %t.uninstrumented %t.bin | FileCheck --check-prefix=CHECK-ORIG %s\n// RUN: env KO_USE_FASTGEN=1 %ko-clang -o %t.fg %s\n// RUN: env TAINT_OPTIONS=\"taint_file=%t.bin output_dir=%t.out\" %fgtest %t.fg %t.bin\n// RUN: %t.uninstrumented %t.out/id-0-0-0 | FileCheck --check-prefix=CHECK-GEN %s\n// RUN: env KO_USE_Z3=1 %ko-clang -o %t.z3 %s\n// RUN: env TAINT_OPTIONS=\"taint_file=%t.bin output_dir=%t.out\" %t.z3 %t.bin\n// RUN: %t.uninstrumented %t.out/id-0-0-0 | FileCheck --check-prefix=CHECK-GEN %s\n\n#include <stdint.h>\n#include <stdio.h>\n#include <stdlib.h>\n#include <string.h>\n#include \"lib.h\"\n\nint main(int argc, char **argv) {\n  if (argc < 2) {\n    fprintf(stderr, \"Usage: %s [file]\\n\", argv[0]);\n    return -1;\n  }\n\n  char buf[20];\n  FILE* fp = chk_fopen(argv[1], \"rb\");\n  chk_fread(buf, 1, sizeof(buf), fp);\n  fclose(fp);\n\n  void* x = NULL;\n  memcpy(&x, buf, sizeof x);\n  if (!x) {\n    // CHECK-GEN: Good\n    printf(\"Good\");\n  }\n  else {\n    // CHECK-ORIG: Bad\n    printf(\"Bad\");\n  }\n}\n"
  },
  {
    "path": "tests/prefixof.c",
    "content": "// RUN: rm -rf %t.out\n// RUN: mkdir -p %t.out\n// RUN: python -c'print(\"A\"*20)' > %t.bin\n// RUN: clang -o %t.uninstrumented %s\n// RUN: %t.uninstrumented %t.bin | FileCheck --check-prefix=CHECK-ORIG %s\n// RUN: env KO_USE_FASTGEN=1 %ko-clang -o %t.fg %s\n// RUN: env TAINT_OPTIONS=\"taint_file=%t.bin output_dir=%t.out\" %fgtest %t.fg %t.bin\n// RUN: %t.uninstrumented %t.out/id-0-0-1 | FileCheck --check-prefix=CHECK-GEN1 %s\n// RUN: %t.uninstrumented %t.out/id-0-0-2 | FileCheck --check-prefix=CHECK-GEN2 %s\n\n#include <stdint.h>\n#include <stdio.h>\n#include <stdlib.h>\n#include <string.h>\n\n// Provide simple implementations for uninstrumented build\nint prefixof(const char *str, const char *prefix) {\n  size_t prefix_len = strlen(prefix);\n  size_t str_len = strlen(str);\n  if (str_len >= prefix_len && memcmp(str, prefix, prefix_len) == 0) {\n    return 1;\n  }\n  return 0;\n}\n\nint suffixof(const char *str, const char *suffix) {\n  size_t suffix_len = strlen(suffix);\n  size_t str_len = strlen(str);\n  if (str_len >= suffix_len &&\n      memcmp(str + (str_len - suffix_len), suffix, suffix_len) == 0) {\n    return 1;\n  }\n  return 0;\n}\n\nint main(int argc, char **argv) {\n  if (argc < 2) {\n    fprintf(stderr, \"Usage: %s [file]\\n\", argv[0]);\n    return -1;\n  }\n\n  char buf[256] = {0};\n  FILE* fp = fopen(argv[1], \"rb\");\n  if (!fp) {\n    fprintf(stderr, \"Failed to open\\n\");\n    return -1;\n  }\n  size_t n = fread(buf, 1, sizeof(buf) - 1, fp);\n  fclose(fp);\n  buf[n] = '\\0';\n\n  if (prefixof(buf, \"hello\")) {\n    // CHECK-GEN1: Has prefix\n    printf(\"Has prefix\\n\");\n  } else {\n    // CHECK-ORIG: No prefix\n    printf(\"No prefix\\n\");\n  }\n\n  if (suffixof(buf, \"world\")) {\n    // CHECK-GEN2: Has suffix\n    printf(\"Has suffix\\n\");\n  } else {\n    // CHECK-ORIG: No suffix\n    printf(\"No suffix\\n\");\n  }\n\n  return 0;\n}\n"
  },
  {
    "path": "tests/shift_and.c",
    "content": "// RUN: rm -rf %t.out\n// RUN: mkdir -p %t.out\n// RUN: python -c'print(\"A\"*20)' > %t.bin\n// RUN: clang -o %t.uninstrumented %s\n// RUN: %t.uninstrumented %t.bin | FileCheck --check-prefix=CHECK-ORIG %s\n// RUN: env KO_USE_FASTGEN=1 %ko-clang -o %t.fg %s\n// RUN: env TAINT_OPTIONS=\"taint_file=%t.bin output_dir=%t.out\" %fgtest %t.fg %t.bin\n// RUN: %t.uninstrumented %t.out/id-0-0-0 | FileCheck --check-prefix=CHECK-GEN %s\n// RUN: env KO_USE_Z3=1 %ko-clang -o %t.z3 %s\n// RUN: env TAINT_OPTIONS=\"taint_file=%t.bin output_dir=%t.out\" %t.z3 %t.bin\n// RUN: %t.uninstrumented %t.out/id-0-0-0 | FileCheck --check-prefix=CHECK-GEN %s\n\n#include <stdint.h>\n#include <stdio.h>\n#include <stdlib.h>\n#include <string.h>\n#include \"lib.h\"\n\nint main (int argc, char** argv) {\n  if (argc < 2) {\n    fprintf(stderr, \"Usage: %s [file]\\n\", argv[0]);\n    return -1;\n  }\n\n  char buf[20];\n  FILE* fp = chk_fopen(argv[1], \"rb\");\n  chk_fread(buf, 1, sizeof(buf), fp);\n  fclose(fp);\n\n  uint32_t x = 0;\n\n  memcpy(&x, buf, 4);\n  /* if ((int)(x & 0xFF) == 12) { */\n  if (((int)(x >> 24) & 0xFF) == 11 && ((int)(x >> 16) & 0xFF) == 22 &&\n      ((int)(x >> 8) & 0xFF) == 33 && (int)(x & 0xFF) == 44) {\n    // CHECK-GEN: Good\n    printf(\"Good\\n\");\n  }\n  else {\n    // CHECK-ORIG: Bad\n    printf(\"Bad\\n\");\n  }\n}\n"
  },
  {
    "path": "tests/sign.c",
    "content": "// RUN: rm -rf %t.out\n// RUN: mkdir -p %t.out\n// RUN: python -c'print(\"A\"*20)' > %t.bin\n// RUN: clang -o %t.uninstrumented %s\n// RUN: %t.uninstrumented %t.bin | FileCheck --check-prefix=CHECK-ORIG %s\n// RUN: env KO_USE_FASTGEN=1 %ko-clang -o %t.fg %s\n// RUN: env TAINT_OPTIONS=\"taint_file=%t.bin output_dir=%t.out\" %fgtest %t.fg %t.bin\n// RUN: %t.uninstrumented %t.out/id-0-0-0 | FileCheck --check-prefix=CHECK-GEN %s\n// RUN: env KO_USE_Z3=1 %ko-clang -o %t.z3 %s\n// RUN: env TAINT_OPTIONS=\"taint_file=%t.bin output_dir=%t.out\" %t.z3 %t.bin\n// RUN: %t.uninstrumented %t.out/id-0-0-0 | FileCheck --check-prefix=CHECK-GEN %s\n\n#include <stdint.h>\n#include <stdio.h>\n#include <stdlib.h>\n#include <string.h>\n#include \"lib.h\"\n\nint main (int argc, char** argv) {\n  if (argc < 2) {\n    fprintf(stderr, \"Usage: %s [file]\\n\", argv[0]);\n    return -1;\n  }\n\n  char buf[20];\n  FILE* fp = chk_fopen(argv[1], \"rb\");\n  chk_fread(buf, 1, sizeof(buf), fp);\n  fclose(fp);\n\n  uint16_t x = 0;\n\n  memcpy(&x, buf + 1, 2);\n\n  // if y is less than 32 bits, it has not nsw flag.\n  int16_t y = x * -3;\n\n  if (y == -12) {\n    // CHECK-GEN: Good\n    printf(\"Good\\n\");\n  }\n  else {\n    // CHECK-ORIG: Bad\n    printf(\"Bad\\n\");\n  }\n}\n"
  },
  {
    "path": "tests/str_mem_mixed_chain.c",
    "content": "// RUN: rm -rf %t.out\n// RUN: mkdir -p %t.out\n// RUN: python -c'print(\"A\"*20)' > %t.bin\n// RUN: clang -o %t.uninstrumented %s\n// RUN: %t.uninstrumented %t.bin | FileCheck --check-prefix=CHECK-ORIG %s\n// RUN: env KO_USE_FASTGEN=1 %ko-clang -o %t.fg %s\n// First iteration: finds semicolon with strchr\n// RUN: env TAINT_OPTIONS=\"taint_file=%t.bin output_dir=%t.out\" %fgtest %t.fg %t.bin\n// Second iteration: finds last colon before semicolon with memrchr\n// RUN: env TAINT_OPTIONS=\"taint_file=%t.out/id-0-0-0 output_dir=%t.out session_id=1 enum_gep=0\" %fgtest %t.fg %t.out/id-0-0-0\n// RUN: %t.uninstrumented %t.out/id-0-1-1 | FileCheck --check-prefix=CHECK-GEN %s\n\n// Test mixed chain: strchr followed by memrchr with bounded length\n// t1 = strchr(buf, ';');        // find first semicolon (forward, null-terminated)\n// t2 = memrchr(buf, ':', t1-buf); // find LAST colon before the semicolon (backward, bounded)\n// This tests combining strchr (indexof) and memrchr (last_indexof with substr)\n\n#define _GNU_SOURCE\n#include <stdint.h>\n#include <stdio.h>\n#include <stdlib.h>\n#include <string.h>\n\nint main(int argc, char **argv) {\n  if (argc < 2) {\n    fprintf(stderr, \"Usage: %s [file]\\n\", argv[0]);\n    return -1;\n  }\n\n  char buf[256] = {0};\n  FILE* fp = fopen(argv[1], \"rb\");\n  if (!fp) {\n    fprintf(stderr, \"Failed to open\\n\");\n    return -1;\n  }\n  size_t n = fread(buf, 1, sizeof(buf) - 1, fp);\n  fclose(fp);\n  buf[n] = '\\0';\n\n  // First: strchr finds first ';' (forward search, null-terminated)\n  char *t1 = strchr(buf, ';');\n  if (t1) {\n    // Second: memrchr finds LAST ':' before the ';' (backward search, bounded)\n    size_t len_before_t1 = t1 - buf;\n    char *t2 = (char *)memrchr(buf, ':', len_before_t1);\n    if (t2) {\n      // CHECK-GEN: Found last colon before first semicolon\n      printf(\"Found last colon before first semicolon (colon at %ld, semicolon at %ld)\\n\",\n             (long)(t2 - buf), (long)(t1 
- buf));\n    } else {\n      printf(\"Found semicolon but no colon before it (semicolon at %ld)\\n\",\n             (long)(t1 - buf));\n    }\n  } else {\n    // CHECK-ORIG: No semicolon\n    printf(\"No semicolon\\n\");\n  }\n  return 0;\n}\n"
  },
  {
    "path": "tests/strcat.c",
    "content": "// RUN: rm -rf %t.out\n// RUN: mkdir -p %t.out\n// RUN: python -c'print(\"A_A\")' > %t.bin\n// RUN: clang -o %t.uninstrumented %s\n// RUN: %t.uninstrumented %t.bin | FileCheck --check-prefix=CHECK-ORIG %s\n// RUN: env KO_USE_FASTGEN=1 %ko-clang -o %t.fg %s\n// RUN: env TAINT_OPTIONS=\"taint_file=%t.bin output_dir=%t.out enum_gep=0\" %fgtest %t.fg %t.bin\n// RUN: %t.uninstrumented %t.out/id-0-0-1 | FileCheck --check-prefix=CHECK-GEN %s\n\n// Test: strcat concatenates two parts of tainted input, then compare\n\n#include <stdint.h>\n#include <stdio.h>\n#include <stdlib.h>\n#include <string.h>\n\nint main(int argc, char **argv) {\n  if (argc < 2) {\n    fprintf(stderr, \"Usage: %s [file]\\n\", argv[0]);\n    return -1;\n  }\n\n  char buf[256] = {0};\n  FILE* fp = fopen(argv[1], \"rb\");\n  if (!fp) {\n    fprintf(stderr, \"Failed to open\\n\");\n    return -1;\n  }\n  size_t n = fread(buf, 1, sizeof(buf) - 1, fp);\n  fclose(fp);\n  buf[n] = '\\0';\n\n  char prefix[20] = {0};\n  char suffix[20] = {0};\n\n  char *pos = strchr(buf, '_');\n  if (pos) {\n    size_t prefix_len = pos - buf;\n    strncpy(prefix, buf, prefix_len);\n    prefix[prefix_len] = '\\0';\n    strcpy(suffix, pos + 1);\n  } else {\n    printf(\"No _ found in input\\n\");\n    return -1;\n  }\n\n  // Concatenate the two parts\n  char result[256] = {0};\n  strcpy(result, prefix);\n  strcat(result, suffix);\n\n  // Compare concatenated result\n  if (strcmp(result, \"deadbeef\") == 0) {\n    // CHECK-GEN: Match found\n    printf(\"Match found: %s\\n\", result);\n  } else {\n    // CHECK-ORIG: No match\n    printf(\"No match: %s\\n\", result);\n  }\n  return 0;\n}\n"
  },
  {
    "path": "tests/strcat_mixed.c",
    "content": "// RUN: rm -rf %t.out\n// RUN: mkdir -p %t.out\n// RUN: python -c'print(\"A_A\")' > %t.bin\n// RUN: clang -o %t.uninstrumented %s\n// RUN: %t.uninstrumented %t.bin | FileCheck --check-prefix=CHECK-ORIG %s\n// RUN: env KO_USE_FASTGEN=1 %ko-clang -o %t.fg %s\n// RUN: env TAINT_OPTIONS=\"taint_file=%t.bin output_dir=%t.out enum_gep=0 debug=1\" %fgtest %t.fg %t.bin\n// RUN: %t.uninstrumented %t.out/id-0-0-1 | FileCheck --check-prefix=CHECK-GEN %s\n\n// Test: strcat concatenates two parts of tainted input, then compare\n\n#include <stdint.h>\n#include <stdio.h>\n#include <stdlib.h>\n#include <string.h>\n\nint main(int argc, char **argv) {\n  if (argc < 2) {\n    fprintf(stderr, \"Usage: %s [file]\\n\", argv[0]);\n    return -1;\n  }\n\n  char buf[256] = {0};\n  FILE* fp = fopen(argv[1], \"rb\");\n  if (!fp) {\n    fprintf(stderr, \"Failed to open\\n\");\n    return -1;\n  }\n  size_t n = fread(buf, 1, 20, fp);\n  fclose(fp);\n  buf[n] = '\\0';\n\n  char prefix[20] = {0};\n  char suffix[20] = {0};\n\n  char *pos = strchr(buf, '_');\n  if (pos) {\n    *pos = '\\0';\n    strcpy(prefix, buf);\n    strcpy(suffix, pos + 1);\n  } else {\n    printf(\"No _ found in input\\n\");\n    return -1;\n  }\n\n  // Concatenate the two parts\n  char result[256] = {0};\n  strcpy(result, prefix);\n  strcat(result, suffix);\n\n  // Compare concatenated result\n  if (strcmp(result, \"deadbeef\") == 0) {\n    // CHECK-GEN: Match found\n    printf(\"Match found: %s\\n\", result);\n  } else {\n    // CHECK-ORIG: No match\n    printf(\"No match: %s\\n\", result);\n  }\n  return 0;\n}\n"
  },
  {
    "path": "tests/strchr.c",
    "content": "// RUN: rm -rf %t.out\n// RUN: mkdir -p %t.out\n// RUN: python -c'print(\"A\"*20)' > %t.bin\n// RUN: clang -o %t.uninstrumented %s\n// RUN: %t.uninstrumented %t.bin | FileCheck --check-prefix=CHECK-ORIG %s\n// RUN: env KO_USE_FASTGEN=1 %ko-clang -o %t.fg %s\n// RUN: env TAINT_OPTIONS=\"taint_file=%t.bin output_dir=%t.out\" %fgtest %t.fg %t.bin\n// RUN: %t.uninstrumented %t.out/id-0-0-0 | FileCheck --check-prefix=CHECK-GEN %s\n\n#include <stdint.h>\n#include <stdio.h>\n#include <stdlib.h>\n#include <string.h>\n\nint main(int argc, char **argv) {\n  if (argc < 2) {\n    fprintf(stderr, \"Usage: %s [file]\\n\", argv[0]);\n    return -1;\n  }\n\n  char buf[256] = {0};\n  FILE* fp = fopen(argv[1], \"rb\");\n  if (!fp) {\n    fprintf(stderr, \"Failed to open\\n\");\n    return -1;\n  }\n  size_t n = fread(buf, 1, sizeof(buf) - 1, fp);\n  fclose(fp);\n  buf[n] = '\\0';\n\n  char *p = strchr(buf, ':');\n  if (p != NULL) {\n    // CHECK-GEN: Found colon\n    printf(\"Found colon\\n\");\n  } else {\n    // CHECK-ORIG: No colon\n    printf(\"No colon\\n\");\n  }\n  return 0;\n}\n"
  },
  {
    "path": "tests/strchr_chain.c",
    "content": "// RUN: rm -rf %t.out\n// RUN: mkdir -p %t.out\n// RUN: python -c'print(\"A\"*20)' > %t.bin\n// RUN: clang -o %t.uninstrumented %s\n// RUN: %t.uninstrumented %t.bin | FileCheck --check-prefix=CHECK-ORIG %s\n// RUN: env KO_USE_FASTGEN=1 %ko-clang -o %t.fg %s\n// First iteration: finds first colon\n// RUN: env TAINT_OPTIONS=\"taint_file=%t.bin output_dir=%t.out\" %fgtest %t.fg %t.bin\n// Second iteration: finds second colon using output from first\n// RUN: env TAINT_OPTIONS=\"taint_file=%t.out/id-0-0-0 output_dir=%t.out session_id=1 enum_gep=0\" %fgtest %t.fg %t.out/id-0-0-0\n// RUN: %t.uninstrumented %t.out/id-0-1-1 | FileCheck --check-prefix=CHECK-GEN %s\n\n// Test chained strchr: t1 = strchr(h, c1); t2 = strchr(t1+1, c2);\n\n#include <stdint.h>\n#include <stdio.h>\n#include <stdlib.h>\n#include <string.h>\n\nint main(int argc, char **argv) {\n  if (argc < 2) {\n    fprintf(stderr, \"Usage: %s [file]\\n\", argv[0]);\n    return -1;\n  }\n\n  char buf[256] = {0};\n  FILE* fp = fopen(argv[1], \"rb\");\n  if (!fp) {\n    fprintf(stderr, \"Failed to open\\n\");\n    return -1;\n  }\n  size_t n = fread(buf, 1, sizeof(buf) - 1, fp);\n  fclose(fp);\n  buf[n] = '\\0';\n\n  char *t1 = strchr(buf, ':');\n  if (t1) {\n    char *t2 = strchr(t1 + 1, ':');\n    if (t2) {\n      // CHECK-GEN: Found two colons\n      printf(\"Found two colons\\n\");\n    } else {\n      printf(\"Found one colon\\n\");\n    }\n  } else {\n    // CHECK-ORIG: No colons\n    printf(\"No colons\\n\");\n  }\n  return 0;\n}\n"
  },
  {
    "path": "tests/strchr_mixed_chain.c",
    "content": "// RUN: rm -rf %t.out\n// RUN: mkdir -p %t.out\n// RUN: python -c'print(\"A\"*20)' > %t.bin\n// RUN: clang -o %t.uninstrumented %s\n// RUN: %t.uninstrumented %t.bin | FileCheck --check-prefix=CHECK-ORIG %s\n// RUN: env KO_USE_FASTGEN=1 %ko-clang -o %t.fg %s\n// First iteration: finds first colon\n// RUN: env TAINT_OPTIONS=\"taint_file=%t.bin output_dir=%t.out\" %fgtest %t.fg %t.bin\n// Second iteration: finds semicolon before the colon (backward search via pointer chain)\n// RUN: env TAINT_OPTIONS=\"taint_file=%t.out/id-0-0-0 output_dir=%t.out session_id=1 enum_gep=0\" %fgtest %t.fg %t.out/id-0-0-0\n// RUN: %t.uninstrumented %t.out/id-0-1-1 | FileCheck --check-prefix=CHECK-GEN %s\n\n// Test mixed chain: strchr (forward) followed by strrchr (backward from result)\n// t1 = strrchr(buf, ':');     // find last colon\n// t2 = strchr(t1, ';'); // find first semicolon after the colon\n// This tests combining last_indexof (strrchr) with indexof (strchr) via pointer chain\n\n#include <stdint.h>\n#include <stdio.h>\n#include <stdlib.h>\n#include <string.h>\n\nint main(int argc, char **argv) {\n  if (argc < 2) {\n    fprintf(stderr, \"Usage: %s [file]\\n\", argv[0]);\n    return -1;\n  }\n\n  char buf[256] = {0};\n  FILE* fp = fopen(argv[1], \"rb\");\n  if (!fp) {\n    fprintf(stderr, \"Failed to open\\n\");\n    return -1;\n  }\n  size_t n = fread(buf, 1, sizeof(buf) - 1, fp);\n  fclose(fp);\n  buf[n] = '\\0';\n\n  // First: strrchr finds last ':' (backward search)\n  char *t1 = strrchr(buf, ':');\n  if (t1) {\n    // Second: strchr finds first ';' after the colon (forward search from t1)\n    char *t2 = strchr(t1, ';');\n    if (t2) {\n      // CHECK-GEN: Found semicolon after colon\n      printf(\"Found semicolon after colon (colon at %ld, semicolon at %ld)\\n\",\n             (long)(t1 - buf), (long)(t2 - buf));\n    } else {\n      printf(\"Found colon but no semicolon after it (colon at %ld)\\n\",\n             (long)(t1 - buf));\n    }\n  } else {\n    
// CHECK-ORIG: No colon\n    printf(\"No colon\\n\");\n  }\n  return 0;\n}\n"
  },
  {
    "path": "tests/strcmp.c",
    "content": "// RUN: rm -rf %t.out\n// RUN: mkdir -p %t.out\n// RUN: python -c'print(\"A\"*20)' > %t.bin\n// RUN: clang -o %t.uninstrumented %s\n// RUN: %t.uninstrumented %t.bin | FileCheck --check-prefix=CHECK-ORIG %s\n// RUN: env KO_USE_FASTGEN=1 %ko-clang -o %t.fg %s\n// RUN: env TAINT_OPTIONS=\"taint_file=%t.bin output_dir=%t.out\" %fgtest %t.fg %t.bin\n// RUN: %t.uninstrumented %t.out/id-0-0-0 | FileCheck --check-prefix=CHECK-GEN1 %s\n// RUN: %t.uninstrumented %t.out/id-0-0-1 | FileCheck --check-prefix=CHECK-GEN2 %s\n// RUN: %t.uninstrumented %t.out/id-0-0-2 | FileCheck --check-prefix=CHECK-GEN3 %s\n// RUN: %t.uninstrumented %t.out/id-0-0-3 | FileCheck --check-prefix=CHECK-GEN4 %s\n// RUN: env KO_USE_Z3=1 %ko-clang -o %t.z3 %s\n// RUN: env TAINT_OPTIONS=\"taint_file=%t.bin output_dir=%t.out\" %t.z3 %t.bin\n// RUN: %t.uninstrumented %t.out/id-0-0-0 | FileCheck --check-prefix=CHECK-GEN1 %s\n// RUN: %t.uninstrumented %t.out/id-0-0-1 | FileCheck --check-prefix=CHECK-GEN2 %s\n// RUN: %t.uninstrumented %t.out/id-0-0-2 | FileCheck --check-prefix=CHECK-GEN3 %s\n// RUN: %t.uninstrumented %t.out/id-0-0-3 | FileCheck --check-prefix=CHECK-GEN4 %s\n\n#include <stdint.h>\n#include <stdio.h>\n#include <stdlib.h>\n#include <string.h>\n#include \"lib.h\"\n\nint main (int argc, char** argv) {\n  if (argc < 2) {\n    fprintf(stderr, \"Usage: %s [file]\\n\", argv[0]);\n    return -1;\n  }\n\n  char buf[20];\n  FILE* fp = chk_fopen(argv[1], \"rb\");\n  chk_fread(buf, 1, sizeof(buf), fp);\n  fclose(fp);\n\n  char a[10];\n  char b[10] = {1, 1, 1, 1, 1, 2, 3, 4, 5, 0};\n\n  /* int dd = memcmp(buf, \"12313\", 5); */\n  /* if (dd) { */\n  /*   printf(\"hey, you hit it \\n\"); */\n  /* } */\n\n  memcpy(a, buf, 9);\n  a[9] = 0;\n\n  if (strcmp(a, b) == 0) {\n    // CHECK-GEN1: Good1\n    printf(\"Good1\\n\");\n  }\n  else {\n    // CHECK-ORIG: Bad1\n    printf(\"Bad1\\n\");\n  }\n\n  a[4] += 10;\n  if (strcmp(a, b) == 0) {\n    // CHECK-GEN2: Good2\n    printf(\"Good2\\n\");\n  }\n  
else {\n    // CHECK-ORIG: Bad2\n    printf(\"Bad2\\n\");\n  }\n\n  a[4] += 244;\n  if (strcmp(a, b) == 0) {\n    // CHECK-GEN3: Good3\n    printf(\"Good3\\n\");\n  }\n  else {\n    // CHECK-ORIG: Bad3\n    printf(\"Bad3\\n\");\n  }\n\n  a[4] -= 99;\n  if (strcmp(a, b) == 0) {\n    // CHECK-GEN4: Good4\n    printf(\"Good4\\n\");\n  }\n  else {\n    // CHECK-ORIG: Bad4\n    printf(\"Bad4\\n\");\n  }\n\n  return 0;\n}\n"
  },
  {
    "path": "tests/strcmp2.c",
    "content": "// RUN: rm -rf %t.out\n// RUN: mkdir -p %t.out\n// RUN: python -c'print(\"A\"*20)' > %t.bin\n// RUN: clang -o %t.uninstrumented %s\n// RUN: %t.uninstrumented %t.bin | FileCheck --check-prefix=CHECK-ORIG %s\n// RUN: env KO_USE_FASTGEN=1 %ko-clang -o %t.fg %s\n// RUN: env TAINT_OPTIONS=\"taint_file=%t.bin output_dir=%t.out\" %fgtest %t.fg %t.bin\n// TODO: RUN: %t.uninstrumented %t.out/id-0-0-0 | FileCheck --check-prefix=CHECK-GEN1 %s\n// TODO: RUN: %t.uninstrumented %t.out/id-0-0-1 | FileCheck --check-prefix=CHECK-GEN2 %s\n// RUN: env KO_USE_Z3=1 %ko-clang -o %t.z3 %s\n// RUN: env TAINT_OPTIONS=\"taint_file=%t.bin output_dir=%t.out\" %t.z3 %t.bin\n// TODO: RUN: %t.uninstrumented %t.out/id-0-0-0 | FileCheck --check-prefix=CHECK-GEN1 %s\n// TODO: RUN: %t.uninstrumented %t.out/id-0-0-1 | FileCheck --check-prefix=CHECK-GEN2 %s\n\n#include <stdint.h>\n#include <stdio.h>\n#include <stdlib.h>\n#include <string.h>\n#include \"lib.h\"\n\nint main (int argc, char** argv) {\n  if (argc < 2) {\n    fprintf(stderr, \"Usage: %s [file]\\n\", argv[0]);\n    return -1;\n  }\n\n  char buf[20];\n  FILE* fp = chk_fopen(argv[1], \"rb\");\n  chk_fread(buf, 1, sizeof(buf), fp);\n  fclose(fp);\n\n  char a[20] = {\n    1, 1, 1, 1, 7,\n    8, 9, 1, 45, 2,\n    88, 1, 1, 2, 3,\n    4, 5, 0\n  };\n\n\n  char b[10] = {1, 1, 1, 1,\n                1, 2, 3, 4, 5, 0};\n\n  if (strcmp(buf, a) == 0) {\n    // CHECK-GEN1: Good1\n    printf(\"Good1\\n\");\n  }\n  else {\n    // CHECK-ORIG: Bad\n    printf(\"Bad\\n\");\n  }\n\n  if (strcmp(buf, b) == 0) {\n    // CHECK-GEN2: Good2\n    printf(\"Good2\\n\");\n  }\n  else {\n    // CHECK-ORIG: Bad\n    printf(\"Bad\\n\");\n  }\n}\n"
  },
  {
    "path": "tests/strdup.c",
    "content": "// RUN: rm -rf %t.out\n// RUN: mkdir -p %t.out\n// RUN: python -c'print(\"A\"*20)' > %t.bin\n// RUN: clang -o %t.uninstrumented %s\n// RUN: %t.uninstrumented %t.bin | FileCheck --check-prefix=CHECK-ORIG %s\n// RUN: env KO_USE_FASTGEN=1 %ko-clang -o %t.fg %s\n// RUN: env TAINT_OPTIONS=\"taint_file=%t.bin output_dir=%t.out\" %fgtest %t.fg %t.bin\n// RUN: %t.uninstrumented %t.out/id-0-0-0 | FileCheck --check-prefix=CHECK-GEN %s\n\n// Test: strdup duplicates tainted string, then compare the duplicate\n\n#include <stdint.h>\n#include <stdio.h>\n#include <stdlib.h>\n#include <string.h>\n\nint main(int argc, char **argv) {\n  if (argc < 2) {\n    fprintf(stderr, \"Usage: %s [file]\\n\", argv[0]);\n    return -1;\n  }\n\n  char buf[256] = {0};\n  FILE* fp = fopen(argv[1], \"rb\");\n  if (!fp) {\n    fprintf(stderr, \"Failed to open\\n\");\n    return -1;\n  }\n  size_t n = fread(buf, 1, sizeof(buf) - 1, fp);\n  fclose(fp);\n  buf[n] = '\\0';\n\n  // Duplicate the string\n  char *dup = strdup(buf);\n  if (!dup) {\n    fprintf(stderr, \"strdup failed\\n\");\n    return -1;\n  }\n\n  // Compare the duplicate\n  if (strcmp(dup, \"secret\") == 0) {\n    // CHECK-GEN: Match found\n    printf(\"Match found: %s\\n\", dup);\n  } else {\n    // CHECK-ORIG: No match\n    printf(\"No match: %s\\n\", dup);\n  }\n\n  free(dup);\n  return 0;\n}\n"
  },
  {
    "path": "tests/strlen_extend.c",
    "content": "// Test strlen extending - input needs to grow to reach target length\n// RUN: rm -rf %t.out\n// RUN: mkdir -p %t.out\n// RUN: printf 'short\\0' > %t.bin\n// RUN: clang -o %t.uninstrumented %s\n// RUN: %t.uninstrumented %t.bin | FileCheck --check-prefix=CHECK-ORIG %s\n// RUN: env KO_USE_FASTGEN=1 %ko-clang -o %t.fg %s\n// RUN: env TAINT_OPTIONS=\"taint_file=%t.bin output_dir=%t.out\" %fgtest %t.fg %t.bin\n// RUN: %t.uninstrumented %t.out/id-0-0-0 | FileCheck --check-prefix=CHECK-GEN %s\n\n#include <string.h>\n#include <stdio.h>\n#include <stdlib.h>\n\nint main(int argc, char **argv) {\n  if (argc < 2) return 1;\n\n  char buf[64];\n  FILE *f = fopen(argv[1], \"rb\");\n  if (!f) return 1;\n\n  size_t n = fread(buf, 1, sizeof(buf) - 1, f);\n  fclose(f);\n\n  size_t len = strlen(buf);\n  printf(\"strlen: %zu\\n\", len);\n\n  if (len == 15) {\n    // CHECK-GEN: SUCCESS\n    printf(\"SUCCESS: strlen == 15\\n\");\n  } else {\n    // CHECK-ORIG: NOT-15\n    printf(\"NOT-15: strlen = %zu\\n\", len);\n  }\n\n  return 0;\n}\n"
  },
  {
    "path": "tests/strlen_json3.c",
    "content": "// Test: strlen constraint in JSON context\n// When shrinking, DELETE should remove bytes so JSON remains valid\n// RUN: rm -rf %t.out\n// RUN: mkdir -p %t.out\n// RUN: printf '{\"name\":\"HELLO WORLD HELLO WORLD\",\"age\":25}' > %t.bin\n// RUN: clang -o %t.uninstrumented %s\n// RUN: %t.uninstrumented %t.bin | FileCheck --check-prefix=CHECK-ORIG %s\n// RUN: env KO_USE_FASTGEN=1 %ko-clang -o %t.fg %s\n// RUN: env TAINT_OPTIONS=\"taint_file=%t.bin output_dir=%t.out\" %fgtest %t.fg %t.bin\n// RUN: %t.uninstrumented %t.out/id-0-0-2 | FileCheck --check-prefix=CHECK-GEN %s\n\n#include <string.h>\n#include <stdio.h>\n#include <stdlib.h>\n\nint main(int argc, char **argv) {\n  if (argc < 2) {\n    fprintf(stderr, \"Usage: %s <input_file>\\n\", argv[0]);\n    return 1;\n  }\n\n  char buf[256];\n  FILE *f = fopen(argv[1], \"r\");\n  if (!f) {\n    perror(\"fopen\");\n    return 1;\n  }\n\n  size_t n = fread(buf, 1, sizeof(buf) - 1, f);\n  buf[n] = '\\0';\n  fclose(f);\n\n  printf(\"Input: %s\\n\", buf);\n\n  // Find name value\n  char *start = strstr(buf, \"\\\"name\\\":\\\"\");\n  if (!start) {\n    printf(\"Field not found\\n\");\n    return 0;\n  }\n\n  start += 8; // skip \"name\":\"\n\n  // Find closing quote\n  char *end = strchr(start, '\"');\n  if (!end) {\n    printf(\"Malformed JSON - no closing quote\\n\");\n    return 0;\n  }\n\n  // Temporarily null-terminate for strlen\n  *end = '\\0';\n  size_t len = strlen(start);\n  *end = '\"';\n\n  printf(\"Name value: \\\"%.*s\\\" (len=%zu)\\n\", (int)len, start, len);\n\n  if (len == 5) {\n    // CHECK-GEN: SUCCESS\n    printf(\"SUCCESS: Found name with exactly 5 chars!\\n\");\n  } else {\n    // CHECK-ORIG: NOT-5\n    printf(\"NOT-5: len = %zu\\n\", len);\n  }\n\n  return 0;\n}\n"
  },
  {
    "path": "tests/strlen_null_from_input.c",
    "content": "// Test strlen with null terminator from input file\n// The input has embedded null, so strlen stops there\n// RUN: rm -rf %t.out\n// RUN: mkdir -p %t.out\n// RUN: printf 'HELLO WORLD\\0extra' > %t.bin\n// RUN: clang -o %t.uninstrumented %s\n// RUN: %t.uninstrumented %t.bin | FileCheck --check-prefix=CHECK-ORIG %s\n// RUN: env KO_USE_FASTGEN=1 %ko-clang -o %t.fg %s\n// RUN: env TAINT_OPTIONS=\"taint_file=%t.bin output_dir=%t.out\" %fgtest %t.fg %t.bin\n// RUN: %t.uninstrumented %t.out/id-0-0-0 | FileCheck --check-prefix=CHECK-GEN %s\n\n#include <string.h>\n#include <stdio.h>\n#include <stdlib.h>\n\nint main(int argc, char **argv) {\n  if (argc < 2) return 1;\n\n  char buf[64];\n  FILE *f = fopen(argv[1], \"rb\");\n  if (!f) return 1;\n\n  size_t n = fread(buf, 1, sizeof(buf) - 1, f);\n  // Don't add null - rely on the null from input\n  fclose(f);\n\n  // Only call strlen if we know there's a null in the buffer\n  size_t len = strlen(buf);\n  printf(\"strlen: %zu\\n\", len);\n\n  if (len == 5) {\n    // CHECK-GEN: SUCCESS\n    printf(\"SUCCESS: strlen == 5\\n\");\n  } else {\n    // CHECK-ORIG: NOT-5\n    printf(\"NOT-5: strlen = %zu\\n\", len);\n  }\n\n  return 0;\n}\n"
  },
  {
    "path": "tests/strlen_shrink.c",
    "content": "// Test: shrinking strlen by deleting bytes\n// RUN: rm -rf %t.out\n// RUN: mkdir -p %t.out\n// RUN: printf 'HELLO WORLD' > %t.bin\n// RUN: clang -o %t.uninstrumented %s\n// RUN: %t.uninstrumented %t.bin | FileCheck --check-prefix=CHECK-ORIG %s\n// RUN: env KO_USE_FASTGEN=1 %ko-clang -o %t.fg %s\n// RUN: env TAINT_OPTIONS=\"taint_file=%t.bin output_dir=%t.out\" %fgtest %t.fg %t.bin\n// RUN: %t.uninstrumented %t.out/id-0-0-0 | FileCheck --check-prefix=CHECK-GEN %s\n\n#include <string.h>\n#include <stdio.h>\n#include <stdlib.h>\n\nint main(int argc, char **argv) {\n  if (argc < 2) {\n    fprintf(stderr, \"Usage: %s <input_file>\\n\", argv[0]);\n    return 1;\n  }\n\n  char buf[64];\n  FILE *f = fopen(argv[1], \"r\");\n  if (!f) {\n    perror(\"fopen\");\n    return 1;\n  }\n\n  size_t n = fread(buf, 1, sizeof(buf) - 1, f);\n  buf[n] = '\\0';\n  fclose(f);\n\n  // This is the strlen we're solving\n  size_t len = strlen(buf);\n  printf(\"strlen returned: %zu\\n\", len);\n\n  // Show what bytes are actually in the buffer\n  printf(\"Buffer contents (hex): \");\n  for (size_t i = 0; i < 15 && i < n; i++) {\n    printf(\"%02x \", (unsigned char)buf[i]);\n  }\n  printf(\"\\n\");\n\n  if (len == 5) {\n    // CHECK-GEN: SUCCESS\n    printf(\"SUCCESS: Found input with strlen=5\\n\");\n  } else {\n    // CHECK-ORIG: NOT-5\n    printf(\"NOT-5: strlen=%zu\\n\", len);\n  }\n\n  return 0;\n}\n"
  },
  {
    "path": "tests/strlen_test.c",
    "content": "// Test: strlen constraints for various length comparisons\n// RUN: rm -rf %t.out\n// RUN: mkdir -p %t.out\n// RUN: printf 'HELLO WORLD' > %t.bin\n// RUN: clang -o %t.uninstrumented %s\n// RUN: %t.uninstrumented %t.bin | FileCheck --check-prefix=CHECK-ORIG %s\n// RUN: env KO_USE_FASTGEN=1 KO_DONT_OPTIMIZE=1 %ko-clang -o %t.fg %s\n// RUN: env TAINT_OPTIONS=\"taint_file=%t.bin output_dir=%t.out\" %fgtest %t.fg %t.bin\n// RUN: %t.uninstrumented %t.out/id-0-0-0 | FileCheck --check-prefix=CHECK-GEN1 %s\n// RUN: %t.uninstrumented %t.out/id-0-0-1 | FileCheck --check-prefix=CHECK-GEN2 %s\n\n#include <string.h>\n#include <stdio.h>\n#include <stdlib.h>\n\nint main(int argc, char **argv) {\n  if (argc < 2) {\n    fprintf(stderr, \"Usage: %s <input_file>\\n\", argv[0]);\n    return 1;\n  }\n\n  char buf[64];\n  FILE *f = fopen(argv[1], \"r\");\n  if (!f) {\n    perror(\"fopen\");\n    return 1;\n  }\n  size_t n = fread(buf, 1, 63, f);\n  buf[n] = '\\0';\n  fclose(f);\n\n  size_t len = strlen(buf);\n  printf(\"strlen = %zu\\n\", len);\n\n  if (len > 10) {\n    // CHECK-ORIG: Long string\n    printf(\"Long string (> 10)!\\n\");\n  }\n  if (len == 5) {\n    // CHECK-GEN2: Exact length 5\n    printf(\"Exact length 5!\\n\");\n  }\n  if (len < 3) {\n    // CHECK-GEN1: Short string\n    printf(\"Short string (< 3)!\\n\");\n  }\n  return 0;\n}\n"
  },
  {
    "path": "tests/strncat.c",
    "content": "// RUN: rm -rf %t.out\n// RUN: mkdir -p %t.out\n// RUN: python -c'print(\"A\"*20)' > %t.bin\n// RUN: clang -o %t.uninstrumented %s\n// RUN: %t.uninstrumented %t.bin | FileCheck --check-prefix=CHECK-ORIG %s\n// RUN: env KO_USE_FASTGEN=1 %ko-clang -o %t.fg %s\n// RUN: env TAINT_OPTIONS=\"taint_file=%t.bin output_dir=%t.out\" %fgtest %t.fg %t.bin\n// First iteration finds colon\n// RUN: env TAINT_OPTIONS=\"taint_file=%t.out/id-0-0-0 output_dir=%t.out session_id=1\" %fgtest %t.fg %t.out/id-0-0-0\n// RUN: %t.uninstrumented %t.out/id-0-1-1 | FileCheck --check-prefix=CHECK-GEN %s\n\n// Test: strncat with symbolic length from strchr result\n// Pattern: find delimiter, append prefix to base, compare result\n\n#include <stdint.h>\n#include <stdio.h>\n#include <stdlib.h>\n#include <string.h>\n\nint main(int argc, char **argv) {\n  if (argc < 2) {\n    fprintf(stderr, \"Usage: %s [file]\\n\", argv[0]);\n    return -1;\n  }\n\n  char buf[256] = {0};\n  FILE* fp = fopen(argv[1], \"rb\");\n  if (!fp) {\n    fprintf(stderr, \"Failed to open\\n\");\n    return -1;\n  }\n  size_t n = fread(buf, 1, sizeof(buf) - 1, fp);\n  fclose(fp);\n  buf[n] = '\\0';\n\n  // Find colon delimiter - this makes len symbolic\n  char *sep = strchr(buf, ':');\n  if (sep) {\n    size_t len = sep - buf;  // symbolic length\n\n    // Start with a base string\n    char result[256] = \"prefix_\";\n\n    // Append first 'len' bytes of buf to result\n    // This uses strncat with symbolic n!\n    strncat(result, buf, len);\n\n    // Compare the concatenated result\n    if (strcmp(result, \"prefix_key\") == 0) {\n      // CHECK-GEN: Match found\n      printf(\"Match found: %s\\n\", result);\n    } else {\n      printf(\"No match: %s (len=%zu)\\n\", result, len);\n    }\n  } else {\n    // CHECK-ORIG: No colon\n    printf(\"No colon found\\n\");\n  }\n  return 0;\n}\n"
  },
  {
    "path": "tests/strncpy_simple.c",
    "content": "// RUN: rm -rf %t.out\n// RUN: mkdir -p %t.out\n// RUN: python -c'print(\"A\"*20)' > %t.bin\n// RUN: clang -o %t.uninstrumented %s\n// RUN: %t.uninstrumented %t.bin | FileCheck --check-prefix=CHECK-ORIG %s\n// RUN: env KO_USE_FASTGEN=1 %ko-clang -o %t.fg %s\n// RUN: env TAINT_OPTIONS=\"taint_file=%t.bin output_dir=%t.out\" %fgtest %t.fg %t.bin\n// First iteration finds colon, second iteration solves prefix constraint\n// RUN: env TAINT_OPTIONS=\"taint_file=%t.out/id-0-0-0 output_dir=%t.out session_id=1 enum_gep=0\" %fgtest %t.fg %t.out/id-0-0-0\n// RUN: env TAINT_OPTIONS=\"taint_file=%t.out/id-0-1-1 output_dir=%t.out session_id=2 enum_gep=0\" %fgtest %t.fg %t.out/id-0-1-1\n// RUN: %t.uninstrumented %t.out/id-0-2-2 | FileCheck --check-prefix=CHECK-GEN %s\n\n// Test: strchr to find delimiter, strncpy prefix, check first byte\n\n#include <stdint.h>\n#include <stdio.h>\n#include <stdlib.h>\n#include <string.h>\n\nint main(int argc, char **argv) {\n  if (argc < 2) {\n    fprintf(stderr, \"Usage: %s [file]\\n\", argv[0]);\n    return -1;\n  }\n\n  char buf[256] = {0};\n  FILE* fp = fopen(argv[1], \"rb\");\n  if (!fp) {\n    fprintf(stderr, \"Failed to open\\n\");\n    return -1;\n  }\n  size_t n = fread(buf, 1, sizeof(buf) - 1, fp);\n  fclose(fp);\n  buf[n] = '\\0';\n\n  // Find colon delimiter\n  char *sep = strchr(buf, ':');\n  if (sep) {\n    // Extract prefix before the colon\n    size_t len = sep - buf;\n    char prefix[20];\n    strncpy(prefix, buf, len);\n    prefix[len] = '\\0';\n\n    if (len > 1) {\n      // Simple check: first byte equals 'X'\n      if (prefix[0] == 'X') {\n        // CHECK-GEN: Found X prefix\n        printf(\"Found X prefix before colon\\n\");\n      } else {\n        // CHECK-COLON: First char is\n        printf(\"First char is '%c' (0x%02x), not 'X'\\n\", prefix[0], (unsigned char)prefix[0]);\n      }\n    } else {\n      printf(\"Prefix too short\\n\");\n    }\n  } else {\n    // CHECK-ORIG: No colon\n    printf(\"No colon 
found\\n\");\n  }\n  return 0;\n}\n"
  },
  {
    "path": "tests/strncpy_substr.c",
    "content": "// RUN: rm -rf %t.out\n// RUN: mkdir -p %t.out\n// RUN: python -c'print(\"A\"*20)' > %t.bin\n// RUN: clang -o %t.uninstrumented %s\n// RUN: %t.uninstrumented %t.bin | FileCheck --check-prefix=CHECK-ORIG %s\n// RUN: env KO_USE_FASTGEN=1 %ko-clang -o %t.fg %s\n// First iteration sees colon, solves key='username'\n// RUN: env TAINT_OPTIONS=\"taint_file=%t.bin output_dir=%t.out\" %fgtest %t.fg %t.bin\n// Second iteration solves value='password'\n// RUN: env TAINT_OPTIONS=\"taint_file=%t.out/id-0-0-0 output_dir=%t.out session_id=1 enum_gep=0\" %fgtest %t.fg %t.out/id-0-0-0\n// Third iteration checks both key and value constraints\n// RUN: env TAINT_OPTIONS=\"taint_file=%t.out/id-0-1-1 output_dir=%t.out session_id=2 enum_gep=0\" %fgtest %t.fg %t.out/id-0-1-1\n// RUN: %t.uninstrumented %t.out/id-0-2-2 | FileCheck --check-prefix=CHECK-GEN %s\n\n// Test: key-value parsing pattern \"key:value\"\n// strchr finds the delimiter, strncpy extracts key, pointer arithmetic extracts value\n// Expectations are placed on both key and value to test symbolic strncpy length\n\n#include <stdint.h>\n#include <stdio.h>\n#include <stdlib.h>\n#include <string.h>\n\nint main(int argc, char **argv) {\n  if (argc < 2) {\n    fprintf(stderr, \"Usage: %s [file]\\n\", argv[0]);\n    return -1;\n  }\n\n  char buf[256] = {0};\n  FILE* fp = fopen(argv[1], \"rb\");\n  if (!fp) {\n    fprintf(stderr, \"Failed to open\\n\");\n    return -1;\n  }\n  size_t nread = fread(buf, 1, sizeof(buf) - 1, fp);\n  fclose(fp);\n  buf[nread] = '\\0';\n  size_t buflen = strlen(buf);\n\n  // Find colon delimiter (key:value separator)\n  char *sep = strchr(buf, ':');\n  if (sep) {\n    // Extract key (prefix before the colon)\n    size_t len = sep - buf;\n    char key[20] = {0};  // Initialize to avoid kInitializingLabel\n    char value[20] = {0};\n    strncpy(key, buf, len);\n    key[len] = '\\0';\n    strcpy(value, sep + 1);\n\n    // Check if there's a value part after the colon\n    // size_t 
sep_offset = sep - buf;\n    // if (sep_offset + 1 < buflen) {\n    //   // Safe to copy value part\n    //   size_t value_len = buflen - (sep_offset + 1);\n    //   strncpy(value, sep + 1, value_len);\n    //   value[value_len] = '\\0';\n    // }\n    // else: value remains empty string\n\n    // This tests constraints on both parts of the key:value pattern\n    if (strcmp(key, \"username\") == 0 && strcmp(value, \"password\") == 0) {\n      // CHECK-GEN: Found\n      printf(\"Found\\n\");\n    } else {\n      // BAD\n      printf(\"Key='%s' (len=%zu), Value='%s'\\n\", key, len, value);\n    }\n  } else {\n    // CHECK-ORIG: No colon found\n    printf(\"No colon found\\n\");\n  }\n  return 0;\n}\n"
  },
  {
    "path": "tests/strndup.c",
    "content": "// RUN: rm -rf %t.out\n// RUN: mkdir -p %t.out\n// RUN: python -c'print(\"A\"*20)' > %t.bin\n// RUN: clang -o %t.uninstrumented %s\n// RUN: %t.uninstrumented %t.bin | FileCheck --check-prefix=CHECK-ORIG %s\n// RUN: env KO_USE_FASTGEN=1 %ko-clang -o %t.fg %s\n// First iteration finds colon\n// RUN: env TAINT_OPTIONS=\"taint_file=%t.bin output_dir=%t.out\" %fgtest %t.fg %t.bin\n// Second iteration solves key constraint\n// RUN: env TAINT_OPTIONS=\"taint_file=%t.out/id-0-0-0 output_dir=%t.out session_id=1\" %fgtest %t.fg %t.out/id-0-0-0\n// RUN: %t.uninstrumented %t.out/id-0-1-1 | FileCheck --check-prefix=CHECK-GEN %s\n\n// Test: strndup with symbolic length from strchr result\n// Pattern: find delimiter, duplicate prefix using strndup, compare\n\n#define _GNU_SOURCE\n#include <stdint.h>\n#include <stdio.h>\n#include <stdlib.h>\n#include <string.h>\n\nint main(int argc, char **argv) {\n  if (argc < 2) {\n    fprintf(stderr, \"Usage: %s [file]\\n\", argv[0]);\n    return -1;\n  }\n\n  char buf[256] = {0};\n  FILE* fp = fopen(argv[1], \"rb\");\n  if (!fp) {\n    fprintf(stderr, \"Failed to open\\n\");\n    return -1;\n  }\n  size_t n = fread(buf, 1, sizeof(buf) - 1, fp);\n  fclose(fp);\n  buf[n] = '\\0';\n\n  // Find colon delimiter - this makes len symbolic\n  char *sep = strchr(buf, ':');\n  if (sep) {\n    size_t len = sep - buf;  // symbolic length\n\n    // Duplicate first 'len' bytes using strndup\n    // This uses strndup with symbolic n!\n    char *key = strndup(buf, len);\n    if (!key) {\n      fprintf(stderr, \"strndup failed\\n\");\n      return -1;\n    }\n\n    // Compare the duplicated key\n    if (strcmp(key, \"user\") == 0) {\n      // CHECK-GEN: Match found\n      printf(\"Match found: key=%s\\n\", key);\n    } else {\n      printf(\"No match: key=%s (len=%zu)\\n\", key, len);\n    }\n\n    free(key);\n  } else {\n    // CHECK-ORIG: No colon found\n    printf(\"No colon found\\n\");\n  }\n  return 0;\n}\n"
  },
  {
    "path": "tests/strnstr.c",
    "content": "// RUN: rm -rf %t.out\n// RUN: mkdir -p %t.out\n// RUN: python -c'print(\"A\"*20)' > %t.bin\n// RUN: clang -o %t.uninstrumented %s -lbsd\n// RUN: %t.uninstrumented %t.bin | FileCheck --check-prefix=CHECK-ORIG %s\n// RUN: env KO_USE_FASTGEN=1 %ko-clang -o %t.fg %s\n// RUN: env TAINT_OPTIONS=\"taint_file=%t.bin output_dir=%t.out\" %fgtest %t.fg %t.bin\n// First iteration finds delimiter\n// RUN: env TAINT_OPTIONS=\"taint_file=%t.out/id-0-0-0 output_dir=%t.out session_id=1\" %fgtest %t.fg %t.out/id-0-0-0\n// RUN: %t.uninstrumented %t.out/id-0-1-1 | FileCheck --check-prefix=CHECK-GEN %s\n\n// Test: strnstr with symbolic length from strchr result\n// Pattern: find delimiter, then search for pattern only within prefix before delimiter\n\n#include <stdint.h>\n#include <stdio.h>\n#include <stdlib.h>\n#include <string.h>\n\nextern char *strnstr(const char *haystack, const char *needle, size_t len);\n\nint main(int argc, char **argv) {\n  if (argc < 2) {\n    fprintf(stderr, \"Usage: %s [file]\\n\", argv[0]);\n    return -1;\n  }\n\n  char buf[256] = {0};\n  FILE* fp = fopen(argv[1], \"rb\");\n  if (!fp) {\n    fprintf(stderr, \"Failed to open\\n\");\n    return -1;\n  }\n  size_t n = fread(buf, 1, sizeof(buf) - 1, fp);\n  fclose(fp);\n  buf[n] = '\\0';\n\n  // Find delimiter - this makes len symbolic\n  char *sep = strchr(buf, ':');\n  if (sep) {\n    size_t len = sep - buf;  // symbolic length\n\n    // Search for \"key\" only within the first 'len' bytes (before delimiter)\n    // This tests strnstr with symbolic n parameter\n    char *found = strnstr(buf, \"key\", len);\n\n    if (found != NULL) {\n      // CHECK-GEN: Found key before delimiter\n      printf(\"Found key before delimiter at position %ld\\n\", found - buf);\n    } else {\n      printf(\"No key in first %zu bytes\\n\", len);\n    }\n  } else {\n    // CHECK-ORIG: No delimiter\n    printf(\"No delimiter found\\n\");\n  }\n  return 0;\n}\n"
  },
  {
    "path": "tests/strpbrk.c",
    "content": "// RUN: rm -rf %t.out\n// RUN: mkdir -p %t.out\n// RUN: python -c'print(\"A\"*20)' > %t.bin\n// RUN: clang -o %t.uninstrumented %s\n// RUN: %t.uninstrumented %t.bin | FileCheck --check-prefix=CHECK-ORIG %s\n// RUN: env KO_USE_FASTGEN=1 %ko-clang -o %t.fg %s\n// RUN: env TAINT_OPTIONS=\"taint_file=%t.bin output_dir=%t.out\" %fgtest %t.fg %t.bin\n// RUN: %t.uninstrumented %t.out/id-0-0-0 | FileCheck --check-prefix=CHECK-GEN %s\n\n// Test strpbrk: find first character from a set\n\n#include <stdint.h>\n#include <stdio.h>\n#include <stdlib.h>\n#include <string.h>\n\nint main(int argc, char **argv) {\n  if (argc < 2) {\n    fprintf(stderr, \"Usage: %s [file]\\n\", argv[0]);\n    return -1;\n  }\n\n  char buf[256] = {0};\n  FILE* fp = fopen(argv[1], \"rb\");\n  if (!fp) {\n    fprintf(stderr, \"Failed to open\\n\");\n    return -1;\n  }\n  size_t n = fread(buf, 1, sizeof(buf) - 1, fp);\n  fclose(fp);\n  buf[n] = '\\0';\n\n  // Find first occurrence of any of ':' or ';' or '='\n  char *t1 = strpbrk(buf, \":;=\");\n  if (t1) {\n    // CHECK-GEN: Found delimiter\n    printf(\"Found delimiter '%c' at position %ld\\n\", *t1, (long)(t1 - buf));\n  } else {\n    // CHECK-ORIG: No delimiter\n    printf(\"No delimiter\\n\");\n  }\n  return 0;\n}\n"
  },
  {
    "path": "tests/strrchr.c",
    "content": "// RUN: rm -rf %t.out\n// RUN: mkdir -p %t.out\n// RUN: python -c'print(\"A\"*20)' > %t.bin\n// RUN: clang -o %t.uninstrumented %s\n// RUN: %t.uninstrumented %t.bin | FileCheck --check-prefix=CHECK-ORIG %s\n// RUN: env KO_USE_FASTGEN=1 %ko-clang -o %t.fg %s\n// RUN: env TAINT_OPTIONS=\"taint_file=%t.bin output_dir=%t.out\" %fgtest %t.fg %t.bin\n// RUN: %t.uninstrumented %t.out/id-0-0-0 | FileCheck --check-prefix=CHECK-GEN %s\n\n#include <stdint.h>\n#include <stdio.h>\n#include <stdlib.h>\n#include <string.h>\n\nint main(int argc, char **argv) {\n  if (argc < 2) {\n    fprintf(stderr, \"Usage: %s [file]\\n\", argv[0]);\n    return -1;\n  }\n\n  char buf[256] = {0};\n  FILE* fp = fopen(argv[1], \"rb\");\n  if (!fp) {\n    fprintf(stderr, \"Failed to open\\n\");\n    return -1;\n  }\n  size_t n = fread(buf, 1, sizeof(buf) - 1, fp);\n  fclose(fp);\n  buf[n] = '\\0';\n\n  char *p = strrchr(buf, '/');\n  if (p != NULL) {\n    // CHECK-GEN: Found slash\n    printf(\"Found slash\\n\");\n  } else {\n    // CHECK-ORIG: No slash\n    printf(\"No slash\\n\");\n  }\n  return 0;\n}\n"
  },
  {
    "path": "tests/strstr.c",
    "content": "// RUN: rm -rf %t.out\n// RUN: mkdir -p %t.out\n// RUN: python -c'print(\"A\"*20)' > %t.bin\n// RUN: clang -o %t.uninstrumented %s\n// RUN: %t.uninstrumented %t.bin | FileCheck --check-prefix=CHECK-ORIG %s\n// RUN: env KO_USE_FASTGEN=1 %ko-clang -o %t.fg %s\n// RUN: env TAINT_OPTIONS=\"taint_file=%t.bin output_dir=%t.out\" %fgtest %t.fg %t.bin\n// RUN: %t.uninstrumented %t.out/id-0-0-0 | FileCheck --check-prefix=CHECK-GEN %s\n\n#include <stdint.h>\n#include <stdio.h>\n#include <stdlib.h>\n#include <string.h>\n\nint main(int argc, char **argv) {\n  if (argc < 2) {\n    fprintf(stderr, \"Usage: %s [file]\\n\", argv[0]);\n    return -1;\n  }\n\n  char buf[256] = {0};\n  FILE* fp = fopen(argv[1], \"rb\");\n  if (!fp) {\n    fprintf(stderr, \"Failed to open\\n\");\n    return -1;\n  }\n  size_t n = fread(buf, 1, sizeof(buf) - 1, fp);\n  fclose(fp);\n  buf[n] = '\\0';\n\n  if (strstr(buf, \"magic\") != NULL) {\n    // CHECK-GEN: Found magic\n    printf(\"Found magic\\n\");\n  } else {\n    // CHECK-ORIG: No magic\n    printf(\"No magic\\n\");\n  }\n  return 0;\n}\n"
  },
  {
    "path": "tests/strsub.c",
    "content": "// RUN: rm -rf %t.out\n// RUN: mkdir -p %t.out\n// RUN: python -c'print(\"A\"*20)' > %t.bin\n// RUN: clang -o %t.uninstrumented %s\n// RUN: %t.uninstrumented %t.bin | FileCheck --check-prefix=CHECK-ORIG %s\n// RUN: env KO_USE_FASTGEN=1 %ko-clang -o %t.fg %s\n// RUN: env TAINT_OPTIONS=\"taint_file=%t.bin output_dir=%t.out\" %fgtest %t.fg %t.bin\n// RUN: %t.uninstrumented %t.out/id-0-0-3 | FileCheck --check-prefix=CHECK-GEN %s\n\n#include <stdint.h>\n#include <stdio.h>\n#include <stdlib.h>\n#include <string.h>\n\n// Implementation matching xmlStrsub signature\n// Extracts substring starting at 'start' with length 'len'\nchar *xmlStrsub(const char *str, int start, int len) {\n  if (str == NULL) return NULL;\n  if (start < 0) return NULL;\n  if (len < 0) return NULL;\n\n  // Skip to start position\n  int i;\n  for (i = 0; i < start; i++) {\n    if (*str == 0) return NULL;\n    str++;\n  }\n  if (*str == 0) return NULL;\n\n  // Duplicate len characters (like xmlStrndup)\n  size_t actual_len = strlen(str);\n  if ((size_t)len > actual_len) len = actual_len;\n\n  char *ret = (char *)malloc(len + 1);\n  if (ret == NULL) return NULL;\n  memcpy(ret, str, len);\n  ret[len] = '\\0';\n  return ret;\n}\n\nint main(int argc, char **argv) {\n  if (argc < 2) {\n    fprintf(stderr, \"Usage: %s [file]\\n\", argv[0]);\n    return -1;\n  }\n\n  char buf[256] = {0};\n  FILE* fp = fopen(argv[1], \"rb\");\n  if (!fp) {\n    fprintf(stderr, \"Failed to open\\n\");\n    return -1;\n  }\n  size_t n = fread(buf, 1, sizeof(buf) - 1, fp);\n  fclose(fp);\n  buf[n] = '\\0';\n\n  // Extract substring from position 5, length 5\n  // If input is \"xxxxxhello...\", substr should be \"hello\"\n  char *sub = xmlStrsub(buf, 5, 5);\n  if (sub != NULL) {\n    if (strcmp(sub, \"hello\") == 0) {\n      // CHECK-GEN: Found hello\n      printf(\"Found hello\\n\");\n    } else {\n      // CHECK-ORIG: No match\n      printf(\"No match\\n\");\n    }\n    free(sub);\n  } else {\n    printf(\"Substr 
failed\\n\");\n  }\n\n  return 0;\n}\n"
  },
  {
    "path": "tests/struct.c",
    "content": "// RUN: rm -rf %t.out\n// RUN: mkdir -p %t.out\n// RUN: python -c\"import sys; sys.stdout.buffer.write(b'\\x00\\x00\\x00\\x00')\" > %t.bin\n// RUN: clang -o %t.uninstrumented %s\n// RUN: %t.uninstrumented %t.bin | FileCheck --check-prefix=CHECK-ORIG %s\n// RUN: env KO_DONT_OPTIMIZE=1 KO_USE_FASTGEN=1 %ko-clang -o %t.fg %s\n// RUN: env TAINT_OPTIONS=\"taint_file=%t.bin output_dir=%t.out\" %fgtest %t.fg %t.bin\n// RUN: %t.uninstrumented %t.out/id-0-0-0 | FileCheck --check-prefix=CHECK-GEN %s\n\n#include <stdint.h>\n#include <stdio.h>\n#include <stdlib.h>\n#include <string.h>\n#include \"lib.h\"\n\nstruct point_t {\n  int x;\n  int y;\n};\n\nint main (int argc, char** argv) {\n  if (argc < 2) {\n    fprintf(stderr, \"Usage: %s [file]\\n\", argv[0]);\n    return -1;\n  }\n\n  int x = 0;\n\n  FILE* fp = chk_fopen(argv[1], \"rb\");\n  chk_fread(&x, 1, sizeof(x), fp);\n  fclose(fp);\n\n  struct point_t p = { x, 1 };\n\n  if (p.x == 1) {\n    // CHECK-GEN: Good\n    printf(\"Good1\\n\");\n  }\n  else {\n    // CHECK-ORIG: Bad\n    printf(\"Bad\\n\");\n  }\n}\n"
  },
  {
    "path": "tests/switch.c",
    "content": "// RUN: rm -rf %t.out\n// RUN: mkdir -p %t.out\n// RUN: python -c'print(\"A\"*20)' > %t.bin\n// RUN: clang -o %t.uninstrumented %s\n// RUN: %t.uninstrumented %t.bin | FileCheck --check-prefix=CHECK-ORIG %s\n// RUN: env KO_USE_FASTGEN=1 %ko-clang -o %t.fg %s\n// RUN: env TAINT_OPTIONS=\"taint_file=%t.bin output_dir=%t.out\" %fgtest %t.fg %t.bin\n// RUN: %t.uninstrumented %t.out/id-0-0-0 | FileCheck --check-prefix=CHECK-GEN1 %s\n// RUN: %t.uninstrumented %t.out/id-0-0-1 | FileCheck --check-prefix=CHECK-GEN2 %s\n// RUN: %t.uninstrumented %t.out/id-0-0-2 | FileCheck --check-prefix=CHECK-GEN3 %s\n// RUN: %t.uninstrumented %t.out/id-0-0-3 | FileCheck --check-prefix=CHECK-GEN4 %s\n// RUN: %t.uninstrumented %t.out/id-0-0-4 | FileCheck --check-prefix=CHECK-GEN5 %s\n// RUN: %t.uninstrumented %t.out/id-0-0-5 | FileCheck --check-prefix=CHECK-GEN6 %s\n// RUN: env KO_USE_Z3=1 %ko-clang -o %t.z3 %s\n// RUN: env TAINT_OPTIONS=\"taint_file=%t.bin output_dir=%t.out\" %t.z3 %t.bin\n// RUN: %t.uninstrumented %t.out/id-0-0-0 | FileCheck --check-prefix=CHECK-GEN1 %s\n// RUN: %t.uninstrumented %t.out/id-0-0-1 | FileCheck --check-prefix=CHECK-GEN2 %s\n// RUN: %t.uninstrumented %t.out/id-0-0-2 | FileCheck --check-prefix=CHECK-GEN3 %s\n// RUN: %t.uninstrumented %t.out/id-0-0-3 | FileCheck --check-prefix=CHECK-GEN4 %s\n// RUN: %t.uninstrumented %t.out/id-0-0-4 | FileCheck --check-prefix=CHECK-GEN5 %s\n// RUN: %t.uninstrumented %t.out/id-0-0-5 | FileCheck --check-prefix=CHECK-GEN6 %s\n\n#include <stdint.h>\n#include <stdio.h>\n#include <stdlib.h>\n#include <string.h>\n#include \"lib.h\"\n\nint main(int argc, char **argv) {\n  if (argc < 2) {\n    fprintf(stderr, \"Usage: %s [file]\\n\", argv[0]);\n    return -1;\n  }\n\n  char buf[20];\n  FILE* fp = chk_fopen(argv[1], \"rb\");\n  chk_fread(buf, 1, sizeof(buf), fp);\n  fclose(fp);\n\n  int b = 0;\n  memcpy(&b, buf + 2, 4);\n  int x = 0;\n  memcpy(&x, buf + 6, 4);\n\n  switch (b) {\n  case 12312213:\n    // CHECK-GEN1: Good1\n   
 printf(\"Good1\\n\");\n    break;\n  case 13201000:\n    // CHECK-GEN2: Good2\n    printf(\"Good2\\n\");\n    break;\n  case -1111:\n    // CHECK-GEN3: Good3\n    printf(\"Good3\\n\");\n    break;\n  case 3330000:\n    // CHECK-GEN4: Good4\n    printf(\"Good4\\n\");\n    break;\n  case 5888:\n    // CHECK-GEN5: Good5\n    printf(\"Good5\\n\");\n    break;\n  case -897978:\n    // CHECK-GEN6: Good6\n    printf(\"Good6\\n\");\n    break;\n  default:\n    // CHECK-ORIG: Bad\n    printf(\"Bad\\n\");\n    break;\n  }\n}\n"
  },
  {
    "path": "tests/ubsan_div.c",
    "content": "// RUN: rm -rf %t.out\n// RUN: mkdir -p %t.out\n// RUN: python -c\"import sys; sys.stdout.buffer.write(b'\\x01\\x00\\x00\\x00\\x01\\x00\\x00\\x00')\" > %t.bin\n// RUN: clang -fsanitize=undefined -o %t.ubsan %s\n// RUN: env KO_DONT_OPTIMIZE=1 KO_USE_FASTGEN=1 KO_SOLVE_UB=1 %ko-clang -o %t.fg %s\n// RUN: env TAINT_OPTIONS=\"taint_file=%t.bin output_dir=%t.out solve_ub=1\" %fgtest %t.fg %t.bin\n// RUN: not env UBSAN_OPTIONS=\"halt_on_error=1\" %t.ubsan %t.out/id-0-0-0 2>&1 | FileCheck %s\n// CHECK: runtime error: division by zero\n// CHECK: SUMMARY: UndefinedBehaviorSanitizer: undefined-behavior\n\n#include <stdint.h>\n#include <stdio.h>\n#include <stdlib.h>\n#include <string.h>\n#include \"lib.h\"\n\nint main (int argc, char** argv) {\n  if (argc < 2) {\n    fprintf(stderr, \"Usage: %s [file]\\n\", argv[0]);\n    return 0;\n  }\n\n  FILE* fp = chk_fopen(argv[1], \"rb\");\n  int dividend = 0, divisor = 0;\n\n  chk_fread(&dividend, sizeof(dividend), 1, fp);\n  chk_fread(&divisor, sizeof(divisor), 1, fp);\n  fclose(fp);\n  printf(\"%d\\n\", dividend / divisor);\n}\n"
  },
  {
    "path": "tests/ubsan_intovfl.c",
    "content": "// RUN: rm -rf %t.out\n// RUN: mkdir -p %t.out\n// RUN: python -c\"import sys; sys.stdout.buffer.write(b'\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00')\" > %t.bin\n// RUN: clang -O0 -fsanitize=unsigned-integer-overflow -o %t.ubsan %s\n// RUN: env KO_DONT_OPTIMIZE=1 KO_USE_FASTGEN=1 KO_SOLVE_UB=1 %ko-clang -o %t.fg %s\n// RUN: env TAINT_OPTIONS=\"taint_file=%t.bin output_dir=%t.out solve_ub=1\" %fgtest %t.fg %t.bin\n// RUN: not env UBSAN_OPTIONS=\"halt_on_error=1\" %t.ubsan %t.out/id-0-0-1 2>&1 | FileCheck %s\n// CHECK: runtime error: unsigned integer overflow\n\n#include <stdint.h>\n#include <stdio.h>\n#include <stdlib.h>\n#include <string.h>\n#include \"lib.h\"\n\nint main (int argc, char** argv) {\n  if (argc < 2) {\n    fprintf(stderr, \"Usage: %s [file]\\n\", argv[0]);\n    return 0;\n  }\n\n  FILE* fp = chk_fopen(argv[1], \"rb\");\n  size_t size = 0;\n\n  chk_fread(&size, sizeof(size), 1, fp);\n  fclose(fp);\n\n  char* ptr = malloc(size + 1);\n  memset(ptr, 0, size);\n  free(ptr);\n}\n"
  },
  {
    "path": "tests/ubsan_mulovfl.c",
    "content": "// RUN: rm -rf %t.out\n// RUN: mkdir -p %t.out\n// RUN: python -c\"import sys; sys.stdout.buffer.write(b'\\x02\\x00\\x00\\x00\\x03\\x00\\x00\\x00')\" > %t.bin\n// RUN: clang -O0 -fsanitize=signed-integer-overflow,unsigned-integer-overflow -o %t.ubsan %s\n// RUN: env KO_DONT_OPTIMIZE=1 KO_USE_FASTGEN=1 KO_SOLVE_UB=1 %ko-clang -o %t.fg %s\n// RUN: env TAINT_OPTIONS=\"taint_file=%t.bin output_dir=%t.out solve_ub=1\" %fgtest %t.fg %t.bin\n// RUN: not env UBSAN_OPTIONS=\"halt_on_error=1\" %t.ubsan %t.out/id-0-0-0 2>&1 | FileCheck %s --check-prefix=MULOVFL\n// MULOVFL: runtime error: {{signed|unsigned}} integer overflow\n// MULOVFL: SUMMARY: UndefinedBehaviorSanitizer: undefined-behavior\n\n// Test multiplication overflow detection\n// Multiplication can overflow even with small-looking numbers\n// e.g., 65536 * 65536 overflows 32-bit\n\n#include <stdint.h>\n#include <stdio.h>\n#include <stdlib.h>\n#include <string.h>\n#include \"lib.h\"\n\nint main (int argc, char** argv) {\n  if (argc < 2) {\n    fprintf(stderr, \"Usage: %s [file]\\n\", argv[0]);\n    return 0;\n  }\n\n  FILE* fp = chk_fopen(argv[1], \"rb\");\n  int32_t a = 0, b = 0;\n\n  chk_fread(&a, sizeof(a), 1, fp);\n  chk_fread(&b, sizeof(b), 1, fp);\n  fclose(fp);\n\n  // Start with a=2, b=3 (result=6, no overflow)\n  // Solver should find values that cause overflow\n  // e.g., a=0x10000, b=0x10000 -> 0x100000000 (overflows 32-bit)\n  int32_t result = a * b;\n  printf(\"%d\\n\", result);\n  return 0;\n}\n"
  },
  {
    "path": "tests/ubsan_negateovfl.c",
    "content": "// RUN: rm -rf %t.out\n// RUN: mkdir -p %t.out\n// RUN: python -c\"import sys; sys.stdout.buffer.write(b'\\x00\\x00\\x00\\x00')\" > %t.bin\n// RUN: clang -O0 -fsanitize=signed-integer-overflow -o %t.ubsan %s\n// RUN: env KO_DONT_OPTIMIZE=1 KO_USE_FASTGEN=1 KO_SOLVE_UB=1 %ko-clang -o %t.fg %s\n// RUN: env TAINT_OPTIONS=\"taint_file=%t.bin output_dir=%t.out solve_ub=1\" %fgtest %t.fg %t.bin\n// RUN: not env UBSAN_OPTIONS=\"halt_on_error=1\" %t.ubsan %t.out/id-0-0-0 2>&1 | FileCheck %s --check-prefix=NEGOVFL\n// NEGOVFL: runtime error: negation of\n// NEGOVFL: cannot be represented\n\n// Test negate overflow detection\n// -INT_MIN overflows because INT_MIN = -2147483648 and +2147483648 doesn't fit in int32_t\n\n#include <stdint.h>\n#include <stdio.h>\n#include <stdlib.h>\n#include <string.h>\n#include \"lib.h\"\n\nint main (int argc, char** argv) {\n  if (argc < 2) {\n    fprintf(stderr, \"Usage: %s [file]\\n\", argv[0]);\n    return 0;\n  }\n\n  FILE* fp = chk_fopen(argv[1], \"rb\");\n  int32_t x = 0;\n\n  chk_fread(&x, sizeof(x), 1, fp);\n  fclose(fp);\n\n  // Start with x=0 (no overflow when negating)\n  // Solver should find x=INT_MIN (-2147483648) which overflows when negated\n  int32_t y = -x;\n  printf(\"%d -> %d\\n\", x, y);\n  return 0;\n}\n"
  },
  {
    "path": "tests/ubsan_shift.c",
    "content": "// RUN: rm -rf %t.out\n// RUN: mkdir -p %t.out\n// RUN: python -c\"import sys; sys.stdout.buffer.write(b'\\x02\\x00\\x00\\x00\\x00\\x00\\x00\\x00')\" > %t.bin\n// RUN: clang -fsanitize=undefined -o %t.ubsan %s\n// RUN: env KO_DONT_OPTIMIZE=1 KO_USE_FASTGEN=1 KO_SOLVE_UB=1 %ko-clang -o %t.fg %s\n// RUN: env TAINT_OPTIONS=\"taint_file=%t.bin output_dir=%t.out solve_ub=1\" %fgtest %t.fg %t.bin\n// RUN: not env UBSAN_OPTIONS=\"halt_on_error=1\" %t.ubsan %t.out/id-0-0-0 2>&1 | FileCheck %s --check-prefix=LARGE\n// LARGE: runtime error: shift exponent\n// LARGE: SUMMARY: UndefinedBehaviorSanitizer: undefined-behavior\n// RUN: not env UBSAN_OPTIONS=\"halt_on_error=1\" %t.ubsan %t.out/id-0-0-1 2>&1 | FileCheck %s --check-prefix=NEG\n// NEG: runtime error: shift exponent\n// NEG: SUMMARY: UndefinedBehaviorSanitizer: undefined-behavior\n// RUN: not env UBSAN_OPTIONS=\"halt_on_error=1\" %t.ubsan %t.out/id-0-0-2 2>&1 | FileCheck %s --check-prefix=OVFL\n// OVFL: runtime error: left shift\n// OVFL: SUMMARY: UndefinedBehaviorSanitizer: undefined-behavior\n// RUN: not env UBSAN_OPTIONS=\"halt_on_error=1\" %t.ubsan %t.out/id-0-0-3 2>&1 | FileCheck %s --check-prefix=NEGB\n// NEGB: runtime error: left shift of negative value\n// NEGB: SUMMARY: UndefinedBehaviorSanitizer: undefined-behavior\n\n\n#include <stdint.h>\n#include <stdio.h>\n#include <stdlib.h>\n#include <string.h>\n#include \"lib.h\"\n\nint main (int argc, char** argv) {\n  if (argc < 2) {\n    fprintf(stderr, \"Usage: %s [file]\\n\", argv[0]);\n    return 0;\n  }\n\n  FILE* fp = chk_fopen(argv[1], \"rb\");\n  int base = 0, shift = 0;\n\n  chk_fread(&base, sizeof(base), 1, fp);\n  chk_fread(&shift, sizeof(shift), 1, fp);\n  fclose(fp);\n  printf(\"%d\\n\", base << shift);\n}\n"
  },
  {
    "path": "tests/ubsan_signchange.c",
    "content": "// RUN: rm -rf %t.out\n// RUN: mkdir -p %t.out\n// RUN: python -c\"import sys; sys.stdout.buffer.write(b'\\x00\\x00\\x00\\x00')\" > %t.bin\n// RUN: clang -O0 -fsanitize=implicit-integer-sign-change -o %t.ubsan %s\n// RUN: env KO_DONT_OPTIMIZE=1 KO_USE_FASTGEN=1 KO_SOLVE_UB=1 %ko-clang -o %t.fg %s\n// RUN: env TAINT_OPTIONS=\"taint_file=%t.bin output_dir=%t.out solve_ub=1\" %fgtest %t.fg %t.bin\n// RUN: not env UBSAN_OPTIONS=\"halt_on_error=1\" %t.ubsan %t.out/id-0-0-1 2>&1 | FileCheck %s --check-prefix=SIGNCHANGE\n// SIGNCHANGE: runtime error: implicit conversion from type\n// SIGNCHANGE: changed the value to\n\n// Test implicit integer sign change detection\n// Sign change occurs when truncating a positive value to negative or vice versa\n// Example: int32_t 128 -> int8_t -128 (sign changes from positive to negative)\n\n#include <stdint.h>\n#include <stdio.h>\n#include <stdlib.h>\n#include <string.h>\n#include \"lib.h\"\n\nint main (int argc, char** argv) {\n  if (argc < 2) {\n    fprintf(stderr, \"Usage: %s [file]\\n\", argv[0]);\n    return 0;\n  }\n\n  FILE* fp = chk_fopen(argv[1], \"rb\");\n  int32_t x = 0;\n\n  chk_fread(&x, sizeof(x), 1, fp);\n  fclose(fp);\n\n  // Start with x=0 (no sign change when truncating to int8_t)\n  // Solver should find e.g., x=128 which becomes -128 as int8_t (sign change)\n  // or x=-129 which becomes 127 as int8_t (sign change)\n  // Use implicit conversion (no explicit cast) to trigger ubsan\n  int8_t y = x;\n  printf(\"%d -> %d\\n\", x, y);\n  return 0;\n}\n"
  },
  {
    "path": "tests/ubsan_signed_intovfl.c",
    "content": "// RUN: rm -rf %t.out\n// RUN: mkdir -p %t.out\n// RUN: python -c\"import sys; sys.stdout.buffer.write(b'\\x00\\x00\\x00\\x00\\x01\\x00\\x00\\x00')\" > %t.bin\n// RUN: clang -O0 -fsanitize=signed-integer-overflow -o %t.ubsan %s\n// RUN: env KO_DONT_OPTIMIZE=1 KO_USE_FASTGEN=1 KO_SOLVE_UB=1 %ko-clang -o %t.fg %s\n// RUN: env TAINT_OPTIONS=\"taint_file=%t.bin output_dir=%t.out solve_ub=1\" %fgtest %t.fg %t.bin\n// RUN: not env UBSAN_OPTIONS=\"halt_on_error=1\" %t.ubsan %t.out/id-0-0-0 2>&1 | FileCheck %s --check-prefix=ADDOVFL\n// ADDOVFL: runtime error: signed integer overflow\n// ADDOVFL: SUMMARY: UndefinedBehaviorSanitizer: undefined-behavior\n\n// Test signed integer overflow detection\n// The runtime should detect when addition can overflow in both directions:\n// - Positive overflow: INT_MAX + positive -> negative\n// - Negative overflow: INT_MIN + negative -> positive\n\n#include <stdint.h>\n#include <stdio.h>\n#include <stdlib.h>\n#include <string.h>\n#include \"lib.h\"\n\nint main (int argc, char** argv) {\n  if (argc < 2) {\n    fprintf(stderr, \"Usage: %s [file]\\n\", argv[0]);\n    return 0;\n  }\n\n  FILE* fp = chk_fopen(argv[1], \"rb\");\n  int32_t a = 0, b = 0;\n\n  chk_fread(&a, sizeof(a), 1, fp);\n  chk_fread(&b, sizeof(b), 1, fp);\n  fclose(fp);\n\n  // Start with a=0, b=1 (no overflow)\n  // Solver should find a=INT_MAX, b=1 (overflow)\n  // or a=INT_MIN, b=-1 (underflow)\n  int32_t result = a + b;\n  printf(\"%d\\n\", result);\n  return 0;\n}\n"
  },
  {
    "path": "tests/ubsan_subovfl.c",
    "content": "// RUN: rm -rf %t.out\n// RUN: mkdir -p %t.out\n// RUN: python -c\"import sys; sys.stdout.buffer.write(b'\\x00\\x00\\x00\\x00\\x01\\x00\\x00\\x00')\" > %t.bin\n// RUN: clang -O0 -fsanitize=signed-integer-overflow -o %t.ubsan %s\n// RUN: env KO_DONT_OPTIMIZE=1 KO_USE_FASTGEN=1 KO_SOLVE_UB=1 %ko-clang -o %t.fg %s\n// RUN: env TAINT_OPTIONS=\"taint_file=%t.bin output_dir=%t.out solve_ub=1\" %fgtest %t.fg %t.bin\n// RUN: not env UBSAN_OPTIONS=\"halt_on_error=1\" %t.ubsan %t.out/id-0-0-0 2>&1 | FileCheck %s --check-prefix=SUBOVFL\n// SUBOVFL: runtime error: signed integer overflow\n// SUBOVFL: SUMMARY: UndefinedBehaviorSanitizer: undefined-behavior\n\n// Test signed subtraction overflow detection\n// Overflow occurs when:\n//   - INT_MAX - (-1) wraps to negative (positive overflow)\n//   - INT_MIN - 1 wraps to positive (negative overflow)\n\n#include <stdint.h>\n#include <stdio.h>\n#include <stdlib.h>\n#include <string.h>\n#include \"lib.h\"\n\nint main (int argc, char** argv) {\n  if (argc < 2) {\n    fprintf(stderr, \"Usage: %s [file]\\n\", argv[0]);\n    return 0;\n  }\n\n  FILE* fp = chk_fopen(argv[1], \"rb\");\n  int32_t a = 0, b = 0;\n\n  chk_fread(&a, sizeof(a), 1, fp);\n  chk_fread(&b, sizeof(b), 1, fp);\n  fclose(fp);\n\n  // Start with a=0, b=1 (no overflow)\n  // Solver should find e.g., a=INT_MIN, b=1 (underflow)\n  // or a=INT_MAX, b=-1 (overflow)\n  int32_t result = a - b;\n  printf(\"%d\\n\", result);\n  return 0;\n}\n"
  },
  {
    "path": "tests/ubsan_trunc.c",
    "content": "// RUN: rm -rf %t.out\n// RUN: mkdir -p %t.out\n// RUN: python -c\"import sys; sys.stdout.buffer.write(b'\\x00\\x00\\x00\\x00' * 4)\" > %t.bin\n// RUN: clang -fsanitize=implicit-integer-truncation -o %t.ubsan %s\n// RUN: env KO_DONT_OPTIMIZE=1 KO_USE_FASTGEN=1 KO_SOLVE_UB=1 %ko-clang -o %t.fg %s\n// RUN: env TAINT_OPTIONS=\"taint_file=%t.bin output_dir=%t.out solve_ub=1\" %fgtest %t.fg %t.bin\n// RUN: not env UBSAN_OPTIONS=\"halt_on_error=1\" %t.ubsan %t.out/id-0-0-0 2>&1 | FileCheck %s --check-prefix=CHECK-V0\n// RUN: not env UBSAN_OPTIONS=\"halt_on_error=1\" %t.ubsan %t.out/id-0-0-3 2>&1 | FileCheck %s --check-prefix=CHECK-V1\n// RUN: not env UBSAN_OPTIONS=\"halt_on_error=1\" %t.ubsan %t.out/id-0-0-4 2>&1 | FileCheck %s --check-prefix=CHECK-V2\n// RUN: not env UBSAN_OPTIONS=\"halt_on_error=1\" %t.ubsan %t.out/id-0-0-7 2>&1 | FileCheck %s --check-prefix=CHECK-V3\n\n#include <stdint.h>\n#include <stdio.h>\n#include <stdlib.h>\n#include <string.h>\n#include \"lib.h\"\n\n// Test plan:\n//  * Two types - int and char\n//  * Two signs - signed and unsigned\n//  * Square that - we have input and output types.\n// Thus, there are total of (2*2)^2 == 16 tests.\n// These are all the possible variations/combinations of casts.\n// However, not all of them should result in the check.\n// So here, we *only* check which should and which should not result in checks.\n\nuint32_t convert_unsigned_int_to_unsigned_int(uint32_t x) {\n#line 100\n  return x;\n}\n\nuint8_t convert_unsigned_char_to_unsigned_char(uint8_t x) {\n#line 200\n  return x;\n}\n\nint32_t convert_signed_int_to_signed_int(int32_t x) {\n#line 300\n  return x;\n}\n\nint8_t convert_signed_char_to_signed_char(int8_t x) {\n#line 400\n  return x;\n}\n\nuint8_t convert_unsigned_int_to_unsigned_char(uint32_t x) {\n#line 500\n  return x;\n}\n\nuint32_t convert_unsigned_char_to_unsigned_int(uint8_t x) {\n#line 600\n  return x;\n}\n\nint32_t convert_unsigned_char_to_signed_int(uint8_t x) {\n#line 700\n  return 
x;\n}\n\nint32_t convert_signed_char_to_signed_int(int8_t x) {\n#line 800\n  return x;\n}\n\nint32_t convert_unsigned_int_to_signed_int(uint32_t x) {\n#line 900\n  return x;\n}\n\nuint32_t convert_signed_int_to_unsigned_int(int32_t x) {\n#line 1000\n  return x;\n}\n\nuint8_t convert_signed_int_to_unsigned_char(int32_t x) {\n#line 1100\n  return x;\n}\n\nuint8_t convert_signed_char_to_unsigned_char(int8_t x) {\n#line 1200\n  return x;\n}\n\nint8_t convert_unsigned_char_to_signed_char(uint8_t x) {\n#line 1300\n  return x;\n}\n\nuint32_t convert_signed_char_to_unsigned_int(int8_t x) {\n#line 1400\n  return x;\n}\n\nint8_t convert_unsigned_int_to_signed_char(uint32_t x) {\n#line 1500\n  return x;\n}\n\nint8_t convert_signed_int_to_signed_char(int32_t x) {\n#line 1600\n  return x;\n}\n\n#line 1111 // !!!\n\nint main (int argc, char** argv) {\n  if (argc < 2) {\n    fprintf(stderr, \"Usage: %s [file]\\n\", argv[0]);\n    return 0;\n  }\n\n  FILE* fp = chk_fopen(argv[1], \"rb\");\n  int x[4] = {0};\n\n  chk_fread(x, sizeof(x), 1, fp);\n  fclose(fp);\n\n  convert_unsigned_int_to_unsigned_char((unsigned)x[0]);\n// CHECK-V0: ubsan_trunc.c:500:10: runtime error: implicit conversion from type '{{.*}}' (aka 'unsigned int') of value {{.*}} (32-bit, unsigned) to type '{{.*}}' (aka 'unsigned char') changed the value to {{.*}} (8-bit, unsigned)\n  convert_signed_int_to_unsigned_char(x[1]);\n// CHECK-V1: ubsan_trunc.c:1100:10: runtime error: implicit conversion from type '{{.*}}' (aka 'int') of value {{.*}} (32-bit, signed) to type '{{.*}}' (aka 'unsigned char') changed the value to {{.*}} (8-bit, unsigned)\n  convert_unsigned_int_to_signed_char((unsigned)x[2]);\n// CHECK-V2: ubsan_trunc.c:1500:10: runtime error: implicit conversion from type '{{.*}}' (aka 'unsigned int') of value {{.*}} (32-bit, unsigned) to type '{{.*}}' (aka '{{(signed )?}}char') changed the value to {{.*}} (8-bit, signed)\n  convert_signed_int_to_signed_char(x[3]);\n// CHECK-V3: ubsan_trunc.c:1600:10: runtime 
error: implicit conversion from type '{{.*}}' (aka 'int') of value {{.*}} (32-bit, signed) to type '{{.*}}' (aka '{{(signed )?}}char') changed the value to {{.*}} (8-bit, signed)\n\n  return 0;\n}\n"
  },
  {
    "path": "tests/unaligned_load.c",
    "content": "// RUN: rm -rf %t.out\n// RUN: mkdir -p %t.out\n// RUN: python -c'print(\"A\"*20)' > %t.bin\n// RUN: clang -o %t.uninstrumented %s\n// RUN: %t.uninstrumented %t.bin | FileCheck --check-prefix=CHECK-ORIG %s\n// RUN: env KO_DONT_OPTIMIZE=1 KO_USE_FASTGEN=1 %ko-clang -o %t.fg %s\n// RUN: env TAINT_OPTIONS=\"taint_file=%t.bin output_dir=%t.out\" %fgtest %t.fg %t.bin\n// RUN: %t.uninstrumented %t.out/id-0-0-0 | FileCheck --check-prefix=CHECK-GEN %s\n// RUN: env KO_DONT_OPTIMIZE=1 KO_USE_Z3=1 %ko-clang -o %t.z3 %s\n// RUN: env TAINT_OPTIONS=\"taint_file=%t.bin output_dir=%t.out\" %t.z3 %t.bin\n// RUN: %t.uninstrumented %t.out/id-0-0-0 | FileCheck --check-prefix=CHECK-GEN %s\n\n#include <stdint.h>\n#include <stdio.h>\n#include <stdlib.h>\n#include <string.h>\n#include \"lib.h\"\n\nint main (int argc, char** argv) {\n  if (argc < 2) {\n    fprintf(stderr, \"Usage: %s [file]\\n\", argv[0]);\n    return -1;\n  }\n\n  int buf[5];\n  FILE* fp = chk_fopen(argv[1], \"rb\");\n  chk_fread(buf, 1, sizeof(buf), fp);\n  fclose(fp);\n\n  buf[0] += 0xbadf00d;\n  buf[2] = *(int*)((void*)buf + 1);\n\n  if (buf[2] == 0xdeadbeef) {\n    // CHECK-GEN: Good\n    printf(\"Good\\n\");\n  }\n  else {\n    // CHECK-ORIG: Bad\n    printf(\"Bad\\n\");\n  }\n}\n"
  },
  {
    "path": "tests/xor_bool.c",
    "content": "// RUN: rm -rf %t.out\n// RUN: mkdir -p %t.out\n// RUN: python -c'print(\"A\"*4 + \"B\"*4)' > %t.bin\n// RUN: clang -o %t.uninstrumented %s\n// RUN: %t.uninstrumented %t.bin | FileCheck --check-prefix=CHECK-ORIG %s\n// RUN: env KO_DONT_OPTIMIZE=1 KO_USE_FASTGEN=1 %ko-clang -o %t.fg %s\n// RUN: env TAINT_OPTIONS=\"taint_file=%t.bin output_dir=%t.out\" %fgtest %t.fg %t.bin\n// RUN: %t.uninstrumented %t.out/id-0-0-0 | FileCheck --check-prefix=CHECK-GEN %s\n// RUN: env KO_DONT_OPTIMIZE=1 KO_USE_Z3=1 %ko-clang -o %t.z3 %s\n// RUN: env TAINT_OPTIONS=\"taint_file=%t.bin output_dir=%t.out\" %t.z3 %t.bin\n// RUN: %t.uninstrumented %t.out/id-0-0-0 | FileCheck --check-prefix=CHECK-GEN %s\n\n#include <stdint.h>\n#include <stdio.h>\n#include <stdlib.h>\n#include <string.h>\n#include \"lib.h\"\n\nint __attribute__((noinline)) foo(int32_t y) {\n  return y * y  - 6 * y  == -8;\n}\n\nint main (int argc, char** argv) {\n  if (argc < 2) {\n    fprintf(stderr, \"Usage: %s [file]\\n\", argv[0]);\n    return -1;\n  }\n\n  int x = 0;\n  int y = 0;\n  FILE* fp = chk_fopen(argv[1], \"rb\");\n  chk_fread(&x, 1, sizeof(x), fp);\n  chk_fread(&y, 1, sizeof(y), fp);\n  fclose(fp);\n\n  if (x == 0xdeadbeef ^ y == 0xbadf00d) {\n    // CHECK-GEN: Good\n    printf(\"Good\\n\");\n  } else {\n    // CHECK-ORIG: Bad\n    printf(\"Bad\\n\");\n  }\n}\n"
  },
  {
    "path": "wrappers/CMakeLists.txt",
    "content": "set(CMAKE_POSITION_INDEPENDENT_CODE ON)\n\n## custom model\n# add_library(ZlibRt STATIC zlib_func.c)\n# install (TARGETS ZlibRt DESTINATION ${SYMSAN_LIB_DIR})\ninstall (FILES \"zlib_abilist.txt\" DESTINATION ${SYMSAN_LIB_DIR})\n"
  },
  {
    "path": "wrappers/zlib_abilist.txt",
    "content": "fun:adler32=uninstrumented\nfun:adler32_combine=uninstrumented\nfun:adler32_combine64=uninstrumented\nfun:adler32_z=uninstrumented\nfun:crc32=uninstrumented\nfun:crc32_combine=uninstrumented\nfun:crc32_combine64=uninstrumented\nfun:crc32_z=uninstrumented\nfun:get_crc_table=uninstrumented\nfun:deflate=uninstrumented\nfun:deflateBound=uninstrumented\nfun:deflateCopy=uninstrumented\nfun:deflateEnd=uninstrumented\nfun:deflateGetDictionary=uninstrumented\nfun:deflateInit_=uninstrumented\nfun:deflateInit2_=uninstrumented\nfun:deflateParams=uninstrumented\nfun:deflatePending=uninstrumented\nfun:deflatePrime=uninstrumented\nfun:deflateReset=uninstrumented\nfun:deflateResetKeep=uninstrumented\nfun:deflateSetDictionary=uninstrumented\nfun:deflateSetHeader=uninstrumented\nfun:deflateTune=uninstrumented\nfun:inflateBack=uninstrumented\nfun:inflateBackEnd=uninstrumented\nfun:inflateBackInit_=uninstrumented\nfun:inflate_fast=uninstrumented\nfun:inflate=uninstrumented\nfun:inflateCodesUsed=uninstrumented\nfun:inflateCopy=uninstrumented\nfun:inflateEnd=uninstrumented\nfun:inflateGetDictionary=uninstrumented\nfun:inflateGetHeader=uninstrumented\nfun:inflateInit_=uninstrumented\nfun:inflateInit2_=uninstrumented\nfun:inflateMark=uninstrumented\nfun:inflatePrime=uninstrumented\nfun:inflateReset=uninstrumented\nfun:inflateReset2=uninstrumented\nfun:inflateResetKeep=uninstrumented\nfun:inflateSetDictionary=uninstrumented\nfun:inflateSync=uninstrumented\nfun:inflateSyncPoint=uninstrumented\nfun:inflateUndermine=uninstrumented\nfun:inflateValidate=uninstrumented\nfun:inflate_table=uninstrumented\nfun:_tr_align=uninstrumented\nfun:_tr_flush_bits=uninstrumented\nfun:_tr_flush_block=uninstrumented\nfun:_tr_init=uninstrumented\nfun:_tr_stored_block=uninstrumented\nfun:_tr_tally=uninstrumented\nfun:gzflags=uninstrumented\nfun:zcalloc=uninstrumented\nfun:zcfree=uninstrumented\nfun:zError=uninstrumented\nfun:zlibCompileFlags=uninstrumented\nfun:zlibVersion=uninstrumented\nfun:comp
ress=uninstrumented\nfun:compress2=uninstrumented\nfun:compressBound=uninstrumented\nfun:uncompress=uninstrumented\nfun:uncompress2=uninstrumented\nfun:gzclose=uninstrumented\nfun:gzbuffer=uninstrumented\nfun:gzclearerr=uninstrumented\nfun:gzdopen=uninstrumented\nfun:gzeof=uninstrumented\nfun:gzerror=uninstrumented\nfun:gz_error=uninstrumented\nfun:gzoffset=uninstrumented\nfun:gzoffset64=uninstrumented\nfun:gzopen=uninstrumented\nfun:gzopen64=uninstrumented\nfun:gzrewind=uninstrumented\nfun:gzseek=uninstrumented\nfun:gzseek64=uninstrumented\nfun:gztell=uninstrumented\nfun:gztell64=uninstrumented\nfun:gzclose_r=uninstrumented\nfun:gzdirect=uninstrumented\nfun:gzfread=uninstrumented\nfun:gzgetc=uninstrumented\nfun:gzgetc_=uninstrumented\nfun:gzgets=uninstrumented\nfun:gzread=uninstrumented\nfun:gzungetc=uninstrumented\nfun:gzclose_w=uninstrumented\nfun:gzflush=uninstrumented\nfun:gzfwrite=uninstrumented\nfun:gzprintf=uninstrumented\nfun:gzputc=uninstrumented\nfun:gzputs=uninstrumented\nfun:gzsetparams=uninstrumented\nfun:gzvprintf=uninstrumented\nfun:gzwrite=uninstrumented\nfun:adler32=discard\nfun:adler32_combine=discard\nfun:adler32_combine64=discard\nfun:adler32_z=discard\nfun:crc32=discard\nfun:crc32_combine=discard\nfun:crc32_combine64=discard\nfun:crc32_z=discard\nfun:get_crc_table=discard\nfun:deflate=discard\nfun:deflateBound=discard\nfun:deflateCopy=discard\nfun:deflateEnd=discard\nfun:deflateGetDictionary=discard\nfun:deflateInit_=discard\nfun:deflateInit2_=discard\nfun:deflateParams=discard\nfun:deflatePending=discard\nfun:deflatePrime=discard\nfun:deflateReset=discard\nfun:deflateResetKeep=discard\nfun:deflateSetDictionary=discard\nfun:deflateSetHeader=discard\nfun:deflateTune=discard\nfun:inflateBack=discard\nfun:inflateBackEnd=discard\nfun:inflateBackInit_=discard\nfun:inflate_fast=discard\nfun:inflate=discard\nfun:inflateCodesUsed=discard\nfun:inflateCopy=discard\nfun:inflateEnd=discard\nfun:inflateGetDictionary=discard\nfun:inflateGetHeader=discard\nfu
n:inflateInit_=discard\nfun:inflateInit2_=discard\nfun:inflateMark=discard\nfun:inflatePrime=discard\nfun:inflateReset=discard\nfun:inflateReset2=discard\nfun:inflateResetKeep=discard\nfun:inflateSetDictionary=discard\nfun:inflateSync=discard\nfun:inflateSyncPoint=discard\nfun:inflateUndermine=discard\nfun:inflateValidate=discard\nfun:inflate_table=discard\nfun:_tr_align=discard\nfun:_tr_flush_bits=discard\nfun:_tr_flush_block=discard\nfun:_tr_init=discard\nfun:_tr_stored_block=discard\nfun:_tr_tally=discard\nfun:gzflags=discard\nfun:zcalloc=discard\nfun:zcfree=discard\nfun:zError=discard\nfun:zlibCompileFlags=discard\nfun:zlibVersion=discard\nfun:compress=discard\nfun:compress2=discard\nfun:compressBound=discard\nfun:uncompress=discard\nfun:uncompress2=discard\nfun:gzclose=discard\nfun:gzbuffer=discard\nfun:gzclearerr=discard\nfun:gzdopen=discard\nfun:gzeof=discard\nfun:gzerror=discard\nfun:gz_error=discard\nfun:gzoffset=discard\nfun:gzoffset64=discard\nfun:gzopen=discard\nfun:gzopen64=discard\nfun:gzrewind=discard\nfun:gzseek=discard\nfun:gzseek64=discard\nfun:gztell=discard\nfun:gztell64=discard\nfun:gzclose_r=discard\nfun:gzdirect=discard\nfun:gzfread=discard\nfun:gzgetc=discard\nfun:gzgetc_=discard\nfun:gzgets=discard\nfun:gzread=discard\nfun:gzungetc=discard\nfun:gzclose_w=discard\nfun:gzflush=discard\nfun:gzfwrite=discard\nfun:gzprintf=discard\nfun:gzputc=discard\nfun:gzputs=discard\nfun:gzsetparams=discard\nfun:gzvprintf=discard\nfun:gzwrite=discard\n"
  }
]